#include "../internal.h"
#include <c_api/ie_c_api.h>
#define APPEND_STRING(generated_string, iterate_string)                                            \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);
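A minimal usage sketch of this macro (the names here are hypothetical): each expansion calls av_asprintf() to build a fresh string, the previous value is not freed, and the final result is the caller's to release, which is why the file only uses it on error-reporting paths.

    /* hypothetical: accumulate device names into one space-separated string */
    char *all_names = NULL;
    APPEND_STRING(all_names, "CPU")
    APPEND_STRING(all_names, "GPU")
    /* all_names is now "CPU GPU" */
    av_free(all_names);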
#define OFFSET(x) offsetof(OVContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
/* from get_datatype_size() */
    case DNN_FLOAT: return sizeof(float);
    case DNN_UINT8: return sizeof(uint8_t);
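These two returns are the arms of a switch over DNNDataType; a sketch of the complete helper, assuming only the two types shown above are supported:

static int get_datatype_size(DNNDataType dt)
{
    switch (dt) {
    case DNN_FLOAT:
        return sizeof(float);
    case DNN_UINT8:
        return sizeof(uint8_t);
    default:
        av_assert0(!"not supported yet.");
        return 1;
    }
}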
/* from fill_model_input_ov(): map the request's input blob into a DNNData view */
dimensions_t dims;
precision_e precision;
ie_blob_buffer_t blob_buffer;
DNNData input;
ie_blob_t *input_blob = NULL;

status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
if (status != OK) {
    av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
    return DNN_ERROR;
}

status |= ie_blob_get_dims(input_blob, &dims);
status |= ie_blob_get_precision(input_blob, &precision);
if (status != OK) {
    ie_blob_free(&input_blob);
    return DNN_ERROR;
}

status = ie_blob_get_buffer(input_blob, &blob_buffer);
if (status != OK) {
    ie_blob_free(&input_blob);
    return DNN_ERROR;
}

/* OpenVINO 4-D blobs are NCHW: dims[1] = C, dims[2] = H, dims[3] = W */
input.height   = dims.dims[2];
input.width    = dims.dims[3];
input.channels = dims.dims[1];
input.data     = blob_buffer.buffer;
input.dt       = precision_to_datatype(precision);

for (int i = 0; i < ctx->options.batch_size; ++i) {
    /* pop one queued task per batch slot and copy its frame into the blob */
}
ie_blob_free(&input_blob);
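Since the blob is NCHW with dims[0] as the batch dimension, each iteration of the loop above fills one image-sized slice of the buffer. A hedged sketch of the slice arithmetic (the helper name is hypothetical; get_datatype_size() is the helper from this file):

/* hypothetical helper: pointer to the i-th image inside a batched NCHW blob */
static uint8_t *batch_slice(const DNNData *input, int i)
{
    size_t image_bytes = (size_t)input->channels * input->height * input->width
                       * get_datatype_size(input->dt);
    return (uint8_t *)input->data + i * image_bytes;
}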
/* from infer_completion_callback(): read back the output blob when a request completes */
dimensions_t dims;
precision_e precision;
DNNData output;
ie_blob_t *output_blob = NULL;
ie_blob_buffer_t blob_buffer;

status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
if (status != OK) {
    /* incorrect output name: list every output the model actually provides
     * so the user can correct it */
    char *model_output_name = NULL;
    char *all_output_names = NULL;
    size_t model_output_count = 0;
    status = ie_network_get_outputs_number(ov_model->network, &model_output_count);
    for (size_t i = 0; i < model_output_count; i++) {
        status = ie_network_get_output_name(ov_model->network, i, &model_output_name);
        APPEND_STRING(all_output_names, model_output_name)
    }
    av_log(ctx, AV_LOG_ERROR,
           "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
           task->output_names[0], all_output_names);
    return;
}

status = ie_blob_get_buffer(output_blob, &blob_buffer);
if (status != OK) {
    ie_blob_free(&output_blob);
    av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
    return;
}

status |= ie_blob_get_dims(output_blob, &dims);
status |= ie_blob_get_precision(output_blob, &precision);
if (status != OK) {
    ie_blob_free(&output_blob);
    av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
    return;
}

/* same NCHW convention as on the input side */
output.channels = dims.dims[1];
output.height   = dims.dims[2];
output.width    = dims.dims[3];
output.dt       = precision_to_datatype(precision);
output.data     = blob_buffer.buffer;

ie_blob_free(&output_blob);
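This callback runs when an asynchronous request completes. A hedged sketch of how it would be registered on a request with the IE C API (the request layout is assumed from the excerpts above):

ie_complete_call_back_t callback;
callback.completeCallBackFunc = infer_completion_callback;
callback.args = request;
if (ie_infer_set_completion_callback(request->infer_request, &callback) != OK)
    return DNN_ERROR;
if (ie_infer_request_infer_async(request->infer_request) != OK)
    return DNN_ERROR;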
/* from init_model_ov() */
ie_available_devices_t a_dev;
char *all_dev_names = NULL;

/* a non-positive batch size means "not set"; default to 1 */
if (ctx->options.batch_size <= 0) {
    ctx->options.batch_size = 1;
}

/* for batching, rewrite dims[0] (the N of NCHW) of every input and reshape */
if (ctx->options.batch_size > 1) {
    input_shapes_t input_shapes;
    status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
    for (int i = 0; i < input_shapes.shape_num; i++)
        input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
    status = ie_network_reshape(ov_model->network, input_shapes);
    ie_network_input_shapes_free(&input_shapes);
}

/* OpenVINO's native 4-D layout is NCHW, while FFmpeg hands over NHWC data,
 * so ask the runtime to accept NHWC on both ends */
status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);

status = ie_network_set_input_precision(ov_model->network, input_name, U8);

/* if loading the network onto the device fails, report what is available */
status = ie_core_get_available_devices(ov_model->core, &a_dev);
for (int i = 0; i < a_dev.num_devices; i++) {
    APPEND_STRING(all_dev_names, a_dev.devices[i])
}
av_log(ctx, AV_LOG_ERROR,
       "device %s may not be supported, all available devices are: \"%s\"\n",
       ctx->options.device_type, all_dev_names);
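The device listing above is the failure path of loading the compiled network; a hedged sketch of the load call it guards (an empty ie_config_t is assumed):

ie_config_t config = {NULL, NULL, NULL};
status = ie_core_load_network(ov_model->core, ov_model->network,
                              ctx->options.device_type, &config,
                              &ov_model->exe_network);
if (status != OK) {
    /* fall through to the available-devices report above */
}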
/* nireq: number of parallel inference requests; derive a default when unset */
if (ctx->options.nireq <= 0) {
    ctx->options.nireq = av_cpu_count() / 2 + 1;
}

for (int i = 0; i < ctx->options.nireq; i++) {
    /* allocate one request slot per iteration, see the sketch below */
}
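A hedged sketch of what each iteration of that loop would set up (error handling abbreviated to a hypothetical err label; OVRequestItem fields are assumed from the surrounding excerpts):

OVRequestItem *item = av_mallocz(sizeof(*item));
if (!item)
    goto err;
if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
    av_freep(&item);
    goto err;
}
if (ie_exec_network_create_infer_request(ov_model->exe_network,
                                         &item->infer_request) != OK)
    goto err;
item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
if (!item->lltasks)
    goto err;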
ov_model = task->model;
/* from get_input_ov(): look up the named input and report its shape */
char *model_input_name = NULL;
char *all_input_names = NULL;
size_t model_input_count = 0;
dimensions_t dims;
precision_e precision;
int input_resizable = ctx->options.input_resizable;

status = ie_network_get_inputs_number(ov_model->network, &model_input_count);

for (size_t i = 0; i < model_input_count; i++) {
    status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
    if (strcmp(model_input_name, input_name) == 0) {
        ie_network_name_free(&model_input_name);
        status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
        status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);

        input->channels = dims.dims[1];
        /* -1 tells the caller that a resizable model accepts any spatial size */
        input->height   = input_resizable ? -1 : dims.dims[2];
        input->width    = input_resizable ? -1 : dims.dims[3];
        input->dt       = precision_to_datatype(precision);
        return DNN_SUCCESS;
    }

    /* no match: remember the name so the error message below can list it */
    APPEND_STRING(all_input_names, model_input_name)
    ie_network_name_free(&model_input_name);
}

av_log(ctx, AV_LOG_ERROR,
       "Could not find \"%s\" in model, all input(s) are: \"%s\"\n",
       input_name, all_input_names);
/* from contain_valid_detection_bbox(): every bbox must lie inside the frame */
for (uint32_t i = 0; i < header->nb_bboxes; i++) {
    bbox = av_get_detection_bbox(header, i);
    if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
        return 0;
    }
    if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
        return 0;
    }
}
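For context, a hedged sketch of how header would be obtained from frame side data before this loop runs (the side-data API is listed in the reference section below):

AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
if (!sd || !sd->size)
    return 0; /* this frame carries no detection result */
header = (const AVDetectionBBoxHeader *)sd->data;
if (!header->nb_bboxes)
    return 0;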
/* from get_output_ov(void *model, const char *input_name, int input_width,
 * int input_height, const char *output_name, int *output_width, int *output_height) */
input_shapes_t input_shapes;
DNNExecBaseParams exec_params = {
    .output_names = &output_name,
};

/* for a resizable model, pin the spatial dims to the incoming frame size now */
if (ctx->options.input_resizable) {
    status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
    input_shapes.shapes->shape.dims[2] = input_height;
    input_shapes.shapes->shape.dims[3] = input_width;
    status |= ie_network_reshape(ov_model->network, input_shapes);
    ie_network_input_shapes_free(&input_shapes);
}
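A hedged usage sketch of this entry point through the DNNModel function pointer (the names and sizes are hypothetical):

int out_w = 0, out_h = 0;
if (model->get_output(model->model, "data", 1280, 720,
                      "detection_out", &out_w, &out_h) != DNN_SUCCESS)
    return DNN_ERROR;
/* out_w/out_h now reflect the model's output size for a 1280x720 input */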
/* from ff_dnn_load_model_ov(): tie the generic DNNModel and the backend together */
model->model = ov_model;
ov_model->model = model;
ov_model->ctx.class = &dnn_openvino_class;

/* if reading the network fails, hint at an IR/runtime version mismatch */
ver = ie_c_api_version();
av_log(ctx, AV_LOG_ERROR,
       "Failed to read the network from model file %s,\n"
       "Please check if the model version matches the runtime OpenVINO %s\n",
       model_filename, ver.api_version);
ie_version_free(&ver);
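A hedged sketch of the create/read sequence that precedes this error report (an empty core config string is assumed):

status = ie_core_create("", &ov_model->core);
if (status != OK)
    goto err;
status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
if (status != OK) {
    /* the version-mismatch message above is logged here */
    goto err;
}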
/* from ff_dnn_execute_model_ov(): async mode dispatches whole batches,
 * sync mode does not support batching yet */
if (ctx->options.async) {
    /* see the dispatch sketch below */
} else {
    if (ctx->options.batch_size > 1) {
        avpriv_report_missing_feature(ctx, "batch mode for sync execution");
        return DNN_ERROR;
    }
}
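A hedged sketch of the async dispatch loop the first branch implies (the exact control flow is assumed; the queue helpers are listed in the reference section below):

while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
    request = ff_safe_queue_pop_front(ov_model->request_queue);
    if (!request) {
        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
        return DNN_ERROR;
    }
    if (execute_model_ov(request, ov_model->lltask_queue) != DNN_SUCCESS)
        return DNN_ERROR;
}
return DNN_SUCCESS;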
/* from ff_dnn_free_model_ov(): release the network, then the core */
if (ov_model->network)
    ie_network_free(&ov_model->network);
if (ov_model->core)
    ie_core_free(&ov_model->core);
Symbols referenced in this file (from the generated cross-reference):
ie_complete_call_back_t callback
DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
ie_executable_network_t * exe_network
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
AVFrame
This structure describes decoded (raw) audio or video data.
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
FramePrePostProc frame_pre_proc
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
AVFilterContext * filter_ctx
Queue * ff_queue_create(void)
Create a Queue instance.
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
DNNReturnType(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Queue
Linear double-ended data structure.
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
#define DNN_BACKEND_COMMON_OPTIONS
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
void ff_dnn_free_model_ov(DNNModel **model)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
DNNReturnType(* get_input)(void *model, DNNData *input, const char *input_name)
ie_infer_request_t * infer_request
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
AVClass
Describe the class of an AVClass context structure.
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
FramePrePostProc frame_post_proc
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
LastLevelTaskItem ** lltasks
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
static int get_datatype_size(DNNDataType dt)
DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
DetectPostProc detect_post_proc
DNNFunctionType func_type
void avpriv_report_missing_feature(void *avc, const char *msg, ...) av_printf_format(2, 3)
Log a generic warning message about a missing feature.
static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
static int contain_valid_detection_bbox(AVFrame *frame)
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
static const AVOption dnn_openvino_options[]
static void infer_completion_callback(void *args)
#define av_malloc_array(a, b)
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
static DNNDataType precision_to_datatype(precision_e precision)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
SafeQueue * request_queue
const char ** output_names
ClassifyPostProc classify_post_proc
int x
Distance in pixels from the left/top edge of the frame, together with width and height, defining the bounding box.
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
AVFrameSideData
Structure to hold side data for an AVFrame.
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)