FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_backend_openvino.h"
27 #include "dnn_io_proc.h"
28 #include "libavformat/avio.h"
29 #include "libavutil/avassert.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/avstring.h"
33 #include "../internal.h"
34 #include "queue.h"
35 #include "safe_queue.h"
36 #include <c_api/ie_c_api.h>
37 #include "dnn_backend_common.h"
38 
39 typedef struct OVOptions{
40  char *device_type;
41  int nireq;
44 } OVOptions;
45 
46 typedef struct OVContext {
47  const AVClass *class;
49 } OVContext;
50 
51 typedef struct OVModel{
54  ie_core_t *core;
55  ie_network_t *network;
56  ie_executable_network_t *exe_network;
57  SafeQueue *request_queue; // holds RequestItem
58  Queue *task_queue; // holds TaskItem
59  Queue *inference_queue; // holds InferenceItem
60 } OVModel;
61 
62 // one task for one function call from dnn interface
63 typedef struct TaskItem {
65  const char *input_name;
67  const char *output_name;
69  int do_ioproc;
70  int async;
71  uint32_t inference_todo;
72  uint32_t inference_done;
73 } TaskItem;
74 
75 // one task might have multiple inferences
76 typedef struct InferenceItem {
78  uint32_t bbox_index;
80 
81 // one request for one call to openvino
82 typedef struct RequestItem {
83  ie_infer_request_t *infer_request;
85  uint32_t inference_count;
86  ie_complete_call_back_t callback;
87 } RequestItem;
88 
// Append iterate_string to generated_string, space-separated, allocating a
// fresh buffer with av_asprintf() on every expansion. The expansion ends in
// ';', so call sites deliberately omit the trailing semicolon.
// NOTE(review): the previous value of generated_string is never freed, so
// every append leaks the old buffer (and the final result is leaked by the
// call sites in this file as well) — confirm and fix with av_freep() of the
// old pointer before reassigning.
89 #define APPEND_STRING(generated_string, iterate_string) \
90  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
91  av_asprintf("%s", iterate_string);
92 
93 #define OFFSET(x) offsetof(OVContext, x)
94 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
// Options exposed by the OpenVINO DNN backend:
//   device          - OpenVINO device to run the model on (default "CPU")
//   nireq           - number of parallel infer requests; 0 selects a default
//                     derived from the CPU count (see init_model_ov())
//   batch_size      - inferences batched into one request, range 1..1000
//   input_resizable - whether the network input may be reshaped later
95 static const AVOption dnn_openvino_options[] = {
96  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
97  { "nireq", "number of request", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
98  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
99  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
100  { NULL }
101 };
102 
103 AVFILTER_DEFINE_CLASS(dnn_openvino);
104 
105 static DNNDataType precision_to_datatype(precision_e precision)
106 {
107  switch (precision)
108  {
109  case FP32:
110  return DNN_FLOAT;
111  case U8:
112  return DNN_UINT8;
113  default:
114  av_assert0(!"not supported yet.");
115  return DNN_FLOAT;
116  }
117 }
118 
120 {
121  switch (dt)
122  {
123  case DNN_FLOAT:
124  return sizeof(float);
125  case DNN_UINT8:
126  return sizeof(uint8_t);
127  default:
128  av_assert0(!"not supported yet.");
129  return 1;
130  }
131 }
132 
134 {
135  dimensions_t dims;
136  precision_e precision;
137  ie_blob_buffer_t blob_buffer;
138  OVContext *ctx = &ov_model->ctx;
139  IEStatusCode status;
140  DNNData input;
141  ie_blob_t *input_blob = NULL;
142  InferenceItem *inference;
143  TaskItem *task;
144 
145  inference = ff_queue_peek_front(ov_model->inference_queue);
146  av_assert0(inference);
147  task = inference->task;
148 
149  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
150  if (status != OK) {
151  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
152  return DNN_ERROR;
153  }
154 
155  status |= ie_blob_get_dims(input_blob, &dims);
156  status |= ie_blob_get_precision(input_blob, &precision);
157  if (status != OK) {
158  ie_blob_free(&input_blob);
159  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
160  return DNN_ERROR;
161  }
162 
163  status = ie_blob_get_buffer(input_blob, &blob_buffer);
164  if (status != OK) {
165  ie_blob_free(&input_blob);
166  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
167  return DNN_ERROR;
168  }
169 
170  input.height = dims.dims[2];
171  input.width = dims.dims[3];
172  input.channels = dims.dims[1];
173  input.data = blob_buffer.buffer;
174  input.dt = precision_to_datatype(precision);
175  // all models in openvino open model zoo use BGR as input,
176  // change to be an option when necessary.
177  input.order = DCO_BGR;
178 
179  for (int i = 0; i < ctx->options.batch_size; ++i) {
180  inference = ff_queue_pop_front(ov_model->inference_queue);
181  if (!inference) {
182  break;
183  }
184  request->inferences[i] = inference;
185  request->inference_count = i + 1;
186  task = inference->task;
187  switch (task->ov_model->model->func_type) {
188  case DFT_PROCESS_FRAME:
190  if (task->do_ioproc) {
191  if (ov_model->model->frame_pre_proc != NULL) {
192  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
193  } else {
195  }
196  }
197  break;
199  ff_frame_to_dnn_classify(task->in_frame, &input, inference->bbox_index, ctx);
200  break;
201  default:
202  av_assert0(!"should not reach here");
203  break;
204  }
205  input.data = (uint8_t *)input.data
206  + input.width * input.height * input.channels * get_datatype_size(input.dt);
207  }
208  ie_blob_free(&input_blob);
209 
210  return DNN_SUCCESS;
211 }
212 
213 static void infer_completion_callback(void *args)
214 {
215  dimensions_t dims;
216  precision_e precision;
217  IEStatusCode status;
218  RequestItem *request = args;
219  InferenceItem *inference = request->inferences[0];
220  TaskItem *task = inference->task;
221  SafeQueue *requestq = task->ov_model->request_queue;
222  ie_blob_t *output_blob = NULL;
223  ie_blob_buffer_t blob_buffer;
224  DNNData output;
225  OVContext *ctx = &task->ov_model->ctx;
226 
227  status = ie_infer_request_get_blob(request->infer_request, task->output_name, &output_blob);
228  if (status != OK) {
229  //incorrect output name
230  char *model_output_name = NULL;
231  char *all_output_names = NULL;
232  size_t model_output_count = 0;
233  av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
234  status = ie_network_get_outputs_number(task->ov_model->network, &model_output_count);
235  for (size_t i = 0; i < model_output_count; i++) {
236  status = ie_network_get_output_name(task->ov_model->network, i, &model_output_name);
237  APPEND_STRING(all_output_names, model_output_name)
238  }
240  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
241  task->output_name, all_output_names);
242  return;
243  }
244 
245  status = ie_blob_get_buffer(output_blob, &blob_buffer);
246  if (status != OK) {
247  ie_blob_free(&output_blob);
248  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
249  return;
250  }
251 
252  status |= ie_blob_get_dims(output_blob, &dims);
253  status |= ie_blob_get_precision(output_blob, &precision);
254  if (status != OK) {
255  ie_blob_free(&output_blob);
256  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
257  return;
258  }
259 
260  output.channels = dims.dims[1];
261  output.height = dims.dims[2];
262  output.width = dims.dims[3];
263  output.dt = precision_to_datatype(precision);
264  output.data = blob_buffer.buffer;
265 
266  av_assert0(request->inference_count <= dims.dims[0]);
267  av_assert0(request->inference_count >= 1);
268  for (int i = 0; i < request->inference_count; ++i) {
269  task = request->inferences[i]->task;
270  task->inference_done++;
271 
272  switch (task->ov_model->model->func_type) {
273  case DFT_PROCESS_FRAME:
274  if (task->do_ioproc) {
275  if (task->ov_model->model->frame_post_proc != NULL) {
277  } else {
279  }
280  } else {
281  task->out_frame->width = output.width;
282  task->out_frame->height = output.height;
283  }
284  break;
286  if (!task->ov_model->model->detect_post_proc) {
287  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
288  return;
289  }
291  break;
293  if (!task->ov_model->model->classify_post_proc) {
294  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
295  return;
296  }
298  break;
299  default:
300  av_assert0(!"should not reach here");
301  break;
302  }
303 
304  av_freep(&request->inferences[i]);
305  output.data = (uint8_t *)output.data
306  + output.width * output.height * output.channels * get_datatype_size(output.dt);
307  }
308  ie_blob_free(&output_blob);
309 
310  request->inference_count = 0;
311  if (ff_safe_queue_push_back(requestq, request) < 0) {
312  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
313  return;
314  }
315 }
316 
317 static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
318 {
319  OVContext *ctx = &ov_model->ctx;
320  IEStatusCode status;
321  ie_available_devices_t a_dev;
322  ie_config_t config = {NULL, NULL, NULL};
323  char *all_dev_names = NULL;
324 
325  // batch size
326  if (ctx->options.batch_size <= 0) {
327  ctx->options.batch_size = 1;
328  }
329 
330  if (ctx->options.batch_size > 1) {
331  input_shapes_t input_shapes;
332  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
333  if (status != OK)
334  goto err;
335  for (int i = 0; i < input_shapes.shape_num; i++)
336  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
337  status = ie_network_reshape(ov_model->network, input_shapes);
338  ie_network_input_shapes_free(&input_shapes);
339  if (status != OK)
340  goto err;
341  }
342 
343  // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
344  // while we pass NHWC data from FFmpeg to openvino
345  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
346  if (status != OK) {
347  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
348  goto err;
349  }
350  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
351  if (status != OK) {
352  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
353  goto err;
354  }
355 
356  // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
357  // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
358  // ask openvino to do the conversion internally.
359  // the current supported SR model (frame processing) is generated from tensorflow model,
360  // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
361  // TODO: we need to get a final clear&general solution with all backends/formats considered.
362  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
363  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
364  if (status != OK) {
365  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
366  goto err;
367  }
368  }
369 
370  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
371  if (status != OK) {
372  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
373  status = ie_core_get_available_devices(ov_model->core, &a_dev);
374  if (status != OK) {
375  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
376  goto err;
377  }
378  for (int i = 0; i < a_dev.num_devices; i++) {
379  APPEND_STRING(all_dev_names, a_dev.devices[i])
380  }
381  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
382  ctx->options.device_type, all_dev_names);
383  goto err;
384  }
385 
386  // create infer_requests for async execution
387  if (ctx->options.nireq <= 0) {
388  // the default value is a rough estimation
389  ctx->options.nireq = av_cpu_count() / 2 + 1;
390  }
391 
392  ov_model->request_queue = ff_safe_queue_create();
393  if (!ov_model->request_queue) {
394  goto err;
395  }
396 
397  for (int i = 0; i < ctx->options.nireq; i++) {
398  RequestItem *item = av_mallocz(sizeof(*item));
399  if (!item) {
400  goto err;
401  }
402 
403  item->callback.completeCallBackFunc = infer_completion_callback;
404  item->callback.args = item;
405  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
406  av_freep(&item);
407  goto err;
408  }
409 
410  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
411  if (status != OK) {
412  goto err;
413  }
414 
415  item->inferences = av_malloc_array(ctx->options.batch_size, sizeof(*item->inferences));
416  if (!item->inferences) {
417  goto err;
418  }
419  item->inference_count = 0;
420  }
421 
422  ov_model->task_queue = ff_queue_create();
423  if (!ov_model->task_queue) {
424  goto err;
425  }
426 
427  ov_model->inference_queue = ff_queue_create();
428  if (!ov_model->inference_queue) {
429  goto err;
430  }
431 
432  return DNN_SUCCESS;
433 
434 err:
435  ff_dnn_free_model_ov(&ov_model->model);
436  return DNN_ERROR;
437 }
438 
// Run one request over the inferences currently queued in 'inferenceq'.
// async mode: fill the input blob, install the completion callback and start
// a non-blocking inference (results are consumed in infer_completion_callback).
// sync mode: run the inference inline, invoke the callback by hand, and
// report success only when every inference of the task completed.
// An empty queue is a no-op success.
439 static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
440 {
441  IEStatusCode status;
443  InferenceItem *inference;
444  TaskItem *task;
445  OVContext *ctx;
446 
447  if (ff_queue_size(inferenceq) == 0) {
448  return DNN_SUCCESS;
449  }
450 
// peek (not pop): fill_model_input_ov() pops the items itself
451  inference = ff_queue_peek_front(inferenceq);
452  task = inference->task;
453  ctx = &task->ov_model->ctx;
454 
455  if (task->async) {
456  ret = fill_model_input_ov(task->ov_model, request);
457  if (ret != DNN_SUCCESS) {
458  return ret;
459  }
460  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
461  if (status != OK) {
462  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
463  return DNN_ERROR;
464  }
465  status = ie_infer_request_infer_async(request->infer_request);
466  if (status != OK) {
467  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
468  return DNN_ERROR;
469  }
470  return DNN_SUCCESS;
471  } else {
472  ret = fill_model_input_ov(task->ov_model, request);
473  if (ret != DNN_SUCCESS) {
474  return ret;
475  }
// blocking inference, then drain the results synchronously
476  status = ie_infer_request_infer(request->infer_request);
477  if (status != OK) {
478  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
479  return DNN_ERROR;
480  }
481  infer_completion_callback(request);
482  return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
483  }
484 }
485 
486 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
487 {
488  OVModel *ov_model = model;
489  OVContext *ctx = &ov_model->ctx;
490  char *model_input_name = NULL;
491  char *all_input_names = NULL;
492  IEStatusCode status;
493  size_t model_input_count = 0;
494  dimensions_t dims;
495  precision_e precision;
496  int input_resizable = ctx->options.input_resizable;
497 
498  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
499  if (status != OK) {
500  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
501  return DNN_ERROR;
502  }
503 
504  for (size_t i = 0; i < model_input_count; i++) {
505  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
506  if (status != OK) {
507  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
508  return DNN_ERROR;
509  }
510  if (strcmp(model_input_name, input_name) == 0) {
511  ie_network_name_free(&model_input_name);
512  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
513  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
514  if (status != OK) {
515  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
516  return DNN_ERROR;
517  }
518 
519  input->channels = dims.dims[1];
520  input->height = input_resizable ? -1 : dims.dims[2];
521  input->width = input_resizable ? -1 : dims.dims[3];
522  input->dt = precision_to_datatype(precision);
523  return DNN_SUCCESS;
524  } else {
525  //incorrect input name
526  APPEND_STRING(all_input_names, model_input_name)
527  }
528 
529  ie_network_name_free(&model_input_name);
530  }
531 
532  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
533  return DNN_ERROR;
534 }
535 
// Validate the detection bounding boxes attached to the frame as side data:
// returns 1 when every bbox lies inside the frame (and has room for
// classification entries), 0 when there is nothing usable or any bbox is
// out of range.
537 {
538  AVFrameSideData *sd;
540  const AVDetectionBBox *bbox;
541 
543  if (!sd) { // this frame has nothing detected
544  return 0;
545  }
546 
547  if (!sd->size) {
548  return 0;
549  }
550 
551  header = (const AVDetectionBBoxHeader *)sd->data;
552  if (!header->nb_bboxes) {
553  return 0;
554  }
555 
556  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
557  bbox = av_get_detection_bbox(header, i);
// horizontal bounds check against the frame width
558  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
559  return 0;
560  }
// NOTE(review): this vertical check compares y + h against frame->width —
// it almost certainly should be frame->height; confirm and fix.
561  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
562  return 0;
563  }
564 
566  return 0;
567  }
568  }
569 
570  return 1;
571 }
572 
573 static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
574 {
575  switch (func_type) {
576  case DFT_PROCESS_FRAME:
578  {
579  InferenceItem *inference = av_malloc(sizeof(*inference));
580  if (!inference) {
581  return DNN_ERROR;
582  }
583  task->inference_todo = 1;
584  task->inference_done = 0;
585  inference->task = task;
586  if (ff_queue_push_back(inference_queue, inference) < 0) {
587  av_freep(&inference);
588  return DNN_ERROR;
589  }
590  return DNN_SUCCESS;
591  }
593  {
595  AVFrame *frame = task->in_frame;
596  AVFrameSideData *sd;
598 
599  task->inference_todo = 0;
600  task->inference_done = 0;
601 
603  return DNN_SUCCESS;
604  }
605 
607  header = (const AVDetectionBBoxHeader *)sd->data;
608 
609  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
610  InferenceItem *inference;
612 
613  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
614  continue;
615  }
616 
617  inference = av_malloc(sizeof(*inference));
618  if (!inference) {
619  return DNN_ERROR;
620  }
621  task->inference_todo++;
622  inference->task = task;
623  inference->bbox_index = i;
624  if (ff_queue_push_back(inference_queue, inference) < 0) {
625  av_freep(&inference);
626  return DNN_ERROR;
627  }
628  }
629  return DNN_SUCCESS;
630  }
631  default:
632  av_assert0(!"should not reach here");
633  return DNN_ERROR;
634  }
635 }
636 
637 static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
638  const char *output_name, int *output_width, int *output_height)
639 {
641  OVModel *ov_model = model;
642  OVContext *ctx = &ov_model->ctx;
643  TaskItem task;
644  RequestItem *request;
645  AVFrame *in_frame = NULL;
646  AVFrame *out_frame = NULL;
647  IEStatusCode status;
648  input_shapes_t input_shapes;
649 
650  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
651  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
652  return DNN_ERROR;
653  }
654 
655  if (ctx->options.input_resizable) {
656  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
657  input_shapes.shapes->shape.dims[2] = input_height;
658  input_shapes.shapes->shape.dims[3] = input_width;
659  status |= ie_network_reshape(ov_model->network, input_shapes);
660  ie_network_input_shapes_free(&input_shapes);
661  if (status != OK) {
662  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
663  return DNN_ERROR;
664  }
665  }
666 
667  if (!ov_model->exe_network) {
668  if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
669  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
670  return DNN_ERROR;
671  }
672  }
673 
674  in_frame = av_frame_alloc();
675  if (!in_frame) {
676  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
677  return DNN_ERROR;
678  }
679  in_frame->width = input_width;
680  in_frame->height = input_height;
681 
682  out_frame = av_frame_alloc();
683  if (!out_frame) {
684  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
685  av_frame_free(&in_frame);
686  return DNN_ERROR;
687  }
688 
689  task.do_ioproc = 0;
690  task.async = 0;
691  task.input_name = input_name;
692  task.in_frame = in_frame;
693  task.output_name = output_name;
694  task.out_frame = out_frame;
695  task.ov_model = ov_model;
696 
697  if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, NULL) != DNN_SUCCESS) {
698  av_frame_free(&out_frame);
699  av_frame_free(&in_frame);
700  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
701  return DNN_ERROR;
702  }
703 
704  request = ff_safe_queue_pop_front(ov_model->request_queue);
705  if (!request) {
706  av_frame_free(&out_frame);
707  av_frame_free(&in_frame);
708  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
709  return DNN_ERROR;
710  }
711 
712  ret = execute_model_ov(request, ov_model->inference_queue);
713  *output_width = out_frame->width;
714  *output_height = out_frame->height;
715 
716  av_frame_free(&out_frame);
717  av_frame_free(&in_frame);
718  return ret;
719 }
720 
// Load an OpenVINO model from 'model_filename' and build the DNNModel wrapper:
// allocates the DNNModel/OVModel pair, parses the backend options from the
// 'options' string (key=value pairs joined by '&'), creates the IE core and
// reads the network. The executable network is created lazily later by
// init_model_ov(). Returns NULL on any failure (everything allocated so far
// is released via ff_dnn_free_model_ov()).
721 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
722 {
723  DNNModel *model = NULL;
724  OVModel *ov_model = NULL;
725  OVContext *ctx = NULL;
726  IEStatusCode status;
727 
728  model = av_mallocz(sizeof(DNNModel));
729  if (!model){
730  return NULL;
731  }
732 
733  ov_model = av_mallocz(sizeof(OVModel));
734  if (!ov_model) {
735  av_freep(&model);
736  return NULL;
737  }
// link the generic wrapper and the backend struct both ways
738  model->model = ov_model;
739  ov_model->model = model;
740  ov_model->ctx.class = &dnn_openvino_class;
741  ctx = &ov_model->ctx;
742 
743  //parse options
745  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
746  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
747  goto err;
748  }
749 
// "" selects the default plugin configuration for the IE core
750  status = ie_core_create("", &ov_model->core);
751  if (status != OK)
752  goto err;
753 
754  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
755  if (status != OK) {
756  ie_version_t ver;
757  ver = ie_c_api_version();
758  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
759  "Please check if the model version matches the runtime OpenVINO %s\n",
760  model_filename, ver.api_version);
761  ie_version_free(&ver);
762  goto err;
763  }
764 
765  model->get_input = &get_input_ov;
766  model->get_output = &get_output_ov;
767  model->options = options;
768  model->filter_ctx = filter_ctx;
769  model->func_type = func_type;
770 
771  return model;
772 
773 err:
774  ff_dnn_free_model_ov(&model);
775  return NULL;
776 }
777 
779 {
780  OVModel *ov_model = model->model;
781  OVContext *ctx = &ov_model->ctx;
782  TaskItem task;
783  RequestItem *request;
784 
785  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
786  return DNN_ERROR;
787  }
788 
789  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
790  // Once we add async support for tensorflow backend and native backend,
791  // we'll combine the two sync/async functions in dnn_interface.h to
792  // simplify the code in filter, and async will be an option within backends.
793  // so, do not support now, and classify filter will not call this function.
794  return DNN_ERROR;
795  }
796 
797  if (ctx->options.batch_size > 1) {
798  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
799  return DNN_ERROR;
800  }
801 
802  if (!ov_model->exe_network) {
803  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
804  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
805  return DNN_ERROR;
806  }
807  }
808 
809  task.do_ioproc = 1;
810  task.async = 0;
811  task.input_name = exec_params->input_name;
812  task.in_frame = exec_params->in_frame;
813  task.output_name = exec_params->output_names[0];
814  task.out_frame = exec_params->out_frame ? exec_params->out_frame : exec_params->in_frame;
815  task.ov_model = ov_model;
816 
817  if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
818  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
819  return DNN_ERROR;
820  }
821 
822  request = ff_safe_queue_pop_front(ov_model->request_queue);
823  if (!request) {
824  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
825  return DNN_ERROR;
826  }
827 
828  return execute_model_ov(request, ov_model->inference_queue);
829 }
830 
832 {
833  OVModel *ov_model = model->model;
834  OVContext *ctx = &ov_model->ctx;
835  RequestItem *request;
836  TaskItem *task;
838 
839  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
840  return DNN_ERROR;
841  }
842 
843  if (!ov_model->exe_network) {
844  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
845  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
846  return DNN_ERROR;
847  }
848  }
849 
850  task = av_malloc(sizeof(*task));
851  if (!task) {
852  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
853  return DNN_ERROR;
854  }
855 
856  task->do_ioproc = 1;
857  task->async = 1;
858  task->input_name = exec_params->input_name;
859  task->in_frame = exec_params->in_frame;
860  task->output_name = exec_params->output_names[0];
861  task->out_frame = exec_params->out_frame ? exec_params->out_frame : exec_params->in_frame;
862  task->ov_model = ov_model;
863  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
864  av_freep(&task);
865  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
866  return DNN_ERROR;
867  }
868 
869  if (extract_inference_from_task(model->func_type, task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
870  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
871  return DNN_ERROR;
872  }
873 
874  while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
875  request = ff_safe_queue_pop_front(ov_model->request_queue);
876  if (!request) {
877  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
878  return DNN_ERROR;
879  }
880 
881  ret = execute_model_ov(request, ov_model->inference_queue);
882  if (ret != DNN_SUCCESS) {
883  return ret;
884  }
885  }
886 
887  return DNN_SUCCESS;
888 }
889 
891 {
892  OVModel *ov_model = model->model;
893  TaskItem *task = ff_queue_peek_front(ov_model->task_queue);
894 
895  if (!task) {
896  return DAST_EMPTY_QUEUE;
897  }
898 
899  if (task->inference_done != task->inference_todo) {
900  return DAST_NOT_READY;
901  }
902 
903  *in = task->in_frame;
904  *out = task->out_frame;
905  ff_queue_pop_front(ov_model->task_queue);
906  av_freep(&task);
907 
908  return DAST_SUCCESS;
909 }
910 
912 {
913  OVModel *ov_model = model->model;
914  OVContext *ctx = &ov_model->ctx;
915  RequestItem *request;
916  IEStatusCode status;
918 
919  if (ff_queue_size(ov_model->inference_queue) == 0) {
920  // no pending task need to flush
921  return DNN_SUCCESS;
922  }
923 
924  request = ff_safe_queue_pop_front(ov_model->request_queue);
925  if (!request) {
926  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
927  return DNN_ERROR;
928  }
929 
930  ret = fill_model_input_ov(ov_model, request);
931  if (ret != DNN_SUCCESS) {
932  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
933  return ret;
934  }
935  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
936  if (status != OK) {
937  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
938  return DNN_ERROR;
939  }
940  status = ie_infer_request_infer_async(request->infer_request);
941  if (status != OK) {
942  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
943  return DNN_ERROR;
944  }
945 
946  return DNN_SUCCESS;
947 }
948 
950 {
951  if (*model){
952  OVModel *ov_model = (*model)->model;
953  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
955  if (item && item->infer_request) {
956  ie_infer_request_free(&item->infer_request);
957  }
958  av_freep(&item->inferences);
959  av_freep(&item);
960  }
962 
963  while (ff_queue_size(ov_model->inference_queue) != 0) {
964  TaskItem *item = ff_queue_pop_front(ov_model->inference_queue);
965  av_freep(&item);
966  }
968 
969  while (ff_queue_size(ov_model->task_queue) != 0) {
970  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
971  av_frame_free(&item->in_frame);
972  av_frame_free(&item->out_frame);
973  av_freep(&item);
974  }
975  ff_queue_destroy(ov_model->task_queue);
976 
977  if (ov_model->exe_network)
978  ie_exec_network_free(&ov_model->exe_network);
979  if (ov_model->network)
980  ie_network_free(&ov_model->network);
981  if (ov_model->core)
982  ie_core_free(&ov_model->core);
983  av_freep(&ov_model);
984  av_freep(model);
985  }
986 }
InferenceItem
Definition: dnn_backend_openvino.c:76
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
TaskItem::ov_model
OVModel * ov_model
Definition: dnn_backend_openvino.c:64
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:47
opt.h
filter_ctx
static FilteringContext * filter_ctx
Definition: transcoding.c:48
RequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:86
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1358
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:616
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:56
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
TaskItem::output_name
const char * output_name
Definition: dnn_backend_openvino.c:67
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:152
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:26
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:89
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AVFrame::width
int width
Definition: frame.h:361
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:248
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:721
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:54
FLAGS
#define FLAGS
Definition: cmdutils.c:539
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_openvino.c:63
InferenceItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_openvino.c:78
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
DNNExecBaseParams::in_frame
AVFrame * in_frame
Definition: dnn_interface.h:71
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:51
get_input_ov
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:486
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:42
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
InferenceItem::task
TaskItem * task
Definition: dnn_backend_openvino.c:77
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:48
ff_proc_from_dnn_to_frame
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:33
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:40
DNNModel::get_output
DNNReturnType(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
init_model_ov
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:317
Queue
Linear double-ended data structure.
Definition: queue.c:34
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:98
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:131
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:181
AVFrameSideData::size
size_t size
Definition: frame.h:212
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:73
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:949
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:33
DNNData
Definition: dnn_interface.h:59
OVModel::inference_queue
Queue * inference_queue
Definition: dnn_backend_openvino.c:59
ctx
AVFormatContext * ctx
Definition: movenc.c:48
DNNModel::get_input
DNNReturnType(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_openvino.c:71
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
NULL
#define NULL
Definition: coverity.c:32
fill_model_input_ov
static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
Definition: dnn_backend_openvino.c:133
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:55
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1559
RequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:83
execute_model_ov
static DNNReturnType execute_model_ov(RequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:439
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_openvino.c:66
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:52
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:184
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:225
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:119
options
const OptionDef options[]
DAST_SUCCESS
@ DAST_SUCCESS
Definition: dnn_interface.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_openvino.c:72
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
queue.h
DAST_EMPTY_QUEUE
@ DAST_EMPTY_QUEUE
Definition: dnn_interface.h:47
TaskItem::async
int async
Definition: dnn_backend_openvino.c:70
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:53
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
RequestItem::inference_count
uint32_t inference_count
Definition: dnn_backend_openvino.c:85
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
RequestItem
Definition: dnn_backend_openvino.c:82
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
get_output_ov
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:637
DNNExecBaseParams::out_frame
AVFrame * out_frame
Definition: dnn_interface.h:72
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_dnn_flush_ov
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:911
ff_proc_from_frame_to_dnn
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, DNNFunctionType func_type, void *log_ctx)
Definition: dnn_io_proc.c:275
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:536
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:43
RequestItem::inferences
InferenceItem ** inferences
Definition: dnn_backend_openvino.c:84
i
int i
Definition: input.c:407
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:95
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:213
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
ff_dnn_get_async_result_ov
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:890
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
ff_frame_to_dnn_classify
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:187
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:105
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:33
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:243
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:57
OVContext
Definition: dnn_backend_openvino.c:46
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_openvino.c:68
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:58
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:361
dnn_backend_common.h
TaskItem::do_ioproc
int do_ioproc
Definition: dnn_backend_openvino.c:69
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:94
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:333
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
TaskItem::input_name
const char * input_name
Definition: dnn_backend_openvino.c:65
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:41
ff_dnn_execute_model_ov
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:778
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
OVOptions
Definition: dnn_backend_openvino.c:39
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVDetectionBBox
Definition: detection_bbox.h:26
extract_inference_from_task
static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:573
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DAST_NOT_READY
@ DAST_NOT_READY
Definition: dnn_interface.h:48
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:48
ff_dnn_execute_model_async_ov
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:831
DNNModel::model
void * model
Definition: dnn_interface.h:86