FFmpeg — libavfilter/dnn/dnn_backend_openvino.c (DNN OpenVINO backend implementation)
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
#include "dnn_backend_openvino.h"
#include "dnn_io_proc.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/detection_bbox.h"
#include "../internal.h"
#include "safe_queue.h"
#include <c_api/ie_c_api.h>
#include "dnn_backend_common.h"
38 
/**
 * User-configurable options for the OpenVINO backend,
 * filled from the AVOption table below.
 */
typedef struct OVOptions {
    char *device_type;   ///< OpenVINO device name, e.g. "CPU"
    int nireq;           ///< number of parallel inference requests (<= 0: auto)
    uint8_t async;       ///< non-zero: run inference asynchronously
    int batch_size;      ///< frames per inference request (option table references options.batch_size)
    int input_resizable; ///< non-zero: model input may be reshaped to frame size
} OVOptions;
46 
47 typedef struct OVContext {
48  const AVClass *class;
50 } OVContext;
51 
52 typedef struct OVModel{
55  ie_core_t *core;
56  ie_network_t *network;
57  ie_executable_network_t *exe_network;
58  SafeQueue *request_queue; // holds OVRequestItem
59  Queue *task_queue; // holds TaskItem
60  Queue *lltask_queue; // holds LastLevelTaskItem
61 } OVModel;
62 
63 // one request for one call to openvino
64 typedef struct OVRequestItem {
65  ie_infer_request_t *infer_request;
67  uint32_t lltask_count;
68  ie_complete_call_back_t callback;
70 
/**
 * Append iterate_string to generated_string (space separated), reallocating
 * the accumulator.  The previous accumulator is freed to avoid the leak the
 * original macro had (it discarded the old pointer on every append).
 * Expands to a compound statement, so call sites need no trailing semicolon.
 */
#define APPEND_STRING(generated_string, iterate_string)                          \
    {                                                                            \
        char *tmp_str_ = (generated_string);                                     \
        (generated_string) = tmp_str_ ?                                          \
            av_asprintf("%s %s", tmp_str_, (iterate_string)) :                   \
            av_asprintf("%s", (iterate_string));                                 \
        av_free(tmp_str_);                                                       \
    }
74 
75 #define OFFSET(x) offsetof(OVContext, x)
76 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
77 static const AVOption dnn_openvino_options[] = {
78  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
80  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
81  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
82  { NULL }
83 };
84 
85 AVFILTER_DEFINE_CLASS(dnn_openvino);
86 
87 static DNNDataType precision_to_datatype(precision_e precision)
88 {
89  switch (precision)
90  {
91  case FP32:
92  return DNN_FLOAT;
93  case U8:
94  return DNN_UINT8;
95  default:
96  av_assert0(!"not supported yet.");
97  return DNN_FLOAT;
98  }
99 }
100 
102 {
103  switch (dt)
104  {
105  case DNN_FLOAT:
106  return sizeof(float);
107  case DNN_UINT8:
108  return sizeof(uint8_t);
109  default:
110  av_assert0(!"not supported yet.");
111  return 1;
112  }
113 }
114 
116 {
117  dimensions_t dims;
118  precision_e precision;
119  ie_blob_buffer_t blob_buffer;
120  OVContext *ctx = &ov_model->ctx;
121  IEStatusCode status;
122  DNNData input;
123  ie_blob_t *input_blob = NULL;
124  LastLevelTaskItem *lltask;
125  TaskItem *task;
126 
127  lltask = ff_queue_peek_front(ov_model->lltask_queue);
128  av_assert0(lltask);
129  task = lltask->task;
130 
131  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
132  if (status != OK) {
133  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
134  return DNN_ERROR;
135  }
136 
137  status |= ie_blob_get_dims(input_blob, &dims);
138  status |= ie_blob_get_precision(input_blob, &precision);
139  if (status != OK) {
140  ie_blob_free(&input_blob);
141  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
142  return DNN_ERROR;
143  }
144 
145  status = ie_blob_get_buffer(input_blob, &blob_buffer);
146  if (status != OK) {
147  ie_blob_free(&input_blob);
148  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
149  return DNN_ERROR;
150  }
151 
152  input.height = dims.dims[2];
153  input.width = dims.dims[3];
154  input.channels = dims.dims[1];
155  input.data = blob_buffer.buffer;
156  input.dt = precision_to_datatype(precision);
157  // all models in openvino open model zoo use BGR as input,
158  // change to be an option when necessary.
159  input.order = DCO_BGR;
160 
161  for (int i = 0; i < ctx->options.batch_size; ++i) {
162  lltask = ff_queue_pop_front(ov_model->lltask_queue);
163  if (!lltask) {
164  break;
165  }
166  request->lltasks[i] = lltask;
167  request->lltask_count = i + 1;
168  task = lltask->task;
169  switch (ov_model->model->func_type) {
170  case DFT_PROCESS_FRAME:
171  if (task->do_ioproc) {
172  if (ov_model->model->frame_pre_proc != NULL) {
173  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
174  } else {
176  }
177  }
178  break;
181  break;
184  break;
185  default:
186  av_assert0(!"should not reach here");
187  break;
188  }
189  input.data = (uint8_t *)input.data
190  + input.width * input.height * input.channels * get_datatype_size(input.dt);
191  }
192  ie_blob_free(&input_blob);
193 
194  return DNN_SUCCESS;
195 }
196 
197 static void infer_completion_callback(void *args)
198 {
199  dimensions_t dims;
200  precision_e precision;
201  IEStatusCode status;
202  OVRequestItem *request = args;
203  LastLevelTaskItem *lltask = request->lltasks[0];
204  TaskItem *task = lltask->task;
205  OVModel *ov_model = task->model;
206  SafeQueue *requestq = ov_model->request_queue;
207  ie_blob_t *output_blob = NULL;
208  ie_blob_buffer_t blob_buffer;
209  DNNData output;
210  OVContext *ctx = &ov_model->ctx;
211 
212  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
213  if (status != OK) {
214  //incorrect output name
215  char *model_output_name = NULL;
216  char *all_output_names = NULL;
217  size_t model_output_count = 0;
218  av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
219  status = ie_network_get_outputs_number(ov_model->network, &model_output_count);
220  for (size_t i = 0; i < model_output_count; i++) {
221  status = ie_network_get_output_name(ov_model->network, i, &model_output_name);
222  APPEND_STRING(all_output_names, model_output_name)
223  }
225  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
226  task->output_names[0], all_output_names);
227  return;
228  }
229 
230  status = ie_blob_get_buffer(output_blob, &blob_buffer);
231  if (status != OK) {
232  ie_blob_free(&output_blob);
233  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
234  return;
235  }
236 
237  status |= ie_blob_get_dims(output_blob, &dims);
238  status |= ie_blob_get_precision(output_blob, &precision);
239  if (status != OK) {
240  ie_blob_free(&output_blob);
241  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
242  return;
243  }
244 
245  output.channels = dims.dims[1];
246  output.height = dims.dims[2];
247  output.width = dims.dims[3];
248  output.dt = precision_to_datatype(precision);
249  output.data = blob_buffer.buffer;
250 
251  av_assert0(request->lltask_count <= dims.dims[0]);
252  av_assert0(request->lltask_count >= 1);
253  for (int i = 0; i < request->lltask_count; ++i) {
254  task = request->lltasks[i]->task;
255  task->inference_done++;
256 
257  switch (ov_model->model->func_type) {
258  case DFT_PROCESS_FRAME:
259  if (task->do_ioproc) {
260  if (ov_model->model->frame_post_proc != NULL) {
261  ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
262  } else {
264  }
265  } else {
266  task->out_frame->width = output.width;
267  task->out_frame->height = output.height;
268  }
269  break;
271  if (!ov_model->model->detect_post_proc) {
272  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
273  return;
274  }
275  ov_model->model->detect_post_proc(task->in_frame, &output, 1, ov_model->model->filter_ctx);
276  break;
278  if (!ov_model->model->classify_post_proc) {
279  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
280  return;
281  }
282  ov_model->model->classify_post_proc(task->in_frame, &output, request->lltasks[i]->bbox_index, ov_model->model->filter_ctx);
283  break;
284  default:
285  av_assert0(!"should not reach here");
286  break;
287  }
288 
289  av_freep(&request->lltasks[i]);
290  output.data = (uint8_t *)output.data
291  + output.width * output.height * output.channels * get_datatype_size(output.dt);
292  }
293  ie_blob_free(&output_blob);
294 
295  request->lltask_count = 0;
296  if (ff_safe_queue_push_back(requestq, request) < 0) {
297  ie_infer_request_free(&request->infer_request);
298  av_freep(&request);
299  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
300  return;
301  }
302 }
303 
304 static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
305 {
306  OVContext *ctx = &ov_model->ctx;
307  IEStatusCode status;
308  ie_available_devices_t a_dev;
309  ie_config_t config = {NULL, NULL, NULL};
310  char *all_dev_names = NULL;
311 
312  // batch size
313  if (ctx->options.batch_size <= 0) {
314  ctx->options.batch_size = 1;
315  }
316 
317  if (ctx->options.batch_size > 1) {
318  input_shapes_t input_shapes;
319  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
320  if (status != OK)
321  goto err;
322  for (int i = 0; i < input_shapes.shape_num; i++)
323  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
324  status = ie_network_reshape(ov_model->network, input_shapes);
325  ie_network_input_shapes_free(&input_shapes);
326  if (status != OK)
327  goto err;
328  }
329 
330  // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
331  // while we pass NHWC data from FFmpeg to openvino
332  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
333  if (status != OK) {
334  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
335  goto err;
336  }
337  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
338  if (status != OK) {
339  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
340  goto err;
341  }
342 
343  // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
344  // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
345  // ask openvino to do the conversion internally.
346  // the current supported SR model (frame processing) is generated from tensorflow model,
347  // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
348  // TODO: we need to get a final clear&general solution with all backends/formats considered.
349  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
350  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
351  if (status != OK) {
352  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
353  goto err;
354  }
355  }
356 
357  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
358  if (status != OK) {
359  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
360  status = ie_core_get_available_devices(ov_model->core, &a_dev);
361  if (status != OK) {
362  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
363  goto err;
364  }
365  for (int i = 0; i < a_dev.num_devices; i++) {
366  APPEND_STRING(all_dev_names, a_dev.devices[i])
367  }
368  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
369  ctx->options.device_type, all_dev_names);
370  goto err;
371  }
372 
373  // create infer_requests for async execution
374  if (ctx->options.nireq <= 0) {
375  // the default value is a rough estimation
376  ctx->options.nireq = av_cpu_count() / 2 + 1;
377  }
378 
379  ov_model->request_queue = ff_safe_queue_create();
380  if (!ov_model->request_queue) {
381  goto err;
382  }
383 
384  for (int i = 0; i < ctx->options.nireq; i++) {
385  OVRequestItem *item = av_mallocz(sizeof(*item));
386  if (!item) {
387  goto err;
388  }
389 
390  item->callback.completeCallBackFunc = infer_completion_callback;
391  item->callback.args = item;
392  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
393  av_freep(&item);
394  goto err;
395  }
396 
397  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
398  if (status != OK) {
399  goto err;
400  }
401 
402  item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
403  if (!item->lltasks) {
404  goto err;
405  }
406  item->lltask_count = 0;
407  }
408 
409  ov_model->task_queue = ff_queue_create();
410  if (!ov_model->task_queue) {
411  goto err;
412  }
413 
414  ov_model->lltask_queue = ff_queue_create();
415  if (!ov_model->lltask_queue) {
416  goto err;
417  }
418 
419  return DNN_SUCCESS;
420 
421 err:
422  ff_dnn_free_model_ov(&ov_model->model);
423  return DNN_ERROR;
424 }
425 
426 static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
427 {
428  IEStatusCode status;
430  LastLevelTaskItem *lltask;
431  TaskItem *task;
432  OVContext *ctx;
433  OVModel *ov_model;
434 
435  if (ff_queue_size(inferenceq) == 0) {
436  ie_infer_request_free(&request->infer_request);
437  av_freep(&request);
438  return DNN_SUCCESS;
439  }
440 
441  lltask = ff_queue_peek_front(inferenceq);
442  task = lltask->task;
443  ov_model = task->model;
444  ctx = &ov_model->ctx;
445 
446  if (task->async) {
447  ret = fill_model_input_ov(ov_model, request);
448  if (ret != DNN_SUCCESS) {
449  goto err;
450  }
451  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
452  if (status != OK) {
453  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
454  goto err;
455  }
456  status = ie_infer_request_infer_async(request->infer_request);
457  if (status != OK) {
458  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
459  goto err;
460  }
461  return DNN_SUCCESS;
462  } else {
463  ret = fill_model_input_ov(ov_model, request);
464  if (ret != DNN_SUCCESS) {
465  goto err;
466  }
467  status = ie_infer_request_infer(request->infer_request);
468  if (status != OK) {
469  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
470  goto err;
471  }
472  infer_completion_callback(request);
473  return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
474  }
475 err:
476  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
477  ie_infer_request_free(&request->infer_request);
478  av_freep(&request);
479  }
480  return DNN_ERROR;
481 }
482 
483 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
484 {
485  OVModel *ov_model = model;
486  OVContext *ctx = &ov_model->ctx;
487  char *model_input_name = NULL;
488  char *all_input_names = NULL;
489  IEStatusCode status;
490  size_t model_input_count = 0;
491  dimensions_t dims;
492  precision_e precision;
493  int input_resizable = ctx->options.input_resizable;
494 
495  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
496  if (status != OK) {
497  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
498  return DNN_ERROR;
499  }
500 
501  for (size_t i = 0; i < model_input_count; i++) {
502  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
503  if (status != OK) {
504  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
505  return DNN_ERROR;
506  }
507  if (strcmp(model_input_name, input_name) == 0) {
508  ie_network_name_free(&model_input_name);
509  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
510  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
511  if (status != OK) {
512  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
513  return DNN_ERROR;
514  }
515 
516  input->channels = dims.dims[1];
517  input->height = input_resizable ? -1 : dims.dims[2];
518  input->width = input_resizable ? -1 : dims.dims[3];
519  input->dt = precision_to_datatype(precision);
520  return DNN_SUCCESS;
521  } else {
522  //incorrect input name
523  APPEND_STRING(all_input_names, model_input_name)
524  }
525 
526  ie_network_name_free(&model_input_name);
527  }
528 
529  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
530  return DNN_ERROR;
531 }
532 
534 {
535  AVFrameSideData *sd;
537  const AVDetectionBBox *bbox;
538 
540  if (!sd) { // this frame has nothing detected
541  return 0;
542  }
543 
544  if (!sd->size) {
545  return 0;
546  }
547 
548  header = (const AVDetectionBBoxHeader *)sd->data;
549  if (!header->nb_bboxes) {
550  return 0;
551  }
552 
553  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
554  bbox = av_get_detection_bbox(header, i);
555  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
556  return 0;
557  }
558  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
559  return 0;
560  }
561 
563  return 0;
564  }
565  }
566 
567  return 1;
568 }
569 
570 static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
571 {
572  switch (func_type) {
573  case DFT_PROCESS_FRAME:
575  {
576  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
577  if (!lltask) {
578  return DNN_ERROR;
579  }
580  task->inference_todo = 1;
581  task->inference_done = 0;
582  lltask->task = task;
583  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
584  av_freep(&lltask);
585  return DNN_ERROR;
586  }
587  return DNN_SUCCESS;
588  }
590  {
592  AVFrame *frame = task->in_frame;
593  AVFrameSideData *sd;
595 
596  task->inference_todo = 0;
597  task->inference_done = 0;
598 
600  return DNN_SUCCESS;
601  }
602 
604  header = (const AVDetectionBBoxHeader *)sd->data;
605 
606  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
607  LastLevelTaskItem *lltask;
609 
610  if (params->target) {
611  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
612  continue;
613  }
614  }
615 
616  lltask = av_malloc(sizeof(*lltask));
617  if (!lltask) {
618  return DNN_ERROR;
619  }
620  task->inference_todo++;
621  lltask->task = task;
622  lltask->bbox_index = i;
623  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
624  av_freep(&lltask);
625  return DNN_ERROR;
626  }
627  }
628  return DNN_SUCCESS;
629  }
630  default:
631  av_assert0(!"should not reach here");
632  return DNN_ERROR;
633  }
634 }
635 
636 static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
637  const char *output_name, int *output_width, int *output_height)
638 {
640  OVModel *ov_model = model;
641  OVContext *ctx = &ov_model->ctx;
642  TaskItem task;
643  OVRequestItem *request;
644  IEStatusCode status;
645  input_shapes_t input_shapes;
646  DNNExecBaseParams exec_params = {
647  .input_name = input_name,
648  .output_names = &output_name,
649  .nb_output = 1,
650  .in_frame = NULL,
651  .out_frame = NULL,
652  };
653 
654  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
655  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
656  return DNN_ERROR;
657  }
658 
659  if (ctx->options.input_resizable) {
660  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
661  input_shapes.shapes->shape.dims[2] = input_height;
662  input_shapes.shapes->shape.dims[3] = input_width;
663  status |= ie_network_reshape(ov_model->network, input_shapes);
664  ie_network_input_shapes_free(&input_shapes);
665  if (status != OK) {
666  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
667  return DNN_ERROR;
668  }
669  }
670 
671  if (!ov_model->exe_network) {
672  if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
673  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
674  return DNN_ERROR;
675  }
676  }
677 
678  if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx) != DNN_SUCCESS) {
679  return DNN_ERROR;
680  }
681 
682  if (extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL) != DNN_SUCCESS) {
683  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
684  ret = DNN_ERROR;
685  goto err;
686  }
687 
688  request = ff_safe_queue_pop_front(ov_model->request_queue);
689  if (!request) {
690  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
691  ret = DNN_ERROR;
692  goto err;
693  }
694 
695  ret = execute_model_ov(request, ov_model->lltask_queue);
696  *output_width = task.out_frame->width;
697  *output_height = task.out_frame->height;
698 err:
699  av_frame_free(&task.out_frame);
700  av_frame_free(&task.in_frame);
701  return ret;
702 }
703 
704 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
705 {
706  DNNModel *model = NULL;
707  OVModel *ov_model = NULL;
708  OVContext *ctx = NULL;
709  IEStatusCode status;
710 
711  model = av_mallocz(sizeof(DNNModel));
712  if (!model){
713  return NULL;
714  }
715 
716  ov_model = av_mallocz(sizeof(OVModel));
717  if (!ov_model) {
718  av_freep(&model);
719  return NULL;
720  }
721  model->model = ov_model;
722  ov_model->model = model;
723  ov_model->ctx.class = &dnn_openvino_class;
724  ctx = &ov_model->ctx;
725 
726  //parse options
728  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
729  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
730  goto err;
731  }
732 
733  status = ie_core_create("", &ov_model->core);
734  if (status != OK)
735  goto err;
736 
737  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
738  if (status != OK) {
739  ie_version_t ver;
740  ver = ie_c_api_version();
741  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
742  "Please check if the model version matches the runtime OpenVINO %s\n",
743  model_filename, ver.api_version);
744  ie_version_free(&ver);
745  goto err;
746  }
747 
748  model->get_input = &get_input_ov;
749  model->get_output = &get_output_ov;
750  model->options = options;
751  model->filter_ctx = filter_ctx;
752  model->func_type = func_type;
753 
754  return model;
755 
756 err:
757  ff_dnn_free_model_ov(&model);
758  return NULL;
759 }
760 
762 {
763  OVModel *ov_model = model->model;
764  OVContext *ctx = &ov_model->ctx;
765  OVRequestItem *request;
766  TaskItem *task;
768 
769  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
770  return DNN_ERROR;
771  }
772 
773  if (!ov_model->exe_network) {
774  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
775  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
776  return DNN_ERROR;
777  }
778  }
779 
780  task = av_malloc(sizeof(*task));
781  if (!task) {
782  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
783  return DNN_ERROR;
784  }
785 
786  if (ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1) != DNN_SUCCESS) {
787  av_freep(&task);
788  return DNN_ERROR;
789  }
790 
791  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
792  av_freep(&task);
793  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
794  return DNN_ERROR;
795  }
796 
797  if (extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params) != DNN_SUCCESS) {
798  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
799  return DNN_ERROR;
800  }
801 
802  if (ctx->options.async) {
803  while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
804  request = ff_safe_queue_pop_front(ov_model->request_queue);
805  if (!request) {
806  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
807  return DNN_ERROR;
808  }
809 
810  ret = execute_model_ov(request, ov_model->lltask_queue);
811  if (ret != DNN_SUCCESS) {
812  return ret;
813  }
814  }
815 
816  return DNN_SUCCESS;
817  }
818  else {
819  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
820  // Classification filter has not been completely
821  // tested with the sync mode. So, do not support now.
822  avpriv_report_missing_feature(ctx, "classify for sync execution");
823  return DNN_ERROR;
824  }
825 
826  if (ctx->options.batch_size > 1) {
827  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
828  return DNN_ERROR;
829  }
830 
831  request = ff_safe_queue_pop_front(ov_model->request_queue);
832  if (!request) {
833  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
834  return DNN_ERROR;
835  }
836  return execute_model_ov(request, ov_model->lltask_queue);
837  }
838 }
839 
841 {
842  OVModel *ov_model = model->model;
843  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
844 }
845 
847 {
848  OVModel *ov_model = model->model;
849  OVContext *ctx = &ov_model->ctx;
850  OVRequestItem *request;
851  IEStatusCode status;
853 
854  if (ff_queue_size(ov_model->lltask_queue) == 0) {
855  // no pending task need to flush
856  return DNN_SUCCESS;
857  }
858 
859  request = ff_safe_queue_pop_front(ov_model->request_queue);
860  if (!request) {
861  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
862  return DNN_ERROR;
863  }
864 
865  ret = fill_model_input_ov(ov_model, request);
866  if (ret != DNN_SUCCESS) {
867  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
868  return ret;
869  }
870  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
871  if (status != OK) {
872  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
873  return DNN_ERROR;
874  }
875  status = ie_infer_request_infer_async(request->infer_request);
876  if (status != OK) {
877  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
878  return DNN_ERROR;
879  }
880 
881  return DNN_SUCCESS;
882 }
883 
885 {
886  if (*model){
887  OVModel *ov_model = (*model)->model;
888  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
890  if (item && item->infer_request) {
891  ie_infer_request_free(&item->infer_request);
892  }
893  av_freep(&item->lltasks);
894  av_freep(&item);
895  }
897 
898  while (ff_queue_size(ov_model->lltask_queue) != 0) {
900  av_freep(&item);
901  }
902  ff_queue_destroy(ov_model->lltask_queue);
903 
904  while (ff_queue_size(ov_model->task_queue) != 0) {
905  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
906  av_frame_free(&item->in_frame);
907  av_frame_free(&item->out_frame);
908  av_freep(&item);
909  }
910  ff_queue_destroy(ov_model->task_queue);
911 
912  if (ov_model->exe_network)
913  ie_exec_network_free(&ov_model->exe_network);
914  if (ov_model->network)
915  ie_network_free(&ov_model->network);
916  if (ov_model->core)
917  ie_core_free(&ov_model->core);
918  av_freep(&ov_model);
919  av_freep(model);
920  }
921 }
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:60
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:68
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:48
ff_dnn_fill_task
DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
opt.h
filter_ctx
static FilteringContext * filter_ctx
Definition: transcoding.c:49
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1364
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:617
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:57
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
AVFrame::width
int width
Definition: frame.h:389
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:42
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:247
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:704
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:55
FLAGS
#define FLAGS
Definition: cmdutils.c:535
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:52
get_input_ov
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:483
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:43
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
ff_proc_from_dnn_to_frame
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27
ff_proc_from_frame_to_dnn
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:33
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:40
DNNModel::get_output
DNNReturnType(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
init_model_ov
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:304
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:226
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
fill_model_input_ov
static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:115
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:884
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:33
execute_model_ov
static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:426
DNNData
Definition: dnn_interface.h:59
ctx
AVFormatContext * ctx
Definition: movenc.c:48
DNNModel::get_input
DNNReturnType(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:65
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:67
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:56
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1565
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:66
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:53
OVRequestItem
Definition: dnn_backend_openvino.c:64
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:191
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:225
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:101
options
const OptionDef options[]
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
ff_dnn_fill_gettingoutput_task
DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:161
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:54
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
extract_lltask_from_task
static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:570
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
get_output_ov
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:636
ff_frame_to_dnn_detect
DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:252
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_dnn_flush_ov
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:846
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:533
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:44
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:77
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:197
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
ff_frame_to_dnn_classify
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:193
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:87
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:33
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:58
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:47
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:59
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:389
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:141
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:41
ff_dnn_execute_model_ov
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:761
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
OVOptions
Definition: dnn_backend_openvino.c:39
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:49
ff_dnn_get_result_ov
DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:840
DNNModel::model
void * model
Definition: dnn_interface.h:86