FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
#include "dnn_backend_openvino.h"
#include "dnn_io_proc.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/opt.h"
#include "libavutil/avstring.h"
#include "libavutil/detection_bbox.h"
#include "../internal.h"
#include "queue.h"
#include "safe_queue.h"
#include <c_api/ie_c_api.h>
#include "dnn_backend_common.h"
39 
// Options parsed from the filter's option string (see dnn_openvino_options).
typedef struct OVOptions{
    char *device_type;   // OpenVINO device name, e.g. "CPU"
    int nireq;           // number of parallel inference requests, <= 0 means auto
    int batch_size;      // images per inference request, range [1, 1000]
    int input_resizable; // non-zero if the network input may be reshaped
} OVOptions;
46 
47 typedef struct OVContext {
48  const AVClass *class;
50 } OVContext;
51 
52 typedef struct OVModel{
55  ie_core_t *core;
56  ie_network_t *network;
57  ie_executable_network_t *exe_network;
58  SafeQueue *request_queue; // holds OVRequestItem
59  Queue *task_queue; // holds TaskItem
60  Queue *inference_queue; // holds InferenceItem
61 } OVModel;
62 
63 // one request for one call to openvino
64 typedef struct OVRequestItem {
65  ie_infer_request_t *infer_request;
67  uint32_t inference_count;
68  ie_complete_call_back_t callback;
70 
// Append iterate_string to the space-separated list in generated_string.
// NOTE(review): the previous value of generated_string is not freed before
// being replaced by av_asprintf — a small leak, only hit on error-report
// paths here; confirm before reusing elsewhere.
#define APPEND_STRING(generated_string, iterate_string) \
    generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
                                          av_asprintf("%s", iterate_string);
74 
75 #define OFFSET(x) offsetof(OVContext, x)
76 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
77 static const AVOption dnn_openvino_options[] = {
78  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
80  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
81  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
82  { NULL }
83 };
84 
85 AVFILTER_DEFINE_CLASS(dnn_openvino);
86 
87 static DNNDataType precision_to_datatype(precision_e precision)
88 {
89  switch (precision)
90  {
91  case FP32:
92  return DNN_FLOAT;
93  case U8:
94  return DNN_UINT8;
95  default:
96  av_assert0(!"not supported yet.");
97  return DNN_FLOAT;
98  }
99 }
100 
102 {
103  switch (dt)
104  {
105  case DNN_FLOAT:
106  return sizeof(float);
107  case DNN_UINT8:
108  return sizeof(uint8_t);
109  default:
110  av_assert0(!"not supported yet.");
111  return 1;
112  }
113 }
114 
116 {
117  dimensions_t dims;
118  precision_e precision;
119  ie_blob_buffer_t blob_buffer;
120  OVContext *ctx = &ov_model->ctx;
121  IEStatusCode status;
122  DNNData input;
123  ie_blob_t *input_blob = NULL;
124  InferenceItem *inference;
125  TaskItem *task;
126 
127  inference = ff_queue_peek_front(ov_model->inference_queue);
128  av_assert0(inference);
129  task = inference->task;
130 
131  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
132  if (status != OK) {
133  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
134  return DNN_ERROR;
135  }
136 
137  status |= ie_blob_get_dims(input_blob, &dims);
138  status |= ie_blob_get_precision(input_blob, &precision);
139  if (status != OK) {
140  ie_blob_free(&input_blob);
141  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
142  return DNN_ERROR;
143  }
144 
145  status = ie_blob_get_buffer(input_blob, &blob_buffer);
146  if (status != OK) {
147  ie_blob_free(&input_blob);
148  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
149  return DNN_ERROR;
150  }
151 
152  input.height = dims.dims[2];
153  input.width = dims.dims[3];
154  input.channels = dims.dims[1];
155  input.data = blob_buffer.buffer;
156  input.dt = precision_to_datatype(precision);
157  // all models in openvino open model zoo use BGR as input,
158  // change to be an option when necessary.
159  input.order = DCO_BGR;
160 
161  for (int i = 0; i < ctx->options.batch_size; ++i) {
162  inference = ff_queue_pop_front(ov_model->inference_queue);
163  if (!inference) {
164  break;
165  }
166  request->inferences[i] = inference;
167  request->inference_count = i + 1;
168  task = inference->task;
169  switch (ov_model->model->func_type) {
170  case DFT_PROCESS_FRAME:
171  if (task->do_ioproc) {
172  if (ov_model->model->frame_pre_proc != NULL) {
173  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
174  } else {
176  }
177  }
178  break;
181  break;
183  ff_frame_to_dnn_classify(task->in_frame, &input, inference->bbox_index, ctx);
184  break;
185  default:
186  av_assert0(!"should not reach here");
187  break;
188  }
189  input.data = (uint8_t *)input.data
190  + input.width * input.height * input.channels * get_datatype_size(input.dt);
191  }
192  ie_blob_free(&input_blob);
193 
194  return DNN_SUCCESS;
195 }
196 
197 static void infer_completion_callback(void *args)
198 {
199  dimensions_t dims;
200  precision_e precision;
201  IEStatusCode status;
202  OVRequestItem *request = args;
203  InferenceItem *inference = request->inferences[0];
204  TaskItem *task = inference->task;
205  OVModel *ov_model = task->model;
206  SafeQueue *requestq = ov_model->request_queue;
207  ie_blob_t *output_blob = NULL;
208  ie_blob_buffer_t blob_buffer;
209  DNNData output;
210  OVContext *ctx = &ov_model->ctx;
211 
212  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
213  if (status != OK) {
214  //incorrect output name
215  char *model_output_name = NULL;
216  char *all_output_names = NULL;
217  size_t model_output_count = 0;
218  av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
219  status = ie_network_get_outputs_number(ov_model->network, &model_output_count);
220  for (size_t i = 0; i < model_output_count; i++) {
221  status = ie_network_get_output_name(ov_model->network, i, &model_output_name);
222  APPEND_STRING(all_output_names, model_output_name)
223  }
225  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
226  task->output_names[0], all_output_names);
227  return;
228  }
229 
230  status = ie_blob_get_buffer(output_blob, &blob_buffer);
231  if (status != OK) {
232  ie_blob_free(&output_blob);
233  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
234  return;
235  }
236 
237  status |= ie_blob_get_dims(output_blob, &dims);
238  status |= ie_blob_get_precision(output_blob, &precision);
239  if (status != OK) {
240  ie_blob_free(&output_blob);
241  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
242  return;
243  }
244 
245  output.channels = dims.dims[1];
246  output.height = dims.dims[2];
247  output.width = dims.dims[3];
248  output.dt = precision_to_datatype(precision);
249  output.data = blob_buffer.buffer;
250 
251  av_assert0(request->inference_count <= dims.dims[0]);
252  av_assert0(request->inference_count >= 1);
253  for (int i = 0; i < request->inference_count; ++i) {
254  task = request->inferences[i]->task;
255  task->inference_done++;
256 
257  switch (ov_model->model->func_type) {
258  case DFT_PROCESS_FRAME:
259  if (task->do_ioproc) {
260  if (ov_model->model->frame_post_proc != NULL) {
261  ov_model->model->frame_post_proc(task->out_frame, &output, ov_model->model->filter_ctx);
262  } else {
264  }
265  } else {
266  task->out_frame->width = output.width;
267  task->out_frame->height = output.height;
268  }
269  break;
271  if (!ov_model->model->detect_post_proc) {
272  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
273  return;
274  }
275  ov_model->model->detect_post_proc(task->out_frame, &output, 1, ov_model->model->filter_ctx);
276  break;
278  if (!ov_model->model->classify_post_proc) {
279  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
280  return;
281  }
282  ov_model->model->classify_post_proc(task->out_frame, &output, request->inferences[i]->bbox_index, ov_model->model->filter_ctx);
283  break;
284  default:
285  av_assert0(!"should not reach here");
286  break;
287  }
288 
289  av_freep(&request->inferences[i]);
290  output.data = (uint8_t *)output.data
291  + output.width * output.height * output.channels * get_datatype_size(output.dt);
292  }
293  ie_blob_free(&output_blob);
294 
295  request->inference_count = 0;
296  if (ff_safe_queue_push_back(requestq, request) < 0) {
297  ie_infer_request_free(&request->infer_request);
298  av_freep(&request);
299  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
300  return;
301  }
302 }
303 
/**
 * Configure the already-parsed network (batch size, layouts, precision),
 * load it onto the selected device, and build the request pool plus the
 * task/inference queues.
 *
 * On any failure the whole model is torn down via ff_dnn_free_model_ov(),
 * which also frees every item already pushed to the queues.
 */
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
{
    OVContext *ctx = &ov_model->ctx;
    IEStatusCode status;
    ie_available_devices_t a_dev;
    ie_config_t config = {NULL, NULL, NULL};
    char *all_dev_names = NULL;

    // batch size
    if (ctx->options.batch_size <= 0) {
        ctx->options.batch_size = 1;
    }

    if (ctx->options.batch_size > 1) {
        // reshape every network input so its batch dimension (dims[0])
        // matches the requested batch size
        input_shapes_t input_shapes;
        status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
        if (status != OK)
            goto err;
        for (int i = 0; i < input_shapes.shape_num; i++)
            input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
        status = ie_network_reshape(ov_model->network, input_shapes);
        ie_network_input_shapes_free(&input_shapes);
        if (status != OK)
            goto err;
    }

    // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
    // while we pass NHWC data from FFmpeg to openvino
    status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
        goto err;
    }
    status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
        goto err;
    }

    // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
    // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
    // ask openvino to do the conversion internally.
    // the current supported SR model (frame processing) is generated from tensorflow model,
    // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
    // TODO: we need to get a final clear&general solution with all backends/formats considered.
    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
        status = ie_network_set_input_precision(ov_model->network, input_name, U8);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
            goto err;
        }
    }

    status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
    if (status != OK) {
        // loading failed: report which devices are actually available
        av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
        status = ie_core_get_available_devices(ov_model->core, &a_dev);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
            goto err;
        }
        for (int i = 0; i < a_dev.num_devices; i++) {
            APPEND_STRING(all_dev_names, a_dev.devices[i])
        }
        av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
               ctx->options.device_type, all_dev_names);
        goto err;
    }

    // create infer_requests for async execution
    if (ctx->options.nireq <= 0) {
        // the default value is a rough estimation
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

    ov_model->request_queue = ff_safe_queue_create();
    if (!ov_model->request_queue) {
        goto err;
    }

    for (int i = 0; i < ctx->options.nireq; i++) {
        OVRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }

        // the request itself is the callback argument, so the callback can
        // recycle it back into request_queue when inference finishes
        item->callback.completeCallBackFunc = infer_completion_callback;
        item->callback.args = item;
        // push first: once in the queue, err-path cleanup owns the item
        if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
            av_freep(&item);
            goto err;
        }

        status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
        if (status != OK) {
            goto err;
        }

        item->inferences = av_malloc_array(ctx->options.batch_size, sizeof(*item->inferences));
        if (!item->inferences) {
            goto err;
        }
        item->inference_count = 0;
    }

    ov_model->task_queue = ff_queue_create();
    if (!ov_model->task_queue) {
        goto err;
    }

    ov_model->inference_queue = ff_queue_create();
    if (!ov_model->inference_queue) {
        goto err;
    }

    return DNN_SUCCESS;

err:
    // frees ov_model and everything hanging off it, including queued items
    ff_dnn_free_model_ov(&ov_model->model);
    return DNN_ERROR;
}
425 
426 static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
427 {
428  IEStatusCode status;
430  InferenceItem *inference;
431  TaskItem *task;
432  OVContext *ctx;
433  OVModel *ov_model;
434 
435  if (ff_queue_size(inferenceq) == 0) {
436  ie_infer_request_free(&request->infer_request);
437  av_freep(&request);
438  return DNN_SUCCESS;
439  }
440 
441  inference = ff_queue_peek_front(inferenceq);
442  task = inference->task;
443  ov_model = task->model;
444  ctx = &ov_model->ctx;
445 
446  if (task->async) {
447  ret = fill_model_input_ov(ov_model, request);
448  if (ret != DNN_SUCCESS) {
449  goto err;
450  }
451  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
452  if (status != OK) {
453  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
454  goto err;
455  }
456  status = ie_infer_request_infer_async(request->infer_request);
457  if (status != OK) {
458  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
459  goto err;
460  }
461  return DNN_SUCCESS;
462  } else {
463  ret = fill_model_input_ov(ov_model, request);
464  if (ret != DNN_SUCCESS) {
465  goto err;
466  }
467  status = ie_infer_request_infer(request->infer_request);
468  if (status != OK) {
469  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
470  goto err;
471  }
472  infer_completion_callback(request);
473  return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
474  }
475 err:
476  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
477  ie_infer_request_free(&request->infer_request);
478  av_freep(&request);
479  }
480  return DNN_ERROR;
481 }
482 
/**
 * DNNModel.get_input implementation: look up the named network input and
 * report its channels/height/width and data type.
 * When input_resizable is set, height/width are reported as -1 since the
 * network will be reshaped to the frame size later.
 *
 * NOTE(review): all_input_names (and the last model_input_name on the error
 * path) are never freed — small leak on the failure path only; confirm
 * whether upstream accepts this for error reporting.
 */
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
{
    OVModel *ov_model = model;
    OVContext *ctx = &ov_model->ctx;
    char *model_input_name = NULL;
    char *all_input_names = NULL;
    IEStatusCode status;
    size_t model_input_count = 0;
    dimensions_t dims;
    precision_e precision;
    int input_resizable = ctx->options.input_resizable;

    status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
    if (status != OK) {
        av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
        return DNN_ERROR;
    }

    // scan all network inputs for the requested name
    for (size_t i = 0; i < model_input_count; i++) {
        status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
        if (status != OK) {
            av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
            return DNN_ERROR;
        }
        if (strcmp(model_input_name, input_name) == 0) {
            ie_network_name_free(&model_input_name);
            status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
            status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
            if (status != OK) {
                av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
                return DNN_ERROR;
            }

            // dims are NCHW; -1 marks "resizable, decided per-frame"
            input->channels = dims.dims[1];
            input->height = input_resizable ? -1 : dims.dims[2];
            input->width = input_resizable ? -1 : dims.dims[3];
            input->dt = precision_to_datatype(precision);
            return DNN_SUCCESS;
        } else {
            //incorrect input name
            APPEND_STRING(all_input_names, model_input_name)
        }

        ie_network_name_free(&model_input_name);
    }

    av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
    return DNN_ERROR;
}
532 
534 {
535  AVFrameSideData *sd;
537  const AVDetectionBBox *bbox;
538 
540  if (!sd) { // this frame has nothing detected
541  return 0;
542  }
543 
544  if (!sd->size) {
545  return 0;
546  }
547 
548  header = (const AVDetectionBBoxHeader *)sd->data;
549  if (!header->nb_bboxes) {
550  return 0;
551  }
552 
553  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
554  bbox = av_get_detection_bbox(header, i);
555  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
556  return 0;
557  }
558  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->width) {
559  return 0;
560  }
561 
563  return 0;
564  }
565  }
566 
567  return 1;
568 }
569 
570 static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
571 {
572  switch (func_type) {
573  case DFT_PROCESS_FRAME:
575  {
576  InferenceItem *inference = av_malloc(sizeof(*inference));
577  if (!inference) {
578  return DNN_ERROR;
579  }
580  task->inference_todo = 1;
581  task->inference_done = 0;
582  inference->task = task;
583  if (ff_queue_push_back(inference_queue, inference) < 0) {
584  av_freep(&inference);
585  return DNN_ERROR;
586  }
587  return DNN_SUCCESS;
588  }
590  {
592  AVFrame *frame = task->in_frame;
593  AVFrameSideData *sd;
595 
596  task->inference_todo = 0;
597  task->inference_done = 0;
598 
600  return DNN_SUCCESS;
601  }
602 
604  header = (const AVDetectionBBoxHeader *)sd->data;
605 
606  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
607  InferenceItem *inference;
609 
610  if (params->target) {
611  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
612  continue;
613  }
614  }
615 
616  inference = av_malloc(sizeof(*inference));
617  if (!inference) {
618  return DNN_ERROR;
619  }
620  task->inference_todo++;
621  inference->task = task;
622  inference->bbox_index = i;
623  if (ff_queue_push_back(inference_queue, inference) < 0) {
624  av_freep(&inference);
625  return DNN_ERROR;
626  }
627  }
628  return DNN_SUCCESS;
629  }
630  default:
631  av_assert0(!"should not reach here");
632  return DNN_ERROR;
633  }
634 }
635 
636 static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
637  const char *output_name, int *output_width, int *output_height)
638 {
640  OVModel *ov_model = model;
641  OVContext *ctx = &ov_model->ctx;
642  TaskItem task;
643  OVRequestItem *request;
644  AVFrame *in_frame = NULL;
645  AVFrame *out_frame = NULL;
646  IEStatusCode status;
647  input_shapes_t input_shapes;
648 
649  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
650  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
651  return DNN_ERROR;
652  }
653 
654  if (ctx->options.input_resizable) {
655  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
656  input_shapes.shapes->shape.dims[2] = input_height;
657  input_shapes.shapes->shape.dims[3] = input_width;
658  status |= ie_network_reshape(ov_model->network, input_shapes);
659  ie_network_input_shapes_free(&input_shapes);
660  if (status != OK) {
661  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
662  return DNN_ERROR;
663  }
664  }
665 
666  if (!ov_model->exe_network) {
667  if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
668  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
669  return DNN_ERROR;
670  }
671  }
672 
673  in_frame = av_frame_alloc();
674  if (!in_frame) {
675  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
676  return DNN_ERROR;
677  }
678  in_frame->width = input_width;
679  in_frame->height = input_height;
680 
681  out_frame = av_frame_alloc();
682  if (!out_frame) {
683  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
684  av_frame_free(&in_frame);
685  return DNN_ERROR;
686  }
687 
688  task.do_ioproc = 0;
689  task.async = 0;
690  task.input_name = input_name;
691  task.in_frame = in_frame;
692  task.output_names = &output_name;
693  task.out_frame = out_frame;
694  task.nb_output = 1;
695  task.model = ov_model;
696 
697  if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, NULL) != DNN_SUCCESS) {
698  av_frame_free(&out_frame);
699  av_frame_free(&in_frame);
700  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
701  return DNN_ERROR;
702  }
703 
704  request = ff_safe_queue_pop_front(ov_model->request_queue);
705  if (!request) {
706  av_frame_free(&out_frame);
707  av_frame_free(&in_frame);
708  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
709  return DNN_ERROR;
710  }
711 
712  ret = execute_model_ov(request, ov_model->inference_queue);
713  *output_width = out_frame->width;
714  *output_height = out_frame->height;
715 
716  av_frame_free(&out_frame);
717  av_frame_free(&in_frame);
718  return ret;
719 }
720 
721 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
722 {
723  DNNModel *model = NULL;
724  OVModel *ov_model = NULL;
725  OVContext *ctx = NULL;
726  IEStatusCode status;
727 
728  model = av_mallocz(sizeof(DNNModel));
729  if (!model){
730  return NULL;
731  }
732 
733  ov_model = av_mallocz(sizeof(OVModel));
734  if (!ov_model) {
735  av_freep(&model);
736  return NULL;
737  }
738  model->model = ov_model;
739  ov_model->model = model;
740  ov_model->ctx.class = &dnn_openvino_class;
741  ctx = &ov_model->ctx;
742 
743  //parse options
745  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
746  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
747  goto err;
748  }
749 
750  status = ie_core_create("", &ov_model->core);
751  if (status != OK)
752  goto err;
753 
754  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
755  if (status != OK) {
756  ie_version_t ver;
757  ver = ie_c_api_version();
758  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
759  "Please check if the model version matches the runtime OpenVINO %s\n",
760  model_filename, ver.api_version);
761  ie_version_free(&ver);
762  goto err;
763  }
764 
765  model->get_input = &get_input_ov;
766  model->get_output = &get_output_ov;
767  model->options = options;
768  model->filter_ctx = filter_ctx;
769  model->func_type = func_type;
770 
771  return model;
772 
773 err:
774  ff_dnn_free_model_ov(&model);
775  return NULL;
776 }
777 
779 {
780  OVModel *ov_model = model->model;
781  OVContext *ctx = &ov_model->ctx;
782  TaskItem task;
783  OVRequestItem *request;
784 
785  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
786  return DNN_ERROR;
787  }
788 
789  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
790  // Once we add async support for tensorflow backend and native backend,
791  // we'll combine the two sync/async functions in dnn_interface.h to
792  // simplify the code in filter, and async will be an option within backends.
793  // so, do not support now, and classify filter will not call this function.
794  return DNN_ERROR;
795  }
796 
797  if (ctx->options.batch_size > 1) {
798  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
799  return DNN_ERROR;
800  }
801 
802  if (!ov_model->exe_network) {
803  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
804  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
805  return DNN_ERROR;
806  }
807  }
808 
809  if (ff_dnn_fill_task(&task, exec_params, ov_model, 0, 1) != DNN_SUCCESS) {
810  return DNN_ERROR;
811  }
812 
813  if (extract_inference_from_task(ov_model->model->func_type, &task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
814  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
815  return DNN_ERROR;
816  }
817 
818  request = ff_safe_queue_pop_front(ov_model->request_queue);
819  if (!request) {
820  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
821  return DNN_ERROR;
822  }
823 
824  return execute_model_ov(request, ov_model->inference_queue);
825 }
826 
828 {
829  OVModel *ov_model = model->model;
830  OVContext *ctx = &ov_model->ctx;
831  OVRequestItem *request;
832  TaskItem *task;
834 
835  if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
836  return DNN_ERROR;
837  }
838 
839  if (!ov_model->exe_network) {
840  if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
841  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
842  return DNN_ERROR;
843  }
844  }
845 
846  task = av_malloc(sizeof(*task));
847  if (!task) {
848  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
849  return DNN_ERROR;
850  }
851 
852  if (ff_dnn_fill_task(task, exec_params, ov_model, 1, 1) != DNN_SUCCESS) {
853  return DNN_ERROR;
854  }
855 
856  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
857  av_freep(&task);
858  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
859  return DNN_ERROR;
860  }
861 
862  if (extract_inference_from_task(model->func_type, task, ov_model->inference_queue, exec_params) != DNN_SUCCESS) {
863  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
864  return DNN_ERROR;
865  }
866 
867  while (ff_queue_size(ov_model->inference_queue) >= ctx->options.batch_size) {
868  request = ff_safe_queue_pop_front(ov_model->request_queue);
869  if (!request) {
870  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
871  return DNN_ERROR;
872  }
873 
874  ret = execute_model_ov(request, ov_model->inference_queue);
875  if (ret != DNN_SUCCESS) {
876  return ret;
877  }
878  }
879 
880  return DNN_SUCCESS;
881 }
882 
884 {
885  OVModel *ov_model = model->model;
886  TaskItem *task = ff_queue_peek_front(ov_model->task_queue);
887 
888  if (!task) {
889  return DAST_EMPTY_QUEUE;
890  }
891 
892  if (task->inference_done != task->inference_todo) {
893  return DAST_NOT_READY;
894  }
895 
896  *in = task->in_frame;
897  *out = task->out_frame;
898  ff_queue_pop_front(ov_model->task_queue);
899  av_freep(&task);
900 
901  return DAST_SUCCESS;
902 }
903 
905 {
906  OVModel *ov_model = model->model;
907  OVContext *ctx = &ov_model->ctx;
908  OVRequestItem *request;
909  IEStatusCode status;
911 
912  if (ff_queue_size(ov_model->inference_queue) == 0) {
913  // no pending task need to flush
914  return DNN_SUCCESS;
915  }
916 
917  request = ff_safe_queue_pop_front(ov_model->request_queue);
918  if (!request) {
919  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
920  return DNN_ERROR;
921  }
922 
923  ret = fill_model_input_ov(ov_model, request);
924  if (ret != DNN_SUCCESS) {
925  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
926  return ret;
927  }
928  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
929  if (status != OK) {
930  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
931  return DNN_ERROR;
932  }
933  status = ie_infer_request_infer_async(request->infer_request);
934  if (status != OK) {
935  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
936  return DNN_ERROR;
937  }
938 
939  return DNN_SUCCESS;
940 }
941 
943 {
944  if (*model){
945  OVModel *ov_model = (*model)->model;
946  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
948  if (item && item->infer_request) {
949  ie_infer_request_free(&item->infer_request);
950  }
951  av_freep(&item->inferences);
952  av_freep(&item);
953  }
955 
956  while (ff_queue_size(ov_model->inference_queue) != 0) {
958  av_freep(&item);
959  }
961 
962  while (ff_queue_size(ov_model->task_queue) != 0) {
963  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
964  av_frame_free(&item->in_frame);
965  av_frame_free(&item->out_frame);
966  av_freep(&item);
967  }
968  ff_queue_destroy(ov_model->task_queue);
969 
970  if (ov_model->exe_network)
971  ie_exec_network_free(&ov_model->exe_network);
972  if (ov_model->network)
973  ie_network_free(&ov_model->network);
974  if (ov_model->core)
975  ie_core_free(&ov_model->core);
976  av_freep(&ov_model);
977  av_freep(model);
978  }
979 }
InferenceItem
Definition: dnn_backend_common.h:47
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:68
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:48
ff_dnn_fill_task
DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:53
opt.h
filter_ctx
static FilteringContext * filter_ctx
Definition: transcoding.c:49
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1358
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:617
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:57
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:26
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
AVFrame::width
int width
Definition: frame.h:361
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:247
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
ff_dnn_load_model_ov
DNNModel * ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_openvino.c:721
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:55
OVRequestItem::inferences
InferenceItem ** inferences
Definition: dnn_backend_openvino.c:66
FLAGS
#define FLAGS
Definition: cmdutils.c:542
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:33
InferenceItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:49
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:52
get_input_ov
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:483
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:43
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
InferenceItem::task
TaskItem * task
Definition: dnn_backend_common.h:48
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
ff_proc_from_dnn_to_frame
DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27
ff_proc_from_frame_to_dnn
DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:34
DNN_SUCCESS
@ DNN_SUCCESS
Definition: dnn_interface.h:33
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:41
DNNModel::get_output
DNNReturnType(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
init_model_ov
static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
Definition: dnn_backend_openvino.c:304
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:29
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:212
fill_model_input_ov
static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:115
DNNExecClassificationParams
Definition: dnn_interface.h:75
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
ff_dnn_free_model_ov
void ff_dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:942
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNReturnType
DNNReturnType
Definition: dnn_interface.h:33
execute_model_ov
static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:426
DNNData
Definition: dnn_interface.h:59
OVModel::inference_queue
Queue * inference_queue
Definition: dnn_backend_openvino.c:60
ctx
AVFormatContext * ctx
Definition: movenc.c:48
DNNModel::get_input
DNNReturnType(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:42
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:65
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
if
if(ret)
Definition: filter_design.txt:179
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:77
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
NULL
#define NULL
Definition: coverity.c:32
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:56
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1559
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:35
OVRequestItem::inference_count
uint32_t inference_count
Definition: dnn_backend_openvino.c:67
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:53
OVRequestItem
Definition: dnn_backend_openvino.c:64
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:185
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:225
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:101
options
const OptionDef options[]
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:39
DAST_SUCCESS
@ DAST_SUCCESS
Definition: dnn_interface.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:43
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
queue.h
DAST_EMPTY_QUEUE
@ DAST_EMPTY_QUEUE
Definition: dnn_interface.h:47
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:54
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:211
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
header
static const uint8_t header[24]
Definition: sdr2.c:67
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
get_output_ov
static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:636
ff_frame_to_dnn_detect
DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:252
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_dnn_flush_ov
DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
Definition: dnn_backend_openvino.c:904
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:533
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:44
i
int i
Definition: input.c:406
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:77
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:197
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
ff_dnn_get_async_result_ov
DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_openvino.c:883
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
ff_frame_to_dnn_classify
DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:193
precision_to_datatype
static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:87
DNN_ERROR
@ DNN_ERROR
Definition: dnn_interface.h:33
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:243
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:58
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:38
OVContext
Definition: dnn_backend_openvino.c:47
dnn_backend_openvino.h
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:108
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:36
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:59
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:361
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:69
AVFilterContext
An instance of a filter.
Definition: avfilter.h:333
DNNModel
Definition: dnn_interface.h:84
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:209
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:37
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:88
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:42
ff_dnn_execute_model_ov
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:778
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
OVOptions
Definition: dnn_backend_openvino.c:40
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVDetectionBBox
Definition: detection_bbox.h:26
extract_inference_from_task
static DNNReturnType extract_inference_from_task(DNNFunctionType func_type, TaskItem *task, Queue *inference_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:570
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:40
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:228
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DAST_NOT_READY
@ DAST_NOT_READY
Definition: dnn_interface.h:48
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:189
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:41
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:49
ff_dnn_execute_model_async_ov
DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:827
DNNModel::model
void * model
Definition: dnn_interface.h:86