FFmpeg
dnn_backend_openvino.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_io_proc.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/mem.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/avstring.h"
34 #include "../internal.h"
35 #include "safe_queue.h"
36 #if HAVE_OPENVINO2
37 #include <openvino/c/openvino.h>
38 #else
39 #include <c_api/ie_c_api.h>
40 #endif
41 #include "dnn_backend_common.h"
42 
// Per-model user options, filled from the AVOption table below (dnn_openvino_options).
// NOTE(review): the extraction dropped original lines 47-49 here; the option table
// references batch_size / input_resizable / layout members that are presumably
// declared in the missing lines — confirm against the full source.
43 typedef struct OVOptions{
44  char *device_type;  // target device name passed to OpenVINO, e.g. "CPU" (default)
45  int nireq;          // number of parallel inference requests; <= 0 selects an estimate (cpu_count/2 + 1)
46  uint8_t async;      // non-zero: run inference asynchronously
50  float scale;        // preprocess: divide each input element by this value (0 means unset)
51  float mean;         // preprocess: subtract this value from each input element
52 } OVOptions;
53 
// AVOption-enabled context for this backend; `class` must stay first so the
// generic AVOption machinery can find it.
// NOTE(review): original line 56 was dropped by the extraction — presumably
// `OVOptions options;`, which the rest of the file accesses as ctx->options.
54 typedef struct OVContext {
55  const AVClass *class;
57 } OVContext;
58 
// Backend-private model state. Holds either the OpenVINO 2.x C API handles or
// the legacy Inference Engine handles, selected at build time.
// NOTE(review): original lines 60-61 and 81 were dropped by the extraction;
// later code dereferences ov_model->ctx, ov_model->model and ov_model->nb_outputs,
// so those members are presumably declared in the missing lines — verify.
59 typedef struct OVModel{
62 #if HAVE_OPENVINO2
63  ov_core_t *core;
64  ov_model_t *ov_model;                   // may be replaced when preprocess steps are built in
65  ov_compiled_model_t *compiled_model;
66  ov_output_const_port_t* input_port;
67  ov_preprocess_input_info_t* input_info;
68  ov_output_const_port_t** output_ports;  // array of nb_outputs const output ports
69  ov_preprocess_output_info_t* output_info;
70  ov_preprocess_prepostprocessor_t* preprocess;
71 #else
72  ie_core_t *core;
73  ie_network_t *network;
74  ie_executable_network_t *exe_network;
75  const char *all_input_names;            // space-separated list, for diagnostics
76  const char *all_output_names;           // space-separated list, for diagnostics
77 #endif
78  SafeQueue *request_queue; // holds OVRequestItem
79  Queue *task_queue; // holds TaskItem
80  Queue *lltask_queue; // holds LastLevelTaskItem
82 } OVModel;
83 
84 // one request for one call to openvino
// one request for one call to openvino
// NOTE(review): original lines 86 and 95 were dropped by the extraction;
// code elsewhere uses item->lltasks (an array sized by batch_size) and the
// struct's closing `} OVRequestItem;` — both presumably lived in the missing
// lines. The callback member type differs between the two API generations.
85 typedef struct OVRequestItem {
87  uint32_t lltask_count;   // number of valid entries in lltasks for this request
88 #if HAVE_OPENVINO2
89  ov_infer_request_t *infer_request;
90  ov_callback_t callback;
91 #else
92  ie_complete_call_back_t callback;
93  ie_infer_request_t *infer_request;
94 #endif
96 
// Append iterate_string to generated_string (space separated), allocating a
// fresh string each time via av_asprintf.
// NOTE(review): the previous value of generated_string is not freed before
// being overwritten, so repeated appends leak the intermediate strings. It is
// only used on an error path to build a device list for a log message, but a
// proper fix (free the old pointer) would be worthwhile; it cannot be done
// safely from the macro alone because the call site omits the trailing ';'.
97 #define APPEND_STRING(generated_string, iterate_string) \
98  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
99  av_asprintf("%s", iterate_string);
100 
// AVOption table exposing the OVOptions fields to filter users.
// NOTE(review): original line 105 was dropped by the extraction — presumably
// the "nireq" and/or "async" option entries; confirm against the full source.
101 #define OFFSET(x) offsetof(OVContext, x)
102 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
103 static const AVOption dnn_openvino_options[] = {
104  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
106  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
107  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
108  { "layout", "input layout of model", OFFSET(options.layout), AV_OPT_TYPE_INT, { .i64 = DL_NONE}, DL_NONE, DL_NHWC, FLAGS, .unit = "layout" },
109  { "none", "none", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NONE }, 0, 0, FLAGS, .unit = "layout"},
110  { "nchw", "nchw", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NCHW }, 0, 0, FLAGS, .unit = "layout"},
111  { "nhwc", "nhwc", 0, AV_OPT_TYPE_CONST, { .i64 = DL_NHWC }, 0, 0, FLAGS, .unit = "layout"},
112  { "scale", "Add scale preprocess operation. Divide each element of input by specified value.", OFFSET(options.scale), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
113  { "mean", "Add mean preprocess operation. Subtract specified value from each element of input.", OFFSET(options.mean), AV_OPT_TYPE_FLOAT, { .dbl = 0 }, INT_MIN, INT_MAX, FLAGS},
114  { NULL }
115 };
116 
117 AVFILTER_DEFINE_CLASS(dnn_openvino);
118 
119 #if HAVE_OPENVINO2
// Mapping table from OpenVINO 2.x C API status codes to FFmpeg AVERROR codes
// plus a human-readable description. Looked up linearly by ov2_map_error();
// unlisted statuses fall back to AVERROR_UNKNOWN there.
120 static const struct {
121  ov_status_e status;
122  int av_err;
123  const char *desc;
124 } ov2_errors[] = {
125  { OK, 0, "success" },
126  { GENERAL_ERROR, AVERROR_EXTERNAL, "general error" },
127  { NOT_IMPLEMENTED, AVERROR(ENOSYS), "not implemented" },
128  { NETWORK_NOT_LOADED, AVERROR_EXTERNAL, "network not loaded" },
129  { PARAMETER_MISMATCH, AVERROR(EINVAL), "parameter mismatch" },
130  { NOT_FOUND, AVERROR_EXTERNAL, "not found" },
131  { OUT_OF_BOUNDS, AVERROR(EOVERFLOW), "out of bounds" },
132  { UNEXPECTED, AVERROR_EXTERNAL, "unexpected" },
133  { REQUEST_BUSY, AVERROR(EBUSY), "request busy" },
134  { RESULT_NOT_READY, AVERROR(EBUSY), "result not ready" },
135  { NOT_ALLOCATED, AVERROR(ENODATA), "not allocated" },
136  { INFER_NOT_STARTED, AVERROR_EXTERNAL, "infer not started" },
137  { NETWORK_NOT_READ, AVERROR_EXTERNAL, "network not read" },
138  { INFER_CANCELLED, AVERROR(ECANCELED), "infer cancelled" },
139  { INVALID_C_PARAM, AVERROR(EINVAL), "invalid C parameter" },
140  { UNKNOWN_C_ERROR, AVERROR_UNKNOWN, "unknown C error" },
141  { NOT_IMPLEMENT_C_METHOD, AVERROR(ENOSYS), "not implement C method" },
142  { UNKNOW_EXCEPTION, AVERROR_UNKNOWN, "unknown exception" },
143 };
144 
145 static int ov2_map_error(ov_status_e status, const char **desc)
146 {
147  int i;
148  for (i = 0; i < FF_ARRAY_ELEMS(ov2_errors); i++) {
149  if (ov2_errors[i].status == status) {
150  if (desc)
151  *desc = ov2_errors[i].desc;
152  return ov2_errors[i].av_err;
153  }
154  }
155  if (desc)
156  *desc = "unknown error";
157  return AVERROR_UNKNOWN;
158 }
159 #endif
160 
161 #if HAVE_OPENVINO2
162 static DNNDataType precision_to_datatype(ov_element_type_e precision)
163 #else
164 static DNNDataType precision_to_datatype(precision_e precision)
165 #endif
166 {
167  switch (precision)
168  {
169 #if HAVE_OPENVINO2
170  case F32:
171 #else
172  case FP32:
173 #endif
174  return DNN_FLOAT;
175  case U8:
176  return DNN_UINT8;
177  default:
178  av_assert0(!"not supported yet.");
179  return DNN_FLOAT;
180  }
181 }
// Return the size in bytes of one element of the given DNN data type.
// Used to advance the input/output data pointer between batch entries.
// NOTE(review): original line 183 (the function signature, presumably
// `static int get_datatype_size(DNNDataType dt)`) was dropped by the
// extraction — restore it from the full source.
182 
184 {
185  switch (dt)
186  {
187  case DNN_FLOAT:
188  return sizeof(float);
189  case DNN_UINT8:
190  return sizeof(uint8_t);
191  default:
192  av_assert0(!"not supported yet.");
193  return 1;
194  }
195 }
196 
// Prepare the input tensor/blob of an inference request from the queued
// LastLevelTaskItems: query the model input port (name, shape, precision),
// then for each task in the batch create/set the input tensor and run the
// frame pre-processing into it. Returns 0 on success or a negative AVERROR.
// NOTE(review): the extraction dropped several original lines in this
// function (329, 333-337): presumably the default ff_proc_from_frame_to_dnn()
// call and the DFT_ANALYTICS_DETECT/DFT_ANALYTICS_CLASSIFY case labels and
// their pre-proc calls — verify against the full source.
197 static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
198 {
199  DNNData input;
200  LastLevelTaskItem *lltask;
201  TaskItem *task;
202  OVContext *ctx = &ov_model->ctx;
203 #if HAVE_OPENVINO2
204  int64_t* dims;
205  ov_status_e status;
206  ov_tensor_t* tensor = NULL;
207  ov_shape_t input_shape = {0};
208  ov_element_type_e precision;
209  char *port_name;
210 #else
211  dimensions_t dims;
212  precision_e precision;
213  ie_blob_buffer_t blob_buffer;
214  IEStatusCode status;
215  ie_blob_t *input_blob = NULL;
216 #endif
217 
218  memset(&input, 0, sizeof(input));
219  lltask = ff_queue_peek_front(ov_model->lltask_queue);
220  av_assert0(lltask);
221  task = lltask->task;
222 
223 #if HAVE_OPENVINO2
 // Re-query the input port each call; free any port kept from a previous run.
224  if (ov_model->input_port) {
225  ov_output_const_port_free(ov_model->input_port);
226  ov_model->input_port = NULL;
227  }
228  if (task->input_name)
229  status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
230  else
231  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
232  if (status != OK) {
 // NOTE(review): message says "shape" but this is the port lookup failing.
233  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
234  return ov2_map_error(status, NULL);
235  }
236  status = ov_port_get_any_name(ov_model->input_port, &port_name);
237  if (status != OK) {
238  av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
239  return ov2_map_error(status, NULL);
240  }
241  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
242  ov_free(port_name);
243  port_name = NULL;
244 
245  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
246  if (status != OK) {
247  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
248  return ov2_map_error(status, NULL);
249  }
250  dims = input_shape.dims;
251  status = ov_port_get_element_type(ov_model->input_port, &precision);
252  if (status != OK) {
253  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
254  ov_shape_free(&input_shape);
255  return ov2_map_error(status, NULL);
256  }
257  for (int i = 0; i < input_shape.rank; i++)
258  input.dims[i] = dims[i];
 // Layout was forced to NHWC via the preprocessor in init_model_ov().
259  input.layout = DL_NHWC;
260  input.dt = precision_to_datatype(precision);
261 #else
262  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
263  if (status != OK) {
264  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
265  return DNN_GENERIC_ERROR;
266  }
267 
268  status |= ie_blob_get_dims(input_blob, &dims);
269  status |= ie_blob_get_precision(input_blob, &precision);
270  if (status != OK) {
271  ie_blob_free(&input_blob);
272  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
273  return DNN_GENERIC_ERROR;
274  }
275 
276  status = ie_blob_get_buffer(input_blob, &blob_buffer);
277  if (status != OK) {
278  ie_blob_free(&input_blob);
279  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
280  return DNN_GENERIC_ERROR;
281  }
 // NOTE(review): `input_shape` is only declared in the HAVE_OPENVINO2 branch,
 // so this loop bound looks garbled for the legacy-IE path (dims here is a
 // dimensions_t) — check the original file.
282  for (int i = 0; i < input_shape.rank; i++)
283  input.dims[i] = dims[i];
284  input.layout = DL_NCHW;
285  input.data = blob_buffer.buffer;
286  input.dt = precision_to_datatype(precision);
287 #endif
288  // all models in openvino open model zoo use BGR as input,
289  // change to be an option when necessary.
290  input.order = DCO_BGR;
291  // We use preprocess_steps to scale input data, so disable scale and mean here.
292  input.scale = 1;
293  input.mean = 0;
294 
 // Pop up to batch_size tasks and pre-process each into the input tensor.
295  for (int i = 0; i < ctx->options.batch_size; ++i) {
296  lltask = ff_queue_pop_front(ov_model->lltask_queue);
297  if (!lltask) {
298  break;
299  }
300  request->lltasks[i] = lltask;
301  request->lltask_count = i + 1;
302  task = lltask->task;
303 #if HAVE_OPENVINO2
304  if (tensor)
305  ov_tensor_free(tensor);
306  status = ov_tensor_create(precision, input_shape, &tensor);
 // NOTE(review): input_shape is freed on the first iteration but reused by
 // ov_tensor_create on later iterations (batch_size is clamped to 1 for
 // OpenVINO 2 in init_model_ov, which masks this) — confirm intent.
307  ov_shape_free(&input_shape);
308  if (status != OK) {
309  av_log(ctx, AV_LOG_ERROR, "Failed to create tensor from host prt.\n");
310  return ov2_map_error(status, NULL);
311  }
312  status = ov_tensor_data(tensor, &input.data);
313  if (status != OK) {
314  av_log(ctx, AV_LOG_ERROR, "Failed to get input data.\n");
315  return ov2_map_error(status, NULL);
316  }
317  status = ov_infer_request_set_input_tensor(request->infer_request, tensor);
318  if (status != OK) {
319  av_log(ctx, AV_LOG_ERROR, "Failed to Set an input tensor for the model.\n");
320  return ov2_map_error(status, NULL);
321  }
322 #endif
323  switch (ov_model->model->func_type) {
324  case DFT_PROCESS_FRAME:
325  if (task->do_ioproc) {
326  if (ov_model->model->frame_pre_proc != NULL) {
327  ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
328  } else {
 // NOTE(review): original line 329 dropped — presumably the default
 // ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx) call.
330  }
331  }
332  break;
 // NOTE(review): original lines 333-334 and 336-337 dropped — presumably the
 // DFT_ANALYTICS_DETECT and DFT_ANALYTICS_CLASSIFY case labels and their
 // ff_frame_to_dnn_* pre-processing calls.
335  break;
338  break;
339  default:
340  av_assert0(!"should not reach here");
341  break;
342  }
 // Advance to the next batch slot: one H*W*C plane worth of elements.
343  input.data = (uint8_t *)input.data +
344  input.dims[1] * input.dims[2] * input.dims[3] * get_datatype_size(input.dt);
345  }
346 #if HAVE_OPENVINO2
347  ov_tensor_free(tensor);
348 #else
349  ie_blob_free(&input_blob);
350 #endif
351 
352  return 0;
353 }
354 
// Completion callback invoked when an inference request finishes (also called
// synchronously for non-async execution). Reads back the output tensors/blobs,
// runs the per-task post-processing (frame/detect/classify), then returns the
// request item to the free-request queue.
// NOTE(review): the extraction dropped several original lines here (382, 389,
// 429, 471, 475, 477, 480, 489, 509) — presumably av_log(ctx, AV_LOG_ERROR,
// ...) first halves, the default ff_proc_from_dnn_to_frame() call, the
// out_frame width/height assignments, the DFT_ANALYTICS_* case labels, and
// the get_datatype_size() factor in the pointer advance — verify against the
// full source.
355 static void infer_completion_callback(void *args)
356 {
357  OVRequestItem *request = args;
358  LastLevelTaskItem *lltask = request->lltasks[0];
359  TaskItem *task = lltask->task;
360  OVModel *ov_model = task->model;
361  SafeQueue *requestq = ov_model->request_queue;
362  DNNData *outputs;
363  OVContext *ctx = &ov_model->ctx;
364 #if HAVE_OPENVINO2
365  size_t* dims;
366  ov_status_e status;
367  ov_tensor_t *output_tensor;
368  ov_shape_t output_shape = {0};
369  ov_element_type_e precision;
370 
371  outputs = av_calloc(ov_model->nb_outputs, sizeof(*outputs));
372  if (!outputs) {
373  av_log(ctx, AV_LOG_ERROR, "Failed to alloc outputs.");
374  return;
375  }
376 
 // Collect every model output into the outputs[] array.
377  for (int i = 0; i < ov_model->nb_outputs; i++) {
378  status = ov_infer_request_get_tensor_by_const_port(request->infer_request,
379  ov_model->output_ports[i],
380  &output_tensor);
381  if (status != OK) {
383  "Failed to get output tensor.");
384  goto end;
385  }
386 
387  status = ov_tensor_data(output_tensor, &outputs[i].data);
388  if (status != OK) {
390  "Failed to get output data.");
391  goto end;
392  }
393 
394  status = ov_tensor_get_shape(output_tensor, &output_shape);
395  if (status != OK) {
396  av_log(ctx, AV_LOG_ERROR, "Failed to get output port shape.\n");
397  goto end;
398  }
399  dims = output_shape.dims;
400 
401  status = ov_port_get_element_type(ov_model->output_ports[i], &precision);
402  if (status != OK) {
403  av_log(ctx, AV_LOG_ERROR, "Failed to get output port data type.\n");
404  goto end;
405  }
406  outputs[i].dt = precision_to_datatype(precision);
 // Normalize the (possibly <4-D) tensor shape into dims[0..3]; the initial
 // DL_NCHW is immediately overridden by the user-selected layout below.
407  outputs[i].layout = DL_NCHW;
408  outputs[i].dims[0] = 1;
409  outputs[i].dims[1] = output_shape.rank > 2 ? dims[output_shape.rank - 3] : 1;
410  outputs[i].dims[2] = output_shape.rank > 1 ? dims[output_shape.rank - 2] : 1;
411  outputs[i].dims[3] = output_shape.rank > 0 ? dims[output_shape.rank - 1] : 1;
412  av_assert0(request->lltask_count <= dims[0]);
413  outputs[i].layout = ctx->options.layout;
414  outputs[i].scale = ctx->options.scale;
415  outputs[i].mean = ctx->options.mean;
416  ov_shape_free(&output_shape);
417  ov_tensor_free(output_tensor);
418  output_tensor = NULL;
419  }
420 #else
421  IEStatusCode status;
422  dimensions_t dims;
423  ie_blob_t *output_blob = NULL;
424  ie_blob_buffer_t blob_buffer;
425  precision_e precision;
426  DNNData output;
427  status = ie_infer_request_get_blob(request->infer_request, task->output_names[0], &output_blob);
428  if (status != OK) {
430  "output \"%s\" may not correct, all output(s) are: \"%s\"\n",
431  task->output_names[0], ov_model->all_output_names);
432  return;
433  }
434 
435  status = ie_blob_get_buffer(output_blob, &blob_buffer);
436  if (status != OK) {
437  ie_blob_free(&output_blob);
438  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
439  return;
440  }
441 
442  status |= ie_blob_get_dims(output_blob, &dims);
443  status |= ie_blob_get_precision(output_blob, &precision);
444  if (status != OK) {
445  ie_blob_free(&output_blob);
446  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
447  return;
448  }
449  output.data = blob_buffer.buffer;
450  output.layout = DL_NCHW;
451  for (int i = 0; i < 4; i++)
452  output.dims[i] = dims.dims[i];
453  av_assert0(request->lltask_count <= dims.dims[0]);
454  output.dt = precision_to_datatype(precision);
455  output.layout = ctx->options.layout;
456  output.scale = ctx->options.scale;
457  output.mean = ctx->options.mean;
 // Legacy IE path has a single output; alias it so the shared loop below works.
458  outputs = &output;
459 #endif
460 
 // Dispatch post-processing per queued task of this batch.
461  av_assert0(request->lltask_count >= 1);
462  for (int i = 0; i < request->lltask_count; ++i) {
463  task = request->lltasks[i]->task;
464 
465  switch (ov_model->model->func_type) {
466  case DFT_PROCESS_FRAME:
467  if (task->do_ioproc) {
468  if (ov_model->model->frame_post_proc != NULL) {
469  ov_model->model->frame_post_proc(task->out_frame, outputs, ov_model->model->filter_ctx);
470  } else {
472  }
473  } else {
474  task->out_frame->width =
476  task->out_frame->height =
478  }
479  break;
 // NOTE(review): case label (presumably DFT_ANALYTICS_DETECT) dropped at
 // original line 480.
481  if (!ov_model->model->detect_post_proc) {
482  av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
483  goto end;
484  }
485  ov_model->model->detect_post_proc(task->in_frame, outputs,
486  ov_model->nb_outputs,
487  ov_model->model->filter_ctx);
488  break;
 // NOTE(review): case label (presumably DFT_ANALYTICS_CLASSIFY) dropped at
 // original line 489.
490  if (!ov_model->model->classify_post_proc) {
491  av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
492  goto end;
493  }
494  for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
495  ov_model->model->classify_post_proc(task->in_frame, outputs,
496  request->lltasks[i]->bbox_index,
497  ov_model->model->filter_ctx);
498  break;
499  default:
500  av_assert0(!"should not reach here");
501  break;
502  }
503 
504  task->inference_done++;
505  av_freep(&request->lltasks[i]);
 // Advance each output pointer to the next batch entry.
506  for (int i = 0; i < ov_model->nb_outputs; i++)
507  outputs[i].data = (uint8_t *)outputs[i].data +
508  outputs[i].dims[1] * outputs[i].dims[2] * outputs[i].dims[3] *
510  }
511 end:
512 #if HAVE_OPENVINO2
513  av_freep(&outputs);
514  ov_shape_free(&output_shape);
515  if (output_tensor)
516  ov_tensor_free(output_tensor);
517 #else
518  ie_blob_free(&output_blob);
519 #endif
 // Recycle the request item for the next inference.
520  request->lltask_count = 0;
521  if (ff_safe_queue_push_back(requestq, request) < 0) {
522 #if HAVE_OPENVINO2
523  ov_infer_request_free(request->infer_request);
524 #else
525  ie_infer_request_free(&request->infer_request);
526 #endif
527  av_freep(&request);
528  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
529  return;
530  }
531 }
532 
// Free an OpenVINO-backed DNNModel and everything it owns: pending requests,
// queued tasks, and the API handles of whichever OpenVINO generation is built.
// Safe on NULL / already-freed input. Sets *model to NULL on return.
// NOTE(review): the extraction dropped original lines 542, 553 and 556 —
// presumably `OVRequestItem *item = ff_safe_queue_pop_front(...)`,
// `ff_safe_queue_destroy(ov_model->request_queue);` and
// `LastLevelTaskItem *item = ff_queue_pop_front(...)` — verify against the
// full source.
533 static void dnn_free_model_ov(DNNModel **model)
534 {
535  OVModel *ov_model;
536 
537  if (!model || !*model)
538  return;
539 
540  ov_model = (*model)->model;
 // Drain and free all infer-request items.
541  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
543  if (item && item->infer_request) {
544 #if HAVE_OPENVINO2
545  ov_infer_request_free(item->infer_request);
546 #else
547  ie_infer_request_free(&item->infer_request);
548 #endif
549  }
550  av_freep(&item->lltasks);
551  av_freep(&item);
552  }
554 
555  while (ff_queue_size(ov_model->lltask_queue) != 0) {
557  av_freep(&item);
558  }
559  ff_queue_destroy(ov_model->lltask_queue);
560 
 // Tasks own their frames; free them before the item itself.
561  while (ff_queue_size(ov_model->task_queue) != 0) {
562  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
563  av_frame_free(&item->in_frame);
564  av_frame_free(&item->out_frame);
565  av_freep(&item);
566  }
567  ff_queue_destroy(ov_model->task_queue);
568 #if HAVE_OPENVINO2
569  if (ov_model->input_port)
570  ov_output_const_port_free(ov_model->input_port);
571  for (int i = 0; i < ov_model->nb_outputs; i++)
572  if (ov_model->output_ports[i])
573  ov_output_const_port_free(ov_model->output_ports[i]);
574  av_freep(&ov_model->output_ports);
575  if (ov_model->preprocess)
576  ov_preprocess_prepostprocessor_free(ov_model->preprocess);
577  if (ov_model->compiled_model)
578  ov_compiled_model_free(ov_model->compiled_model);
579  if (ov_model->ov_model)
580  ov_model_free(ov_model->ov_model);
581  if (ov_model->core)
582  ov_core_free(ov_model->core);
583 #else
584  if (ov_model->exe_network)
585  ie_exec_network_free(&ov_model->exe_network);
586  if (ov_model->network)
587  ie_network_free(&ov_model->network);
588  if (ov_model->core)
589  ie_core_free(&ov_model->core);
 // all_*_names were built with av_asprintf; const-cast freed via av_free.
590  av_free(ov_model->all_output_names);
591  av_free(ov_model->all_input_names);
592 #endif
593  av_opt_free(&ov_model->ctx);
594  av_freep(&ov_model);
595  av_freep(model);
596 }
597 
598 
// One-time model initialization: configure the pre/post-processing pipeline
// (layouts, element types, scale/mean), resolve input/output ports, compile
// the model for the target device, and create the pool of inference requests
// plus the task queues. Returns 0 on success or a negative AVERROR; on error
// the whole model is torn down via dnn_free_model_ov().
// NOTE(review): the extraction dropped many original lines in this function —
// most visibly the `ret = ov2_map_error(status, NULL);` (or DNN_GENERIC_ERROR)
// assignments before nearly every `goto err`, several av_log first halves,
// and in the legacy-IE branch the `output_name` variable referenced below is
// not declared anywhere visible — verify all of these against the full source.
599 static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
600 {
601  int ret = 0;
602  OVContext *ctx = &ov_model->ctx;
603 #if HAVE_OPENVINO2
604  ov_status_e status;
605  ov_preprocess_input_tensor_info_t* input_tensor_info = NULL;
606  ov_preprocess_output_tensor_info_t* output_tensor_info = NULL;
607  ov_preprocess_input_model_info_t* input_model_info = NULL;
608  ov_model_t *tmp_ov_model;
609  ov_layout_t* NHWC_layout = NULL;
610  ov_layout_t* NCHW_layout = NULL;
611  const char* NHWC_desc = "NHWC";
612  const char* NCHW_desc = "NCHW";
613  const char* device = ctx->options.device_type;
614 #else
615  IEStatusCode status;
616  ie_available_devices_t a_dev;
617  ie_config_t config = {NULL, NULL, NULL};
618  char *all_dev_names = NULL;
619 #endif
620  // We scale pixel by default when do frame processing.
621  if (fabsf(ctx->options.scale) < 1e-6f)
622  ctx->options.scale = ov_model->model->func_type == DFT_PROCESS_FRAME ? 255 : 1;
623  // batch size
624  if (ctx->options.batch_size <= 0) {
625  ctx->options.batch_size = 1;
626  }
627 #if HAVE_OPENVINO2
 // OpenVINO 2 path does not support batching yet; clamp to 1.
628  if (ctx->options.batch_size > 1) {
629  avpriv_report_missing_feature(ctx, "Do not support batch_size > 1 for now,"
630  "change batch_size to 1.\n");
631  ctx->options.batch_size = 1;
632  }
633 
634  status = ov_preprocess_prepostprocessor_create(ov_model->ov_model, &ov_model->preprocess);
635  if (status != OK) {
636  av_log(ctx, AV_LOG_ERROR, "Failed to create preprocess for ov_model.\n");
638  goto err;
639  }
640 
641  if (input_name)
642  status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
643  else
644  status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
645  if (status != OK) {
646  av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
648  goto err;
649  }
650 
651  status = ov_preprocess_input_info_get_tensor_info(ov_model->input_info, &input_tensor_info);
652  if (status != OK) {
653  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input.\n");
655  goto err;
656  }
657 
658  //set input layout
659  status = ov_layout_create(NHWC_desc, &NHWC_layout);
660  status |= ov_layout_create(NCHW_desc, &NCHW_layout);
661  if (status != OK) {
662  av_log(ctx, AV_LOG_ERROR, "Failed to create layout for input.\n");
664  goto err;
665  }
666 
 // FFmpeg hands data to OpenVINO in NHWC order.
667  status = ov_preprocess_input_tensor_info_set_layout(input_tensor_info, NHWC_layout);
668  if (status != OK) {
669  av_log(ctx, AV_LOG_ERROR, "Failed to set input tensor layout\n");
671  goto err;
672  }
673 
674  status = ov_preprocess_input_info_get_model_info(ov_model->input_info, &input_model_info);
675  if (status != OK) {
676  av_log(ctx, AV_LOG_ERROR, "Failed to get input model info\n");
678  goto err;
679  }
 // Tell OpenVINO what layout the model itself expects (user-selected).
680  if (ctx->options.layout == DL_NCHW)
681  status = ov_preprocess_input_model_info_set_layout(input_model_info, NCHW_layout);
682  else if (ctx->options.layout == DL_NHWC)
683  status = ov_preprocess_input_model_info_set_layout(input_model_info, NHWC_layout);
684  if (status != OK) {
685  av_log(ctx, AV_LOG_ERROR, "Failed to get set input model layout\n");
687  goto err;
688  }
689 
690  status = ov_preprocess_input_tensor_info_set_element_type(input_tensor_info, U8);
691  if (status != OK) {
692  av_log(ctx, AV_LOG_ERROR, "Failed to set input element type\n");
694  goto err;
695  }
696 
 // nb_outputs == 0 means "use every output the model declares".
697  if (!nb_outputs) {
698  size_t output_size;
699  status = ov_model_outputs_size(ov_model->ov_model, &output_size);
700  if (status != OK) {
701  av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
703  goto err;
704  }
705  nb_outputs = output_size;
706  }
707  ov_model->nb_outputs = nb_outputs;
708  for (int i = 0; i < nb_outputs; i++) {
709  if (output_names)
710  status = ov_preprocess_prepostprocessor_get_output_info_by_name(
711  ov_model->preprocess, output_names[i], &ov_model->output_info);
712  else
713  status = ov_preprocess_prepostprocessor_get_output_info_by_index(
714  ov_model->preprocess, i, &ov_model->output_info);
715  if (status != OK) {
716  av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
718  goto err;
719  }
720  status |= ov_preprocess_output_info_get_tensor_info(ov_model->output_info, &output_tensor_info);
721  if (status != OK) {
722  av_log(ctx, AV_LOG_ERROR, "Failed to get tensor info from input/output.\n");
724  goto err;
725  }
 // Analytics outputs stay float; frame outputs stay U8 unless scale/mean
 // post-processing forces a float conversion.
726  if (ov_model->model->func_type != DFT_PROCESS_FRAME)
727  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
728  else if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f)
729  status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
730  else
731  status |= ov_preprocess_output_set_element_type(output_tensor_info, U8);
732  if (status != OK) {
733  av_log(ctx, AV_LOG_ERROR, "Failed to set output element type\n");
735  goto err;
736  }
737  ov_preprocess_output_tensor_info_free(output_tensor_info);
738  output_tensor_info = NULL;
739  ov_preprocess_output_info_free(ov_model->output_info);
740  ov_model->output_info = NULL;
741  }
742  // set preprocess steps.
743  if (fabsf(ctx->options.scale - 1) > 1e-6f || fabsf(ctx->options.mean) > 1e-6f) {
744  ov_preprocess_preprocess_steps_t* input_process_steps = NULL;
745  status = ov_preprocess_input_info_get_preprocess_steps(ov_model->input_info, &input_process_steps);
746  if (status != OK) {
747  av_log(ctx, AV_LOG_ERROR, "Failed to get preprocess steps\n");
749  goto err;
750  }
751  status = ov_preprocess_preprocess_steps_convert_element_type(input_process_steps, F32);
752  status |= ov_preprocess_preprocess_steps_mean(input_process_steps, ctx->options.mean);
753  status |= ov_preprocess_preprocess_steps_scale(input_process_steps, ctx->options.scale);
754  if (status != OK) {
755  av_log(ctx, AV_LOG_ERROR, "Failed to set preprocess steps\n");
756  ov_preprocess_preprocess_steps_free(input_process_steps);
757  input_process_steps = NULL;
759  goto err;
760  }
761  ov_preprocess_preprocess_steps_free(input_process_steps);
762  input_process_steps = NULL;
763  }
764  ov_preprocess_input_tensor_info_free(input_tensor_info);
765  input_tensor_info = NULL;
766  ov_preprocess_input_info_free(ov_model->input_info);
767  ov_model->input_info = NULL;
768 
769  //update model
 // Building the prepostprocessor yields a new ov_model; free the old one.
 // NOTE(review): tmp_ov_model is only assigned under the `if` but freed
 // unconditionally below — harmless only if ov_model->ov_model is always
 // non-NULL here; confirm.
770  if(ov_model->ov_model)
771  tmp_ov_model = ov_model->ov_model;
772  status = ov_preprocess_prepostprocessor_build(ov_model->preprocess, &ov_model->ov_model);
773  if (status != OK) {
774  av_log(ctx, AV_LOG_ERROR, "Failed to update OV model\n");
775  ov_model_free(tmp_ov_model);
776  tmp_ov_model = NULL;
778  goto err;
779  }
780  ov_model_free(tmp_ov_model);
781 
782  //update output_port
783  if (!ov_model->output_ports) {
784  ov_model->output_ports = av_calloc(nb_outputs, sizeof(*ov_model->output_ports));
785  if (!ov_model->output_ports) {
786  ret = AVERROR(ENOMEM);
787  goto err;
788  }
789  } else
790  for (int i = 0; i < nb_outputs; i++) {
791  ov_output_const_port_free(ov_model->output_ports[i]);
792  ov_model->output_ports[i] = NULL;
793  }
794 
795  for (int i = 0; i < nb_outputs; i++) {
796  char *port_name;
797  if (output_names)
798  status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
799  &ov_model->output_ports[i]);
800  else
801  status = ov_model_const_output_by_index(ov_model->ov_model, i,
802  &ov_model->output_ports[i]);
803  if (status != OK) {
 // NOTE(review): output_names may be NULL on the by-index path, making this
 // %s argument invalid — looks like a latent bug or dropped guard; verify.
804  av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
805  goto err;
806  }
807  status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
808  if (status != OK) {
809  av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
810  goto err;
811  }
812  av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
813  ov_free(port_name);
814  port_name = NULL;
815  }
816  //compile network
817  status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
818  if (status != OK) {
820  goto err;
821  }
822  ov_preprocess_input_model_info_free(input_model_info);
823  input_model_info = NULL;
824  ov_layout_free(NCHW_layout);
825  ov_layout_free(NHWC_layout);
826 #else
 // Legacy IE path supports real batching via network reshape.
827  if (ctx->options.batch_size > 1) {
828  input_shapes_t input_shapes;
829  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
830  if (status != OK) {
832  goto err;
833  }
834  for (int i = 0; i < input_shapes.shape_num; i++)
835  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
836  status = ie_network_reshape(ov_model->network, input_shapes);
837  ie_network_input_shapes_free(&input_shapes);
838  if (status != OK) {
840  goto err;
841  }
842  }
843 
844  // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
845  // while we pass NHWC data from FFmpeg to openvino
846  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
847  if (status != OK) {
848  if (status == NOT_FOUND) {
849  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set input layout as NHWC, "\
850  "all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
851  } else{
852  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
853  }
855  goto err;
856  }
 // NOTE(review): `output_name` is not declared in the visible code — the
 // extraction presumably dropped a declaration (or this function originally
 // took a single output name); verify against the full source.
857  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
858  if (status != OK) {
859  if (status == NOT_FOUND) {
860  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, failed to set output layout as NHWC, "\
861  "all output(s) are: \"%s\"\n", output_name, ov_model->all_output_names);
862  } else{
863  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
864  }
866  goto err;
867  }
868  ov_model->nb_outputs = 1;
869 
870  // all models in openvino open model zoo use BGR with range [0.0f, 255.0f] as input,
871  // we don't have a AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
872  // ask openvino to do the conversion internally.
873  // the current supported SR model (frame processing) is generated from tensorflow model,
874  // and its input is Y channel as float with range [0.0f, 1.0f], so do not set for this case.
875  // TODO: we need to get a final clear&general solution with all backends/formats considered.
876  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
877  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
878  if (status != OK) {
879  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
881  goto err;
882  }
883  }
884 
885  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
886  if (status != OK) {
887  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
 // On load failure, list the available devices to help the user pick one.
888  status = ie_core_get_available_devices(ov_model->core, &a_dev);
889  if (status != OK) {
890  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
892  goto err;
893  }
894  for (int i = 0; i < a_dev.num_devices; i++) {
895  APPEND_STRING(all_dev_names, a_dev.devices[i])
896  }
897  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
898  ctx->options.device_type, all_dev_names);
899  ret = AVERROR(ENODEV);
900  goto err;
901  }
902 #endif
903  // create infer_requests for async execution
904  if (ctx->options.nireq <= 0) {
905  // the default value is a rough estimation
906  ctx->options.nireq = av_cpu_count() / 2 + 1;
907  }
908 
909  ov_model->request_queue = ff_safe_queue_create();
910  if (!ov_model->request_queue) {
911  ret = AVERROR(ENOMEM);
912  goto err;
913  }
914 
 // Pre-allocate nireq reusable request items, each with its own infer request
 // and a per-batch lltasks array.
915  for (int i = 0; i < ctx->options.nireq; i++) {
916  OVRequestItem *item = av_mallocz(sizeof(*item));
917  if (!item) {
918  ret = AVERROR(ENOMEM);
919  goto err;
920  }
921 
922 #if HAVE_OPENVINO2
923  item->callback.callback_func = infer_completion_callback;
924 #else
925  item->callback.completeCallBackFunc = infer_completion_callback;
926 #endif
927  item->callback.args = item;
928  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
929  av_freep(&item);
930  ret = AVERROR(ENOMEM);
931  goto err;
932  }
933 
934 #if HAVE_OPENVINO2
935  status = ov_compiled_model_create_infer_request(ov_model->compiled_model, &item->infer_request);
936  if (status != OK) {
937  av_log(ctx, AV_LOG_ERROR, "Failed to Creates an inference request object.\n");
938  goto err;
939  }
940 #else
941  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
942  if (status != OK) {
944  goto err;
945  }
946 #endif
947 
948  item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
949  if (!item->lltasks) {
950  ret = AVERROR(ENOMEM);
951  goto err;
952  }
953  item->lltask_count = 0;
954  }
955 
956  ov_model->task_queue = ff_queue_create();
957  if (!ov_model->task_queue) {
958  ret = AVERROR(ENOMEM);
959  goto err;
960  }
961 
962  ov_model->lltask_queue = ff_queue_create();
963  if (!ov_model->lltask_queue) {
964  ret = AVERROR(ENOMEM);
965  goto err;
966  }
967 
968  return 0;
969 
970 err:
971 #if HAVE_OPENVINO2
972  if (output_tensor_info)
973  ov_preprocess_output_tensor_info_free(output_tensor_info);
974  if (ov_model->output_info)
975  ov_preprocess_output_info_free(ov_model->output_info);
976  if (NCHW_layout)
977  ov_layout_free(NCHW_layout);
978  if (NHWC_layout)
979  ov_layout_free(NHWC_layout);
980  if (input_model_info)
981  ov_preprocess_input_model_info_free(input_model_info);
982 #endif
 // dnn_free_model_ov tears down everything partially built above.
983  dnn_free_model_ov(&ov_model->model);
984  return ret;
985 }
986 
/**
 * Consume the last-level tasks queued in @inferenceq using the given infer
 * request, either asynchronously (completion via callback) or synchronously
 * (callback invoked inline, then compare done vs. todo counts).
 *
 * Ownership: if the queue is empty the surplus request (and its infer
 * handle) is freed here; on failure the request is recycled into the
 * request pool, or freed if the pool push itself fails.
 *
 * NOTE(review): this listing is a documentation dump; the embedded original
 * line numbers skip 1025/1032/1040/1051/1057/1065 — the statements that
 * mapped `status` into `ret` after each av_log() were dropped by the
 * extractor and are missing below. Do not assume `ret` stays 0 on those
 * paths in the real source.
 */
987 static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
988 {
989 #if HAVE_OPENVINO2
990  ov_status_e status;
991 #else
992  IEStatusCode status;
993 #endif
994  LastLevelTaskItem *lltask;
995  int ret = 0;
996  TaskItem *task;
997  OVContext *ctx;
998  OVModel *ov_model;
999 
// nothing queued for inference: this request is surplus — release it
1000  if (ff_queue_size(inferenceq) == 0) {
1001 #if HAVE_OPENVINO2
1002  ov_infer_request_free(request->infer_request);
1003 #else
1004  ie_infer_request_free(&request->infer_request);
1005 #endif
1006  av_freep(&request);
1007  return 0;
1008  }
1009 
// peek (not pop): fill_model_input_ov() pops the lltasks it batches
1010  lltask = ff_queue_peek_front(inferenceq);
1011  task = lltask->task;
1012  ov_model = task->model;
1013  ctx = &ov_model->ctx;
1014 
1015  ret = fill_model_input_ov(ov_model, request);
1016  if (ret != 0) {
1017  goto err;
1018  }
1019 
1020 #if HAVE_OPENVINO2
1021  if (task->async) {
1022  status = ov_infer_request_set_callback(request->infer_request, &request->callback);
1023  if (status != OK) {
1024  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1026  goto err;
1027  }
1028 
1029  status = ov_infer_request_start_async(request->infer_request);
1030  if (status != OK) {
1031  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1033  goto err;
1034  }
1035  return 0;
1036  } else {
// sync path: run inline and invoke the callback ourselves
1037  status = ov_infer_request_infer(request->infer_request);
1038  if (status != OK) {
1039  av_log(NULL, AV_LOG_ERROR, "Failed to start synchronous model inference for OV2\n");
1041  goto err;
1042  }
1043  infer_completion_callback(request);
1044  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1045  }
1046 #else
1047  if (task->async) {
1048  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1049  if (status != OK) {
1050  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1052  goto err;
1053  }
1054  status = ie_infer_request_infer_async(request->infer_request);
1055  if (status != OK) {
1056  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1058  goto err;
1059  }
1060  return 0;
1061  } else {
1062  status = ie_infer_request_infer(request->infer_request);
1063  if (status != OK) {
1064  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
1066  goto err;
1067  }
1068  infer_completion_callback(request);
1069  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
1070  }
1071 #endif
// failure: try to return the request to the pool; free it outright if that fails
1072 err:
1073  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
1074 #if HAVE_OPENVINO2
1075  ov_infer_request_free(request->infer_request);
1076 #else
1077  ie_infer_request_free(&request->infer_request);
1078 #endif
1079  av_freep(&request);
1080  }
1081  return ret;
1082 }
1083 
1084 static int get_input_ov(void *model, DNNData *input, const char *input_name)
1085 {
1086  OVModel *ov_model = model;
1087  OVContext *ctx = &ov_model->ctx;
1088  int input_resizable = ctx->options.input_resizable;
1089 
1090 #if HAVE_OPENVINO2
1091  ov_shape_t input_shape = {0};
1092  ov_element_type_e precision;
1093  ov_status_e status;
1094  if (input_name)
1095  status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
1096  else
1097  status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
1098  if (status != OK) {
1099  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1100  return ov2_map_error(status, NULL);
1101  }
1102  status = ov_port_get_element_type(ov_model->input_port, &precision);
1103  if (status != OK) {
1104  av_log(ctx, AV_LOG_ERROR, "Failed to get input port data type.\n");
1105  return ov2_map_error(status, NULL);
1106  }
1107  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1108  if (status != OK) {
1109  av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
1110  return ov2_map_error(status, NULL);
1111  }
1112  for (int i = 0; i < 4; i++)
1113  input->dims[i] = input_shape.dims[i];
1114  if (input_resizable) {
1115  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1116  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1117  }
1118 
1119  if (input_shape.dims[1] <= 3) // NCHW
1120  input->layout = DL_NCHW;
1121  else // NHWC
1122  input->layout = DL_NHWC;
1123 
1124  input->dt = precision_to_datatype(precision);
1125  ov_shape_free(&input_shape);
1126  return 0;
1127 #else
1128  char *model_input_name = NULL;
1129  IEStatusCode status;
1130  size_t model_input_count = 0;
1131  dimensions_t dims;
1132  precision_e precision;
1133  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
1134  if (status != OK) {
1135  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1136  return DNN_GENERIC_ERROR;
1137  }
1138  for (size_t i = 0; i < model_input_count; i++) {
1139  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
1140  if (status != OK) {
1141  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1142  return DNN_GENERIC_ERROR;
1143  }
1144  if (strcmp(model_input_name, input_name) == 0) {
1145  ie_network_name_free(&model_input_name);
1146  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
1147  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
1148  if (status != OK) {
1149  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
1150  return DNN_GENERIC_ERROR;
1151  }
1152 
1153  for (int i = 0; i < 4; i++)
1154  input->dims[i] = input_shape.dims[i];
1155  if (input_resizable) {
1156  input->dims[dnn_get_width_idx_by_layout(input->layout)] = -1;
1157  input->dims[dnn_get_height_idx_by_layout(input->layout)] = -1;
1158  }
1159 
1160  if (input_shape.dims[1] <= 3) // NCHW
1161  input->layout = DL_NCHW;
1162  else // NHWC
1163  input->layout = DL_NHWC;
1164 
1165  input->dt = precision_to_datatype(precision);
1166  return 0;
1167  }
1168 
1169  ie_network_name_free(&model_input_name);
1170  }
1171 
1172  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, ov_model->all_input_names);
1173  return AVERROR(EINVAL);
1174 #endif
1175 }
1176 
/*
 * Helper returning 1 when every detection bounding box attached to the frame
 * is usable (inside the frame bounds and passing a per-bbox check), and 0
 * when there is nothing detected or any bbox is invalid.
 *
 * NOTE(review): documentation-dump artifact — the extractor dropped original
 * lines 1177 (the function signature; presumably it takes the AVFrame *frame
 * used below — confirm against the real source), 1180 (likely the
 * AVDetectionBBoxHeader *header declaration assigned at 1192), 1183
 * (presumably the av_frame_get_side_data() call that sets `sd`) and 1206
 * (an if-condition guarding the early return at 1207).
 */
1178 {
1179  AVFrameSideData *sd;
1181  const AVDetectionBBox *bbox;
1182 
1184  if (!sd) { // this frame has nothing detected
1185  return 0;
1186  }
1187 
1188  if (!sd->size) {
1189  return 0;
1190  }
1191 
1192  header = (const AVDetectionBBoxHeader *)sd->data;
1193  if (!header->nb_bboxes) {
1194  return 0;
1195  }
1196 
// reject any bbox that sticks out of the frame on either axis
1197  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1198  bbox = av_get_detection_bbox(header, i);
1199  if (bbox->x < 0 || bbox->w < 0 || bbox->x + bbox->w >= frame->width) {
1200  return 0;
1201  }
1202  if (bbox->y < 0 || bbox->h < 0 || bbox->y + bbox->h >= frame->height) {
1203  return 0;
1204  }
1205 
1207  return 0;
1208  }
1209  }
1210 
1211  return 1;
1212 }
1213 
/**
 * Split a TaskItem into last-level tasks and queue them on @lltask_queue.
 *
 * For frame processing / detection the task maps to exactly one lltask.
 * For the remaining (classification) case one lltask is created per
 * detection bounding box, optionally filtered by exec_params->target.
 * task->inference_todo/inference_done are (re)initialized accordingly.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) on an unknown function type.
 *
 * NOTE(review): documentation-dump artifact — the extractor dropped original
 * lines 1233 (presumably "case DFT_ANALYTICS_CLASSIFY:", given the
 * bbox/target logic below), 1235 (likely the DNNExecClassificationParams
 * *params derivation from exec_params), 1238 (likely the header
 * declaration), 1243 (a guard returning 0, probably when the frame has no
 * valid detections), 1247 (the side-data fetch setting `sd`) and 1252 (the
 * av_get_detection_bbox() call setting `bbox`). Confirm against the real
 * source before relying on this listing.
 */
1214 static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
1215 {
1216  switch (func_type) {
1217  case DFT_PROCESS_FRAME:
1218  case DFT_ANALYTICS_DETECT:
1219  {
// single lltask covering the whole frame
1220  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
1221  if (!lltask) {
1222  return AVERROR(ENOMEM);
1223  }
1224  task->inference_todo = 1;
1225  task->inference_done = 0;
1226  lltask->task = task;
1227  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1228  av_freep(&lltask);
1229  return AVERROR(ENOMEM);
1230  }
1231  return 0;
1232  }
1234  {
1236  AVFrame *frame = task->in_frame;
1237  AVFrameSideData *sd;
1239 
1240  task->inference_todo = 0;
1241  task->inference_done = 0;
1242 
1244  return 0;
1245  }
1246 
1248  header = (const AVDetectionBBoxHeader *)sd->data;
1249 
// one lltask per (matching) bounding box
1250  for (uint32_t i = 0; i < header->nb_bboxes; i++) {
1251  LastLevelTaskItem *lltask;
1253 
1254  if (params->target) {
1255  if (av_strncasecmp(bbox->detect_label, params->target, sizeof(bbox->detect_label)) != 0) {
1256  continue;
1257  }
1258  }
1259 
1260  lltask = av_malloc(sizeof(*lltask));
1261  if (!lltask) {
1262  return AVERROR(ENOMEM);
1263  }
1264  task->inference_todo++;
1265  lltask->task = task;
1266  lltask->bbox_index = i;
1267  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
1268  av_freep(&lltask);
1269  return AVERROR(ENOMEM);
1270  }
1271  }
1272  return 0;
1273  }
1274  default:
1275  av_assert0(!"should not reach here");
1276  return AVERROR(EINVAL);
1277  }
1278 }
1279 
1280 static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
1281  const char *output_name, int *output_width, int *output_height)
1282 {
1283 #if HAVE_OPENVINO2
1284  ov_dimension_t dims[4] = {{1, 1}, {1, 1}, {input_height, input_height}, {input_width, input_width}};
1285  ov_status_e status;
1286  ov_shape_t input_shape = {0};
1287  ov_partial_shape_t partial_shape;
1288 #else
1289  IEStatusCode status;
1290  input_shapes_t input_shapes;
1291 #endif
1292  int ret;
1293  OVModel *ov_model = model;
1294  OVContext *ctx = &ov_model->ctx;
1295  TaskItem task;
1296  OVRequestItem *request;
1297  DNNExecBaseParams exec_params = {
1298  .input_name = input_name,
1299  .output_names = output_name ? &output_name : NULL,
1300  .nb_output = 1,
1301  .in_frame = NULL,
1302  .out_frame = NULL,
1303  };
1304 
1305  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
1306  av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
1307  return AVERROR(EINVAL);
1308  }
1309 
1310 #if HAVE_OPENVINO2
1311  if (ctx->options.input_resizable) {
1312  status = ov_partial_shape_create(4, dims, &partial_shape);
1313  if (status != OK) {
1314  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape.\n");
1315  return ov2_map_error(status, NULL);
1316  }
1317  status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
1318  if (status != OK) {
1319  av_log(ctx, AV_LOG_ERROR, "Failed to create shape for model input resize.\n");
1320  return ov2_map_error(status, NULL);
1321  }
1322  input_shape.dims[2] = input_height;
1323  input_shape.dims[3] = input_width;
1324 
1325  status = ov_shape_to_partial_shape(input_shape, &partial_shape);
1326  ov_shape_free(&input_shape);
1327  if (status != OK) {
1328  av_log(ctx, AV_LOG_ERROR, "Failed to create partial shape for model input resize.\n");
1329  return ov2_map_error(status, NULL);
1330  }
1331 
1332  status = ov_model_reshape_single_input(ov_model->ov_model, partial_shape);
1333  ov_partial_shape_free(&partial_shape);
1334  if (status != OK) {
1335  av_log(ctx, AV_LOG_ERROR, "Failed to reszie model input.\n");
1336  return ov2_map_error(status, NULL);
1337  }
1338  }
1339 
1340  if (!ov_model->compiled_model) {
1341 #else
1342  if (ctx->options.input_resizable) {
1343  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
1344  input_shapes.shapes->shape.dims[2] = input_height;
1345  input_shapes.shapes->shape.dims[3] = input_width;
1346  status |= ie_network_reshape(ov_model->network, input_shapes);
1347  ie_network_input_shapes_free(&input_shapes);
1348  if (status != OK) {
1349  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
1350  return DNN_GENERIC_ERROR;
1351  }
1352  }
1353  if (!ov_model->exe_network) {
1354 #endif
1355  ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
1356  if (ret != 0) {
1357  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
1358  return ret;
1359  }
1360  }
1361 
1362  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
1363  if (ret != 0) {
1364  goto err;
1365  }
1366 
1367  ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
1368  if (ret != 0) {
1369  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1370  goto err;
1371  }
1372 
1373  request = ff_safe_queue_pop_front(ov_model->request_queue);
1374  if (!request) {
1375  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1376  ret = AVERROR(EINVAL);
1377  goto err;
1378  }
1379 
1380  ret = execute_model_ov(request, ov_model->lltask_queue);
1381  *output_width = task.out_frame->width;
1382  *output_height = task.out_frame->height;
1383 err:
1384  av_frame_free(&task.out_frame);
1385  av_frame_free(&task.in_frame);
1386  return ret;
1387 }
1388 
/**
 * Create a DNNModel backed by OpenVINO: allocate the wrapper structs, parse
 * the backend options, create the OV core and read the network from
 * @model_filename. With the legacy IE API it additionally collects all
 * input/output node names for later lookup/diagnostics.
 *
 * Returns NULL on any failure; dnn_free_model_ov() cleans up partial state.
 * The compiled/executable network itself is created lazily at first
 * execution (see init_model_ov()).
 *
 * NOTE(review): documentation-dump artifact — original line 1420 (by the
 * symbol index, presumably "av_opt_set_defaults(ctx);" before option
 * parsing) was dropped by the extractor.
 */
1389 static DNNModel *dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
1390 {
1391  DNNModel *model = NULL;
1392  OVModel *ov_model = NULL;
1393  OVContext *ctx = NULL;
1394 #if HAVE_OPENVINO2
1395  ov_core_t* core = NULL;
1396  ov_model_t* ovmodel = NULL;
1397  ov_status_e status;
1398 #else
1399  size_t node_count = 0;
1400  char *node_name = NULL;
1401  IEStatusCode status;
1402 #endif
1403 
1404  model = av_mallocz(sizeof(DNNModel));
1405  if (!model){
1406  return NULL;
1407  }
1408 
1409  ov_model = av_mallocz(sizeof(OVModel));
1410  if (!ov_model) {
1411  av_freep(&model);
1412  return NULL;
1413  }
// wire the two structs together so either can reach the other
1414  model->model = ov_model;
1415  ov_model->model = model;
1416  ov_model->ctx.class = &dnn_openvino_class;
1417  ctx = &ov_model->ctx;
1418 
1419  //parse options
1421  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
1422  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
1423  goto err;
1424  }
1425 
1426 #if HAVE_OPENVINO2
1427  status = ov_core_create(&core);
1428  if (status != OK) {
1429  goto err;
1430  }
1431  ov_model->core = core;
1432 
1433  status = ov_core_read_model(core, model_filename, NULL, &ovmodel);
1434  if (status != OK) {
// include the runtime version in the diagnostic: a version mismatch is
// the most common cause of a read failure
1435  ov_version_t ver;
1436  status = ov_get_openvino_version(&ver);
1437  av_log(NULL, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1438  "Please check if the model version matches the runtime OpenVINO Version:\n",
1439  model_filename);
1440  if (status == OK) {
1441  av_log(NULL, AV_LOG_ERROR, "BuildNumber: %s\n", ver.buildNumber);
1442  }
1443  ov_version_free(&ver);
1444  goto err;
1445  }
1446  ov_model->ov_model = ovmodel;
1447 #else
1448  ov_model->all_input_names = NULL;
1449  ov_model->all_output_names = NULL;
1450 
1451  status = ie_core_create("", &ov_model->core);
1452  if (status != OK)
1453  goto err;
1454 
1455  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
1456  if (status != OK) {
1457  ie_version_t ver;
1458  ver = ie_c_api_version();
1459  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
1460  "Please check if the model version matches the runtime OpenVINO %s\n",
1461  model_filename, ver.api_version);
1462  ie_version_free(&ver);
1463  goto err;
1464  }
1465 
1466  //get all the input and output names
1467  status = ie_network_get_inputs_number(ov_model->network, &node_count);
1468  if (status != OK) {
1469  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
1470  goto err;
1471  }
1472  for (size_t i = 0; i < node_count; i++) {
1473  status = ie_network_get_input_name(ov_model->network, i, &node_name);
1474  if (status != OK) {
1475  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
1476  goto err;
1477  }
1478  APPEND_STRING(ov_model->all_input_names, node_name)
1479  ie_network_name_free(&node_name);
1480  }
1481  status = ie_network_get_outputs_number(ov_model->network, &node_count);
1482  if (status != OK) {
1483  av_log(ctx, AV_LOG_ERROR, "Failed to get output count\n");
1484  goto err;
1485  }
1486  for (size_t i = 0; i < node_count; i++) {
1487  status = ie_network_get_output_name(ov_model->network, i, &node_name);
1488  if (status != OK) {
1489  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d output's name\n", (int)i);
1490  goto err;
1491  }
1492  APPEND_STRING(ov_model->all_output_names, node_name)
1493  ie_network_name_free(&node_name);
1494  }
1495 #endif
1496 
1497  model->get_input = &get_input_ov;
1498  model->get_output = &get_output_ov;
1499  model->options = options;
1500  model->filter_ctx = filter_ctx;
1501  model->func_type = func_type;
1502 
1503  return model;
1504 
1505 err:
1506  dnn_free_model_ov(&model);
1507  return NULL;
1508 }
1509 
1510 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
1511 {
1512  OVModel *ov_model = model->model;
1513  OVContext *ctx = &ov_model->ctx;
1514  OVRequestItem *request;
1515  TaskItem *task;
1516  int ret;
1517 
1518  ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
1519  if (ret != 0) {
1520  return ret;
1521  }
1522 
1523 #if HAVE_OPENVINO2
1524  if (!ov_model->compiled_model) {
1525 #else
1526  if (!ov_model->exe_network) {
1527 #endif
1528  ret = init_model_ov(ov_model, exec_params->input_name,
1529  exec_params->output_names, exec_params->nb_output);
1530  if (ret != 0) {
1531  av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
1532  return ret;
1533  }
1534  }
1535 
1536  task = av_malloc(sizeof(*task));
1537  if (!task) {
1538  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
1539  return AVERROR(ENOMEM);
1540  }
1541 
1542  ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
1543  if (ret != 0) {
1544  av_freep(&task);
1545  return ret;
1546  }
1547 
1548  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
1549  av_freep(&task);
1550  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
1551  return AVERROR(ENOMEM);
1552  }
1553 
1554  ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
1555  if (ret != 0) {
1556  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
1557  return ret;
1558  }
1559 
1560  if (ctx->options.async) {
1561  while (ff_queue_size(ov_model->lltask_queue) >= ctx->options.batch_size) {
1562  request = ff_safe_queue_pop_front(ov_model->request_queue);
1563  if (!request) {
1564  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1565  return AVERROR(EINVAL);
1566  }
1567 
1568  ret = execute_model_ov(request, ov_model->lltask_queue);
1569  if (ret != 0) {
1570  return ret;
1571  }
1572  }
1573 
1574  return 0;
1575  }
1576  else {
1577  if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
1578  // Classification filter has not been completely
1579  // tested with the sync mode. So, do not support now.
1580  avpriv_report_missing_feature(ctx, "classify for sync execution");
1581  return AVERROR(ENOSYS);
1582  }
1583 
1584  if (ctx->options.batch_size > 1) {
1585  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
1586  return AVERROR(ENOSYS);
1587  }
1588 
1589  request = ff_safe_queue_pop_front(ov_model->request_queue);
1590  if (!request) {
1591  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1592  return AVERROR(EINVAL);
1593  }
1594  return execute_model_ov(request, ov_model->lltask_queue);
1595  }
1596 }
1597 
1598 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
1599 {
1600  OVModel *ov_model = model->model;
1601  return ff_dnn_get_result_common(ov_model->task_queue, in, out);
1602 }
1603 
/**
 * Flush any last-level tasks still queued: pop one infer request from the
 * pool, fill it with the pending inputs and start inference (synchronously
 * with the OV2 API, asynchronously with a completion callback on the legacy
 * IE API).
 *
 * @return 0 on success or when nothing is pending, a negative error code or
 *         DNN_GENERIC_ERROR on failure.
 *
 * NOTE(review): on every error path below the popped request is neither
 * returned to request_queue nor freed, permanently shrinking the request
 * pool — compare the err: handling in execute_model_ov(). Also, the OV2
 * branch runs a synchronous infer without invoking
 * infer_completion_callback() afterwards, so it is unclear here how the
 * outputs are collected and the request recycled — confirm against the
 * full source.
 */
1604 static int dnn_flush_ov(const DNNModel *model)
1605 {
1606  OVModel *ov_model = model->model;
1607  OVContext *ctx = &ov_model->ctx;
1608  OVRequestItem *request;
1609 #if HAVE_OPENVINO2
1610  ov_status_e status;
1611 #else
1612  IEStatusCode status;
1613 #endif
1614  int ret;
1615 
1616  if (ff_queue_size(ov_model->lltask_queue) == 0) {
1617  // no pending task need to flush
1618  return 0;
1619  }
1620 
1621  request = ff_safe_queue_pop_front(ov_model->request_queue);
1622  if (!request) {
1623  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
1624  return AVERROR(EINVAL);
1625  }
1626 
1627  ret = fill_model_input_ov(ov_model, request);
1628  if (ret != 0) {
1629  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
1630  return ret;
1631  }
1632 #if HAVE_OPENVINO2
1633  status = ov_infer_request_infer(request->infer_request);
1634  if (status != OK) {
1635  av_log(ctx, AV_LOG_ERROR, "Failed to start sync inference for OV2\n");
1636  return ov2_map_error(status, NULL);
1637  }
1638 #else
1639  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
1640  if (status != OK) {
1641  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
1642  return DNN_GENERIC_ERROR;
1643  }
1644  status = ie_infer_request_infer_async(request->infer_request);
1645  if (status != OK) {
1646  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
1647  return DNN_GENERIC_ERROR;
1648  }
1649 #endif
1650 
1651  return 0;
1652 }
1653 
// Backend vtable exported to the generic DNN layer (the declaration line,
// original line 1654 "const DNNModule ff_dnn_backend_openvino = {", was
// dropped by the documentation extractor; only the initializers remain).
1655  .load_model = dnn_load_model_ov,
1656  .execute_model = dnn_execute_model_ov,
1657  .get_result = dnn_get_result_ov,
1658  .flush = dnn_flush_ov,
1659  .free_model = dnn_free_model_ov,
1660 };
OVModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_openvino.c:80
ff_dnn_backend_openvino
const DNNModule ff_dnn_backend_openvino
OVModel::input_info
ov_preprocess_input_info_t * input_info
Definition: dnn_backend_openvino.c:67
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1640
OVRequestItem::callback
ie_complete_call_back_t callback
Definition: dnn_backend_openvino.c:92
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
OVContext::class
const AVClass * class
Definition: dnn_backend_openvino.c:55
opt.h
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:947
OVModel::nb_outputs
int nb_outputs
Definition: dnn_backend_openvino.c:81
OVModel::exe_network
ie_executable_network_t * exe_network
Definition: dnn_backend_openvino.c:74
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
get_input_ov
static int get_input_ov(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_openvino.c:1084
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
LastLevelTaskItem
Definition: dnn_backend_common.h:50
LastLevelTaskItem::bbox_index
uint32_t bbox_index
Definition: dnn_backend_common.h:52
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
AVFrame::width
int width
Definition: frame.h:447
OVOptions::async
uint8_t async
Definition: dnn_backend_openvino.c:46
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_openvino)
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1858
OVOptions::mean
float mean
Definition: dnn_backend_openvino.c:51
AVOption
AVOption.
Definition: opt.h:346
DNNModule::load_model
DNNModel *(* load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_interface.h:123
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
ov2_map_error
static int ov2_map_error(ov_status_e status, const char **desc)
Definition: dnn_backend_openvino.c:145
data
const char data[16]
Definition: mxf.c:148
OVModel::core
ie_core_t * core
Definition: dnn_backend_openvino.c:72
FLAGS
#define FLAGS
Definition: cmdutils.c:581
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:77
dnn_io_proc.h
TaskItem
Definition: dnn_backend_common.h:36
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
AVDetectionBBox::y
int y
Definition: detection_bbox.h:32
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OVModel
Definition: dnn_backend_openvino.c:59
OVOptions::batch_size
int batch_size
Definition: dnn_backend_openvino.c:47
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
init_model_ov
static int init_model_ov(OVModel *ov_model, const char *input_name, const char **output_names, int nb_outputs)
Definition: dnn_backend_openvino.c:599
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:137
AVDetectionBBox::detect_label
char detect_label[AV_DETECTION_BBOX_LABEL_NAME_MAX_SIZE]
Detect result with confidence.
Definition: detection_bbox.h:41
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
OVModel::output_info
ov_preprocess_output_info_t * output_info
Definition: dnn_backend_openvino.c:69
av_opt_free
void av_opt_free(void *obj)
Free all allocated objects in obj.
Definition: opt.c:1910
OVRequestItem::infer_request
ov_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:89
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
OVModel::output_ports
ov_output_const_port_t ** output_ports
Definition: dnn_backend_openvino.c:68
OVOptions::device_type
char * device_type
Definition: dnn_backend_openvino.c:44
fabsf
static __device__ float fabsf(float a)
Definition: cuda_runtime.h:181
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:62
Queue
Linear double-ended data structure.
Definition: queue.c:33
av_get_detection_bbox
static av_always_inline AVDetectionBBox * av_get_detection_bbox(const AVDetectionBBoxHeader *header, unsigned int idx)
Definition: detection_bbox.h:84
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFrameSideData::size
size_t size
Definition: frame.h:253
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
float
float
Definition: af_crystalizer.c:121
desc
const char * desc
Definition: dnn_backend_openvino.c:123
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
DNNExecClassificationParams
Definition: dnn_interface.h:84
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
OVOptions::layout
DNNLayout layout
Definition: dnn_backend_openvino.c:49
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:65
execute_model_ov
static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
Definition: dnn_backend_openvino.c:987
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
DL_NCHW
@ DL_NCHW
Definition: dnn_interface.h:61
dnn_free_model_ov
static void dnn_free_model_ov(DNNModel **model)
Definition: dnn_backend_openvino.c:533
OVRequestItem::infer_request
ie_infer_request_t * infer_request
Definition: dnn_backend_openvino.c:93
OVModel::preprocess
ov_preprocess_prepostprocessor_t * preprocess
Definition: dnn_backend_openvino.c:70
DNN_OV
@ DNN_OV
Definition: dnn_interface.h:35
if
if(ret)
Definition: filter_design.txt:179
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
DNNExecClassificationParams::target
const char * target
Definition: dnn_interface.h:86
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OVModel::all_input_names
const char * all_input_names
Definition: dnn_backend_openvino.c:75
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
OVRequestItem::lltask_count
uint32_t lltask_count
Definition: dnn_backend_openvino.c:87
av_err
int av_err
Definition: dnn_backend_openvino.c:122
OVModel::network
ie_network_t * network
Definition: dnn_backend_openvino.c:73
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
AVDetectionBBoxHeader
Definition: detection_bbox.h:56
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
get_output_ov
static int get_output_ov(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_openvino.c:1280
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
OVRequestItem::lltasks
LastLevelTaskItem ** lltasks
Definition: dnn_backend_openvino.c:86
OVModel::ctx
OVContext ctx
Definition: dnn_backend_openvino.c:60
OVRequestItem
Definition: dnn_backend_openvino.c:85
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:209
av_strncasecmp
int av_strncasecmp(const char *a, const char *b, size_t n)
Locale-independent case-insensitive compare.
Definition: avstring.c:217
get_datatype_size
static int get_datatype_size(DNNDataType dt)
Definition: dnn_backend_openvino.c:183
options
const OptionDef options[]
f
f
Definition: af_crystalizer.c:121
OVModel::compiled_model
ov_compiled_model_t * compiled_model
Definition: dnn_backend_openvino.c:65
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
AVDetectionBBox::w
int w
Definition: detection_bbox.h:33
cpu.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
DNNLayout
DNNLayout
Definition: dnn_interface.h:59
OVModel::model
DNNModel * model
Definition: dnn_backend_openvino.c:61
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:37
OVModel::all_output_names
const char * all_output_names
Definition: dnn_backend_openvino.c:76
header
static const uint8_t header[24]
Definition: sdr2.c:68
AVDetectionBBox::classify_count
uint32_t classify_count
Definition: detection_bbox.h:51
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:59
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
contain_valid_detection_bbox
static int contain_valid_detection_bbox(AVFrame *frame)
Definition: dnn_backend_openvino.c:1177
AV_OPT_TYPE_FLOAT
@ AV_OPT_TYPE_FLOAT
Definition: opt.h:238
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
OVOptions::input_resizable
int input_resizable
Definition: dnn_backend_openvino.c:48
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
dnn_openvino_options
static const AVOption dnn_openvino_options[]
Definition: dnn_backend_openvino.c:103
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_openvino.c:355
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
OVModel::ov_model
ov_model_t * ov_model
Definition: dnn_backend_openvino.c:64
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
OVModel::core
ov_core_t * core
Definition: dnn_backend_openvino.c:63
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
safe_queue.h
OVModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_openvino.c:78
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
OVContext
Definition: dnn_backend_openvino.c:54
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
DNNModel::classify_post_proc
ClassifyPostProc classify_post_proc
Definition: dnn_interface.h:117
ret
ret
Definition: filter_design.txt:187
OVOptions::scale
float scale
Definition: dnn_backend_openvino.c:50
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVDetectionBBox::h
int h
Definition: detection_bbox.h:34
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
OFFSET
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your see the OFFSET() macro
OVModel::task_queue
Queue * task_queue
Definition: dnn_backend_openvino.c:79
DFT_ANALYTICS_CLASSIFY
@ DFT_ANALYTICS_CLASSIFY
Definition: dnn_interface.h:56
AVFrame::height
int height
Definition: frame.h:447
extract_lltask_from_task
static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
Definition: dnn_backend_openvino.c:1214
status
ov_status_e status
Definition: dnn_backend_openvino.c:121
dnn_backend_common.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:235
AVDetectionBBox::x
int x
Distance in pixels from the left/top edge of the frame, together with width and height,...
Definition: detection_bbox.h:31
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DNNExecBaseParams::output_names
const char ** output_names
Definition: dnn_interface.h:78
DL_NONE
@ DL_NONE
Definition: dnn_interface.h:60
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
DNNModel
Definition: dnn_interface.h:93
precision_to_datatype
static DNNDataType precision_to_datatype(ov_element_type_e precision) static DNNDataType precision_to_datatype(precision_e precision)
Definition: dnn_backend_openvino.c:162
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:142
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
AV_NUM_DETECTION_BBOX_CLASSIFY
#define AV_NUM_DETECTION_BBOX_CLASSIFY
At most 4 classifications based on the detected bounding box.
Definition: detection_bbox.h:50
DNNModel::options
const char * options
Definition: dnn_interface.h:97
OVOptions::nireq
int nireq
Definition: dnn_backend_openvino.c:45
ff_frame_to_dnn_classify
int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
Definition: dnn_io_proc.c:340
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
OVOptions
Definition: dnn_backend_openvino.c:43
DNNExecBaseParams
Definition: dnn_interface.h:76
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
OVModel::input_port
ov_output_const_port_t * input_port
Definition: dnn_backend_openvino.c:66
AVDetectionBBox
Definition: detection_bbox.h:26
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
OVRequestItem::callback
ov_callback_t callback
Definition: dnn_backend_openvino.c:90
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:239
DCO_BGR
@ DCO_BGR
Definition: dnn_interface.h:41
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
ov2_errors
static const struct @253 ov2_errors[]
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:244
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
detection_bbox.h
fill_model_input_ov
static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
Definition: dnn_backend_openvino.c:197
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194
DNNModule
Definition: dnn_interface.h:121
OVContext::options
OVOptions options
Definition: dnn_backend_openvino.c:56
DNNExecBaseParams::nb_output
uint32_t nb_output
Definition: dnn_interface.h:79
DNNModel::model
void * model
Definition: dnn_interface.h:95
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42