FFmpeg
dnn_backend_openvino.c
1 /*
2  * Copyright (c) 2020
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN OpenVINO backend implementation.
24  */
25 
26 #include "dnn_backend_openvino.h"
27 #include "dnn_io_proc.h"
28 #include "libavformat/avio.h"
29 #include "libavutil/avassert.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/avstring.h"
32 #include "../internal.h"
33 #include "queue.h"
34 #include "safe_queue.h"
35 #include <c_api/ie_c_api.h>
36 
37 typedef struct OVOptions{
38  char *device_type;
 39  int nireq;
 40  int batch_size;
 41  int input_resizable;
42 } OVOptions;
43 
44 typedef struct OVContext {
 45  const AVClass *class;
 46  OVOptions options;
47 } OVContext;
48 
 49 typedef struct OVModel{
 50  OVContext ctx;
 51  DNNModel *model;
52  ie_core_t *core;
53  ie_network_t *network;
54  ie_executable_network_t *exe_network;
55  ie_infer_request_t *infer_request;
56 
57  /* for async execution */
58  SafeQueue *request_queue; // holds RequestItem
59  Queue *task_queue; // holds TaskItem
60 } OVModel;
61 
 62 typedef struct TaskItem {
 63  OVModel *ov_model;
 64  const char *input_name;
 65  AVFrame *in_frame;
 66  const char *output_name;
 67  AVFrame *out_frame;
68  int do_ioproc;
69  int async;
70  int done;
71 } TaskItem;
72 
73 typedef struct RequestItem {
 74  ie_infer_request_t *infer_request;
 75  TaskItem **tasks;
 76  int task_count;
 77  ie_complete_call_back_t callback;
78 } RequestItem;
79 
80 #define APPEND_STRING(generated_string, iterate_string) \
81  generated_string = generated_string ? av_asprintf("%s %s", generated_string, iterate_string) : \
82  av_asprintf("%s", iterate_string);
83 
84 #define OFFSET(x) offsetof(OVContext, x)
85 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
86 static const AVOption dnn_openvino_options[] = {
87  { "device", "device to run model", OFFSET(options.device_type), AV_OPT_TYPE_STRING, { .str = "CPU" }, 0, 0, FLAGS },
 88  { "nireq", "number of requests", OFFSET(options.nireq), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
89  { "batch_size", "batch size per request", OFFSET(options.batch_size), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 1000, FLAGS},
90  { "input_resizable", "can input be resizable or not", OFFSET(options.input_resizable), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
91  { NULL }
92 };
93 
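/* Example (a sketch, not part of this file): the backend options above are parsed
 * from a single string using '=' and '&' as separators (see av_opt_set_from_string()
 * in ff_dnn_load_model_ov() below), so a caller would typically hand this backend
 * something like "device=CPU&nireq=4&batch_size=4&input_resizable=1"; the exact
 * filter-side option that carries this string lives outside this file. */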
94 AVFILTER_DEFINE_CLASS(dnn_openvino);
95 
96 static DNNDataType precision_to_datatype(precision_e precision)
97 {
98  switch (precision)
99  {
100  case FP32:
101  return DNN_FLOAT;
102  case U8:
103  return DNN_UINT8;
104  default:
105  av_assert0(!"not supported yet.");
106  return DNN_FLOAT;
107  }
108 }
109 
110 static int get_datatype_size(DNNDataType dt)
111 {
112  switch (dt)
113  {
114  case DNN_FLOAT:
115  return sizeof(float);
116  case DNN_UINT8:
117  return sizeof(uint8_t);
118  default:
119  av_assert0(!"not supported yet.");
120  return 1;
121  }
122 }
123 
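/* Fill the input blob of @p request from all tasks queued on it: fetch the blob named
 * by tasks[0]->input_name, then for each task run the model's pre_proc (or the default
 * ff_proc_from_frame_to_dnn()) into the blob, advancing the data pointer by one image
 * (width * height * channels * datatype size) per task. */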
124 static DNNReturnType fill_model_input_ov(OVModel *ov_model, RequestItem *request)
125 {
126  dimensions_t dims;
127  precision_e precision;
128  ie_blob_buffer_t blob_buffer;
129  OVContext *ctx = &ov_model->ctx;
130  IEStatusCode status;
131  DNNData input;
132  ie_blob_t *input_blob = NULL;
133  TaskItem *task = request->tasks[0];
134 
135  status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
136  if (status != OK) {
137  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
138  return DNN_ERROR;
139  }
140 
141  status |= ie_blob_get_dims(input_blob, &dims);
142  status |= ie_blob_get_precision(input_blob, &precision);
143  if (status != OK) {
144  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
145  return DNN_ERROR;
146  }
147 
148  status = ie_blob_get_buffer(input_blob, &blob_buffer);
149  if (status != OK) {
150  av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
151  return DNN_ERROR;
152  }
153 
154  input.height = dims.dims[2];
155  input.width = dims.dims[3];
156  input.channels = dims.dims[1];
157  input.data = blob_buffer.buffer;
158  input.dt = precision_to_datatype(precision);
159  // All models in the OpenVINO Open Model Zoo use BGR as input;
160  // change this to an option when necessary.
161  input.order = DCO_BGR;
162 
163  av_assert0(request->task_count <= dims.dims[0]);
164  for (int i = 0; i < request->task_count; ++i) {
165  task = request->tasks[i];
166  if (task->do_ioproc) {
167  if (ov_model->model->pre_proc != NULL) {
168  ov_model->model->pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
169  } else {
170  ff_proc_from_frame_to_dnn(task->in_frame, &input, ov_model->model->func_type, ctx);
171  }
172  }
173  input.data = (uint8_t *)input.data
174  + input.width * input.height * input.channels * get_datatype_size(input.dt);
175  }
176  ie_blob_free(&input_blob);
177 
178  return DNN_SUCCESS;
179 }
180 
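/* Completion callback shared by the sync and async paths: fetch the output blob named
 * by tasks[0]->output_name, run each task's post_proc (or ff_proc_from_dnn_to_frame())
 * on its slice of the batched output, mark the tasks done, and for async execution
 * push the RequestItem back onto the model's request_queue for reuse. */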
181 static void infer_completion_callback(void *args)
182 {
183  dimensions_t dims;
184  precision_e precision;
185  IEStatusCode status;
186  RequestItem *request = args;
187  TaskItem *task = request->tasks[0];
188  SafeQueue *requestq = task->ov_model->request_queue;
189  ie_blob_t *output_blob = NULL;
190  ie_blob_buffer_t blob_buffer;
191  DNNData output;
192  OVContext *ctx = &task->ov_model->ctx;
193 
194  status = ie_infer_request_get_blob(request->infer_request, task->output_name, &output_blob);
195  if (status != OK) {
196  //incorrect output name
197  char *model_output_name = NULL;
198  char *all_output_names = NULL;
199  size_t model_output_count = 0;
200  av_log(ctx, AV_LOG_ERROR, "Failed to get model output data\n");
201  status = ie_network_get_outputs_number(task->ov_model->network, &model_output_count);
202  for (size_t i = 0; i < model_output_count; i++) {
203  status = ie_network_get_output_name(task->ov_model->network, i, &model_output_name);
204  APPEND_STRING(all_output_names, model_output_name)
205  }
206  av_log(ctx, AV_LOG_ERROR,
 207  "output \"%s\" may not be correct, all output(s) are: \"%s\"\n",
208  task->output_name, all_output_names);
209  return;
210  }
211 
212  status = ie_blob_get_buffer(output_blob, &blob_buffer);
213  if (status != OK) {
214  av_log(ctx, AV_LOG_ERROR, "Failed to access output memory\n");
215  return;
216  }
217 
218  status |= ie_blob_get_dims(output_blob, &dims);
219  status |= ie_blob_get_precision(output_blob, &precision);
220  if (status != OK) {
221  av_log(ctx, AV_LOG_ERROR, "Failed to get dims or precision of output\n");
222  return;
223  }
224 
225  output.channels = dims.dims[1];
226  output.height = dims.dims[2];
227  output.width = dims.dims[3];
228  output.dt = precision_to_datatype(precision);
229  output.data = blob_buffer.buffer;
230 
231  av_assert0(request->task_count <= dims.dims[0]);
232  av_assert0(request->task_count >= 1);
233  for (int i = 0; i < request->task_count; ++i) {
234  task = request->tasks[i];
235  if (task->do_ioproc) {
236  if (task->ov_model->model->post_proc != NULL) {
237  task->ov_model->model->post_proc(task->out_frame, &output, task->ov_model->model->filter_ctx);
238  } else {
239  ff_proc_from_dnn_to_frame(task->out_frame, &output, ctx);
240  }
241  } else {
242  task->out_frame->width = output.width;
243  task->out_frame->height = output.height;
244  }
245  task->done = 1;
246  output.data = (uint8_t *)output.data
247  + output.width * output.height * output.channels * get_datatype_size(output.dt);
248  }
249  ie_blob_free(&output_blob);
250 
251  request->task_count = 0;
252 
253  if (task->async) {
254  if (ff_safe_queue_push_back(requestq, request) < 0) {
255  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
256  return;
257  }
258  }
259 }
260 
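/* One-time setup of the executable network, deferred until the input/output names are
 * known: reshape the network for the requested batch size, set NHWC layout on input and
 * output (FFmpeg frames are NHWC while OpenVINO defaults to NCHW), optionally force U8
 * input precision, load the network onto the configured device, and create one sync
 * infer request plus nireq async RequestItems wired to infer_completion_callback(). */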
261 static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
262 {
263  OVContext *ctx = &ov_model->ctx;
264  IEStatusCode status;
265  ie_available_devices_t a_dev;
266  ie_config_t config = {NULL, NULL, NULL};
267  char *all_dev_names = NULL;
268 
269  // batch size
270  if (ctx->options.batch_size <= 0) {
271  ctx->options.batch_size = 1;
272  }
273 
274  if (ctx->options.batch_size > 1) {
275  input_shapes_t input_shapes;
276  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
277  if (status != OK)
278  goto err;
279  for (int i = 0; i < input_shapes.shape_num; i++)
280  input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
281  status = ie_network_reshape(ov_model->network, input_shapes);
282  ie_network_input_shapes_free(&input_shapes);
283  if (status != OK)
284  goto err;
285  }
286 
 287  // The dims order in OpenVINO is fixed and is always NCHW for 4-D data,
 288  // while we pass NHWC data from FFmpeg to OpenVINO.
289  status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
290  if (status != OK) {
291  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
292  goto err;
293  }
294  status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
295  if (status != OK) {
296  av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
297  goto err;
298  }
299 
 300  // All models in the OpenVINO Open Model Zoo use BGR with range [0.0f, 255.0f] as input;
 301  // we don't have an AVPixelFormat to describe it, so we'll use AV_PIX_FMT_BGR24 and
 302  // ask OpenVINO to do the conversion internally.
 303  // The currently supported SR model (frame processing) is generated from a TensorFlow model,
 304  // and its input is the Y channel as float with range [0.0f, 1.0f], so do not set U8 for this case.
 305  // TODO: we need a final, clear and general solution with all backends/formats considered.
306  if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
307  status = ie_network_set_input_precision(ov_model->network, input_name, U8);
308  if (status != OK) {
309  av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
310  return DNN_ERROR;
311  }
312  }
313 
314  status = ie_core_load_network(ov_model->core, ov_model->network, ctx->options.device_type, &config, &ov_model->exe_network);
315  if (status != OK) {
316  av_log(ctx, AV_LOG_ERROR, "Failed to load OpenVINO model network\n");
317  status = ie_core_get_available_devices(ov_model->core, &a_dev);
318  if (status != OK) {
319  av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
320  goto err;
321  }
322  for (int i = 0; i < a_dev.num_devices; i++) {
323  APPEND_STRING(all_dev_names, a_dev.devices[i])
324  }
325  av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
326  ctx->options.device_type, all_dev_names);
327  goto err;
328  }
329 
330  // create infer_request for sync execution
331  status = ie_exec_network_create_infer_request(ov_model->exe_network, &ov_model->infer_request);
332  if (status != OK)
333  goto err;
334 
335  // create infer_requests for async execution
336  if (ctx->options.nireq <= 0) {
337  // the default value is a rough estimation
338  ctx->options.nireq = av_cpu_count() / 2 + 1;
339  }
340 
341  ov_model->request_queue = ff_safe_queue_create();
342  if (!ov_model->request_queue) {
343  goto err;
344  }
345 
346  for (int i = 0; i < ctx->options.nireq; i++) {
347  RequestItem *item = av_mallocz(sizeof(*item));
348  if (!item) {
349  goto err;
350  }
351 
352  status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
353  if (status != OK) {
354  av_freep(&item);
355  goto err;
356  }
357 
358  item->tasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->tasks));
359  if (!item->tasks) {
360  av_freep(&item);
361  goto err;
362  }
363  item->task_count = 0;
364 
365  item->callback.completeCallBackFunc = infer_completion_callback;
366  item->callback.args = item;
367  if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
368  av_freep(&item);
369  goto err;
370  }
371  }
372 
373  ov_model->task_queue = ff_queue_create();
374  if (!ov_model->task_queue) {
375  goto err;
376  }
377 
378  return DNN_SUCCESS;
379 
380 err:
381  ff_dnn_free_model_ov(&ov_model->model);
382  return DNN_ERROR;
383 }
384 
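/* Run one RequestItem. Async: if the batch is not yet full, park the request back at the
 * front of request_queue and return; otherwise fill the input blob, attach the completion
 * callback and start the inference asynchronously. Sync: fill the input, run the inference,
 * then invoke infer_completion_callback() inline. */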
385 static DNNReturnType execute_model_ov(RequestItem *request)
386 {
387  IEStatusCode status;
388  DNNReturnType ret;
389  TaskItem *task = request->tasks[0];
390  OVContext *ctx = &task->ov_model->ctx;
391 
392  if (task->async) {
393  if (request->task_count < ctx->options.batch_size) {
394  if (ff_safe_queue_push_front(task->ov_model->request_queue, request) < 0) {
 395  av_log(ctx, AV_LOG_ERROR, "Failed to push front request_queue.\n");
396  return DNN_ERROR;
397  }
398  return DNN_SUCCESS;
399  }
400  ret = fill_model_input_ov(task->ov_model, request);
401  if (ret != DNN_SUCCESS) {
402  return ret;
403  }
404  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
405  if (status != OK) {
406  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
407  return DNN_ERROR;
408  }
409  status = ie_infer_request_infer_async(request->infer_request);
410  if (status != OK) {
411  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
412  return DNN_ERROR;
413  }
414  return DNN_SUCCESS;
415  } else {
416  ret = fill_model_input_ov(task->ov_model, request);
417  if (ret != DNN_SUCCESS) {
418  return ret;
419  }
420  status = ie_infer_request_infer(request->infer_request);
421  if (status != OK) {
422  av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
423  return DNN_ERROR;
424  }
425  infer_completion_callback(request);
426  return task->done ? DNN_SUCCESS : DNN_ERROR;
427  }
428 }
429 
430 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
431 {
432  OVModel *ov_model = model;
433  OVContext *ctx = &ov_model->ctx;
434  char *model_input_name = NULL;
435  char *all_input_names = NULL;
436  IEStatusCode status;
437  size_t model_input_count = 0;
438  dimensions_t dims;
439  precision_e precision;
440  int input_resizable = ctx->options.input_resizable;
441 
442  status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
443  if (status != OK) {
444  av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
445  return DNN_ERROR;
446  }
447 
448  for (size_t i = 0; i < model_input_count; i++) {
449  status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
450  if (status != OK) {
451  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
452  return DNN_ERROR;
453  }
454  if (strcmp(model_input_name, input_name) == 0) {
455  ie_network_name_free(&model_input_name);
456  status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
457  status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
458  if (status != OK) {
459  av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
460  return DNN_ERROR;
461  }
462 
463  input->channels = dims.dims[1];
464  input->height = input_resizable ? -1 : dims.dims[2];
465  input->width = input_resizable ? -1 : dims.dims[3];
466  input->dt = precision_to_datatype(precision);
467  return DNN_SUCCESS;
468  } else {
469  //incorrect input name
470  APPEND_STRING(all_input_names, model_input_name)
471  }
472 
473  ie_network_name_free(&model_input_name);
474  }
475 
476  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
477  return DNN_ERROR;
478 }
479 
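/* Discover the output size for a given input size by running one dummy inference with
 * do_ioproc disabled, optionally reshaping the network first when input_resizable is set;
 * the resulting width/height are read back from the scratch output frame. */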
480 static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
481  const char *output_name, int *output_width, int *output_height)
482 {
483  DNNReturnType ret;
484  OVModel *ov_model = model;
485  OVContext *ctx = &ov_model->ctx;
486  TaskItem task;
487  RequestItem request;
488  AVFrame *in_frame = av_frame_alloc();
489  AVFrame *out_frame = NULL;
490  TaskItem *ptask = &task;
491  IEStatusCode status;
492  input_shapes_t input_shapes;
493 
494  if (!in_frame) {
495  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
496  return DNN_ERROR;
497  }
498  out_frame = av_frame_alloc();
499  if (!out_frame) {
500  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
501  av_frame_free(&in_frame);
502  return DNN_ERROR;
503  }
504  in_frame->width = input_width;
505  in_frame->height = input_height;
506 
507  if (ctx->options.input_resizable) {
508  status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
509  input_shapes.shapes->shape.dims[2] = input_height;
510  input_shapes.shapes->shape.dims[3] = input_width;
511  status |= ie_network_reshape(ov_model->network, input_shapes);
512  ie_network_input_shapes_free(&input_shapes);
513  if (status != OK) {
514  av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
515  return DNN_ERROR;
516  }
517  }
518 
519  if (!ov_model->exe_network) {
520  if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
 521  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
522  return DNN_ERROR;
523  }
524  }
525 
526  task.done = 0;
527  task.do_ioproc = 0;
528  task.async = 0;
529  task.input_name = input_name;
530  task.in_frame = in_frame;
531  task.output_name = output_name;
532  task.out_frame = out_frame;
533  task.ov_model = ov_model;
534 
535  request.infer_request = ov_model->infer_request;
536  request.task_count = 1;
537  request.tasks = &ptask;
538 
539  ret = execute_model_ov(&request);
540  *output_width = out_frame->width;
541  *output_height = out_frame->height;
542 
543  av_frame_free(&out_frame);
544  av_frame_free(&in_frame);
545  return ret;
546 }
547 
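/* Load an OpenVINO IR model: allocate the DNNModel/OVModel pair, parse the backend
 * options string, create an ie_core_t and read the network from model_filename (an IR
 * .xml file; the matching weights are presumably located next to it by the runtime).
 * The executable network itself is created lazily in init_model_ov() once the input
 * and output names are known. */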
548 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
549 {
550  DNNModel *model = NULL;
551  OVModel *ov_model = NULL;
552  OVContext *ctx = NULL;
553  IEStatusCode status;
554 
555  model = av_mallocz(sizeof(DNNModel));
556  if (!model){
557  return NULL;
558  }
559 
560  ov_model = av_mallocz(sizeof(OVModel));
561  if (!ov_model) {
562  av_freep(&model);
563  return NULL;
564  }
565  model->model = ov_model;
566  ov_model->model = model;
567  ov_model->ctx.class = &dnn_openvino_class;
568  ctx = &ov_model->ctx;
569 
570  //parse options
571  av_opt_set_defaults(ctx);
572  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
573  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
574  goto err;
575  }
576 
577  status = ie_core_create("", &ov_model->core);
578  if (status != OK)
579  goto err;
580 
581  status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
582  if (status != OK) {
583  ie_version_t ver;
584  ver = ie_c_api_version();
585  av_log(ctx, AV_LOG_ERROR, "Failed to read the network from model file %s,\n"
586  "Please check if the model version matches the runtime OpenVINO %s\n",
587  model_filename, ver.api_version);
588  ie_version_free(&ver);
589  goto err;
590  }
591 
592  model->get_input = &get_input_ov;
593  model->get_output = &get_output_ov;
594  model->options = options;
595  model->filter_ctx = filter_ctx;
596  model->func_type = func_type;
597 
598  return model;
599 
600 err:
601  ff_dnn_free_model_ov(&model);
602  return NULL;
603 }
604 
605 DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
606  const char **output_names, uint32_t nb_output, AVFrame *out_frame)
607 {
608  OVModel *ov_model = model->model;
609  OVContext *ctx = &ov_model->ctx;
610  TaskItem task;
611  RequestItem request;
612  TaskItem *ptask = &task;
613 
614  if (!in_frame) {
 615  av_log(ctx, AV_LOG_ERROR, "in frame is NULL when executing the model.\n");
616  return DNN_ERROR;
617  }
618 
619  if (!out_frame && model->func_type == DFT_PROCESS_FRAME) {
 620  av_log(ctx, AV_LOG_ERROR, "out frame is NULL when executing the model.\n");
621  return DNN_ERROR;
622  }
623 
624  if (nb_output != 1) {
625  // currently, the filter does not need multiple outputs,
 626  // so we just postpone the support until we really need it.
627  avpriv_report_missing_feature(ctx, "multiple outputs");
628  return DNN_ERROR;
629  }
630 
631  if (ctx->options.batch_size > 1) {
632  avpriv_report_missing_feature(ctx, "batch mode for sync execution");
633  return DNN_ERROR;
634  }
635 
636  if (!ov_model->exe_network) {
637  if (init_model_ov(ov_model, input_name, output_names[0]) != DNN_SUCCESS) {
 638  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
639  return DNN_ERROR;
640  }
641  }
642 
643  task.done = 0;
644  task.do_ioproc = 1;
645  task.async = 0;
646  task.input_name = input_name;
647  task.in_frame = in_frame;
648  task.output_name = output_names[0];
649  task.out_frame = out_frame;
650  task.ov_model = ov_model;
651 
652  request.infer_request = ov_model->infer_request;
653  request.task_count = 1;
654  request.tasks = &ptask;
655 
656  return execute_model_ov(&request);
657 }
658 
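/* Async entry point: wrap the frames in a heap-allocated TaskItem, append it to
 * task_queue (for later collection by ff_dnn_get_async_result_ov()), attach it to a
 * free RequestItem from request_queue and call execute_model_ov(), which only launches
 * the inference once the request has accumulated batch_size tasks. */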
659 DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
660  const char **output_names, uint32_t nb_output, AVFrame *out_frame)
661 {
662  OVModel *ov_model = model->model;
663  OVContext *ctx = &ov_model->ctx;
664  RequestItem *request;
665  TaskItem *task;
666 
667  if (!in_frame) {
 668  av_log(ctx, AV_LOG_ERROR, "in frame is NULL when executing the model asynchronously.\n");
669  return DNN_ERROR;
670  }
671 
672  if (!out_frame && model->func_type == DFT_PROCESS_FRAME) {
 673  av_log(ctx, AV_LOG_ERROR, "out frame is NULL when executing the model asynchronously.\n");
674  return DNN_ERROR;
675  }
676 
677  task = av_malloc(sizeof(*task));
678  if (!task) {
679  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
680  return DNN_ERROR;
681  }
682 
683  if (!ov_model->exe_network) {
684  if (init_model_ov(ov_model, input_name, output_names[0]) != DNN_SUCCESS) {
 685  av_log(ctx, AV_LOG_ERROR, "Failed to init OpenVINO executable network or inference request\n");
686  return DNN_ERROR;
687  }
688  }
689 
690  task->done = 0;
691  task->do_ioproc = 1;
692  task->async = 1;
693  task->input_name = input_name;
694  task->in_frame = in_frame;
695  task->output_name = output_names[0];
696  task->out_frame = out_frame;
697  task->ov_model = ov_model;
698  if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
699  av_freep(&task);
700  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
701  return DNN_ERROR;
702  }
703 
704  request = ff_safe_queue_pop_front(ov_model->request_queue);
705  if (!request) {
706  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
707  return DNN_ERROR;
708  }
709 
710  request->tasks[request->task_count++] = task;
711  return execute_model_ov(request);
712 }
713 
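/* Poll for a finished async task: peek at the oldest TaskItem on task_queue and report
 * DAST_EMPTY_QUEUE, DAST_NOT_READY, or, once task->done has been set by the completion
 * callback, hand the frames back and free the task. */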
714 DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
715 {
716  OVModel *ov_model = model->model;
717  TaskItem *task = ff_queue_peek_front(ov_model->task_queue);
718 
719  if (!task) {
720  return DAST_EMPTY_QUEUE;
721  }
722 
723  if (!task->done) {
724  return DAST_NOT_READY;
725  }
726 
727  *in = task->in_frame;
728  *out = task->out_frame;
729  ff_queue_pop_front(ov_model->task_queue);
730  av_freep(&task);
731 
732  return DAST_SUCCESS;
733 }
734 
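/* Called at EOF to submit a partially filled request: if the RequestItem at the front of
 * request_queue holds any pending tasks, fill the input blob and start the async
 * inference even though the batch is not full. */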
735 DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
736 {
737  OVModel *ov_model = model->model;
738  OVContext *ctx = &ov_model->ctx;
739  RequestItem *request;
740  IEStatusCode status;
741  DNNReturnType ret;
742 
743  request = ff_safe_queue_pop_front(ov_model->request_queue);
744  if (!request) {
745  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
746  return DNN_ERROR;
747  }
748 
749  if (request->task_count == 0) {
 750  // no pending task needs to be flushed
751  if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
752  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
753  return DNN_ERROR;
754  }
755  return DNN_SUCCESS;
756  }
757 
758  ret = fill_model_input_ov(ov_model, request);
759  if (ret != DNN_SUCCESS) {
760  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
761  return ret;
762  }
763  status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
764  if (status != OK) {
765  av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
766  return DNN_ERROR;
767  }
768  status = ie_infer_request_infer_async(request->infer_request);
769  if (status != OK) {
770  av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
771  return DNN_ERROR;
772  }
773 
774  return DNN_SUCCESS;
775 }
776 
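/* Tear down everything created by ff_dnn_load_model_ov()/init_model_ov(): free queued
 * RequestItems and TaskItems (including their frames), the infer request, executable
 * network, network, core, and finally the OVModel and DNNModel themselves. */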
777 void ff_dnn_free_model_ov(DNNModel **model)
778 {
779  if (*model){
780  OVModel *ov_model = (*model)->model;
781  while (ff_safe_queue_size(ov_model->request_queue) != 0) {
 782  RequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
 783  if (item && item->infer_request) {
784  ie_infer_request_free(&item->infer_request);
785  }
786  av_freep(&item->tasks);
787  av_freep(&item);
788  }
 789  ff_safe_queue_destroy(ov_model->request_queue);
 790 
791  while (ff_queue_size(ov_model->task_queue) != 0) {
792  TaskItem *item = ff_queue_pop_front(ov_model->task_queue);
793  av_frame_free(&item->in_frame);
794  av_frame_free(&item->out_frame);
795  av_freep(&item);
796  }
797  ff_queue_destroy(ov_model->task_queue);
798 
799  if (ov_model->infer_request)
800  ie_infer_request_free(&ov_model->infer_request);
801  if (ov_model->exe_network)
802  ie_exec_network_free(&ov_model->exe_network);
803  if (ov_model->network)
804  ie_network_free(&ov_model->network);
805  if (ov_model->core)
806  ie_core_free(&ov_model->core);
807  av_freep(&ov_model);
808  av_freep(model);
809  }
810 }