FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
#include <limits.h>

#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavcodec/defs.h"
#include "dnn_io_proc.h"
#include "dnn_backend_common.h"
#include "safe_queue.h"
#include <tensorflow/c/c_api.h>
37 
typedef struct TFModel {
    /* NOTE(review): this excerpt elides several members (the embedded DNNModel,
     * DnnContext *ctx and the request/lltask/task queues) that the rest of this
     * file references via tf_model->... — confirm against the full source. */
    TF_Graph *graph;     // imported TensorFlow graph definition
    TF_Session *session; // session used to run the graph
    TF_Status *status;   // status object reused across TF C API calls
} TFModel;
48 
49 /**
50  * Stores execution parameters for single
51  * call to the TensorFlow C API
52  */
53 typedef struct TFInferRequest {
54  TF_Output *tf_outputs;
55  TF_Tensor **output_tensors;
56  TF_Output *tf_input;
57  TF_Tensor *input_tensor;
59 
60 typedef struct TFRequestItem {
63  TF_Status *status;
66 
#define OFFSET(x) offsetof(TFOptions, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
/* Options exposed by the TensorFlow backend. */
static const AVOption dnn_tensorflow_options[] = {
    /* hex string ("0x...") holding a serialized session config proto;
     * see tools/python/tf_sess_config.py (referenced in load_tf_model) */
    { "sess_config", "config for SessionOptions", OFFSET(sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { NULL }
};
73 
74 
75 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
76 static void infer_completion_callback(void *args);
77 static inline void destroy_request_item(TFRequestItem **arg);
78 
/* Deallocator installed on the TF_Buffer created by read_graph();
 * releases the graph bytes once TensorFlow is done with them. */
static void free_buffer(void *data, size_t length)
{
    av_free(data);
}
83 
84 /**
85  * Free the contents of TensorFlow inference request.
86  * It does not free the TFInferRequest instance.
87  *
88  * @param request pointer to TFInferRequest instance.
89  * NULL pointer is allowed.
90  */
91 static void tf_free_request(TFInferRequest *request)
92 {
93  if (!request)
94  return;
95  if (request->input_tensor) {
96  TF_DeleteTensor(request->input_tensor);
97  request->input_tensor = NULL;
98  }
99  av_freep(&request->tf_input);
100  av_freep(&request->tf_outputs);
101  if (request->output_tensors) {
102  int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
103  for (uint32_t i = 0; i < nb_output; ++i) {
104  if (request->output_tensors[i]) {
105  TF_DeleteTensor(request->output_tensors[i]);
106  request->output_tensors[i] = NULL;
107  }
108  }
109  av_freep(&request->output_tensors);
110  }
111 }
112 
113 /**
114  * Create a TensorFlow inference request. All properties
115  * are initially unallocated and set as NULL.
116  *
117  * @return pointer to the allocated TFInferRequest instance.
118  */
120 {
121  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
122  if (!infer_request) {
123  return NULL;
124  }
125  infer_request->tf_outputs = NULL;
126  infer_request->tf_input = NULL;
127  infer_request->input_tensor = NULL;
128  infer_request->output_tensors = NULL;
129  return infer_request;
130 }
131 
132 /**
133  * Start synchronous inference for the TensorFlow model.
134  *
135  * @param request pointer to the TFRequestItem for inference
136  * @retval 0 if execution is successful
137  * @retval AVERROR(EINVAL) if request is NULL
138  * @retval DNN_GENERIC_ERROR if execution fails
139  */
140 static int tf_start_inference(void *args)
141 {
142  TFRequestItem *request = args;
143  TFInferRequest *infer_request = request->infer_request;
144  LastLevelTaskItem *lltask = request->lltask;
145  TaskItem *task = lltask->task;
146  TFModel *tf_model = task->model;
147 
148  if (!request) {
149  av_log(tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
150  return AVERROR(EINVAL);
151  }
152 
153  TF_SessionRun(tf_model->session, NULL,
154  infer_request->tf_input, &infer_request->input_tensor, 1,
155  infer_request->tf_outputs, infer_request->output_tensors,
156  task->nb_output, NULL, 0, NULL,
157  request->status);
158  if (TF_GetCode(request->status) != TF_OK) {
159  av_log(tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
160  return DNN_GENERIC_ERROR;
161  }
162  return 0;
163 }
164 
/**
 * Free the TFRequestItem completely.
 *
 * @param arg Address of the TFRequestItem instance.
 * NOTE(review): *arg is assumed non-NULL — request->infer_request is
 * dereferenced without a check below; confirm all call sites.
 * NOTE(review): this excerpt elides one line here (presumably the async
 * exec-module cleanup) — confirm against the full source.
 */
static inline void destroy_request_item(TFRequestItem **arg) {
    TFRequestItem *request;
    if (!arg) {
        return;
    }
    request = *arg;
    /* Release the TF tensors/bindings held inside the inference request,
     * then the request wrapper itself. */
    tf_free_request(request->infer_request);
    av_freep(&request->infer_request);
    av_freep(&request->lltask);
    TF_DeleteStatus(request->status);
    av_freep(arg);
}
183 
184 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
185 {
186  TFModel *tf_model = task->model;
187  DnnContext *ctx = tf_model->ctx;
188  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
189  if (!lltask) {
190  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
191  return AVERROR(ENOMEM);
192  }
193  task->inference_todo = 1;
194  task->inference_done = 0;
195  lltask->task = task;
196  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
197  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
198  av_freep(&lltask);
199  return AVERROR(ENOMEM);
200  }
201  return 0;
202 }
203 
204 static TF_Buffer *read_graph(const char *model_filename)
205 {
206  TF_Buffer *graph_buf;
207  unsigned char *graph_data = NULL;
208  AVIOContext *model_file_context;
209  long size, bytes_read;
210 
211  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
212  return NULL;
213  }
214 
215  size = avio_size(model_file_context);
216 
217  graph_data = av_malloc(size);
218  if (!graph_data){
219  avio_closep(&model_file_context);
220  return NULL;
221  }
222  bytes_read = avio_read(model_file_context, graph_data, size);
223  avio_closep(&model_file_context);
224  if (bytes_read != size){
225  av_freep(&graph_data);
226  return NULL;
227  }
228 
229  graph_buf = TF_NewBuffer();
230  graph_buf->data = graph_data;
231  graph_buf->length = size;
232  graph_buf->data_deallocator = free_buffer;
233 
234  return graph_buf;
235 }
236 
237 static TF_Tensor *allocate_input_tensor(const DNNData *input)
238 {
239  TF_DataType dt;
240  size_t size;
241  int64_t input_dims[4] = { 0 };
242 
243  input_dims[0] = 1;
244  input_dims[1] = input->dims[dnn_get_height_idx_by_layout(input->layout)];
245  input_dims[2] = input->dims[dnn_get_width_idx_by_layout(input->layout)];
246  input_dims[3] = input->dims[dnn_get_channel_idx_by_layout(input->layout)];
247  switch (input->dt) {
248  case DNN_FLOAT:
249  dt = TF_FLOAT;
250  size = sizeof(float);
251  break;
252  case DNN_UINT8:
253  dt = TF_UINT8;
254  size = 1;
255  break;
256  default:
257  av_assert0(!"should not reach here");
258  }
259 
260  return TF_AllocateTensor(dt, input_dims, 4,
261  input_dims[1] * input_dims[2] * input_dims[3] * size);
262 }
263 
264 static int get_input_tf(DNNModel *model, DNNData *input, const char *input_name)
265 {
266  TFModel *tf_model = (TFModel *)model;
267  DnnContext *ctx = tf_model->ctx;
268  TF_Status *status;
269  TF_DataType dt;
270  int64_t dims[4];
271 
272  TF_Output tf_output;
273  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
274  if (!tf_output.oper) {
275  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
276  return AVERROR(EINVAL);
277  }
278 
279  tf_output.index = 0;
280  dt = TF_OperationOutputType(tf_output);
281  switch (dt) {
282  case TF_FLOAT:
283  input->dt = DNN_FLOAT;
284  break;
285  case TF_UINT8:
286  input->dt = DNN_UINT8;
287  break;
288  default:
289  av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
290  return AVERROR(EINVAL);
291  }
292  input->order = DCO_RGB;
293 
294  status = TF_NewStatus();
295  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
296  if (TF_GetCode(status) != TF_OK){
297  TF_DeleteStatus(status);
298  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
299  return DNN_GENERIC_ERROR;
300  }
301  TF_DeleteStatus(status);
302 
303  // currently only NHWC is supported
304  av_assert0(dims[0] == 1 || dims[0] == -1);
305  for (int i = 0; i < 4; i++)
306  input->dims[i] = dims[i];
307  input->layout = DL_NHWC;
308 
309  return 0;
310 }
311 
312 static int get_output_tf(DNNModel *model, const char *input_name, int input_width, int input_height,
313  const char *output_name, int *output_width, int *output_height)
314 {
315  int ret;
316  TFModel *tf_model = (TFModel *)model;
317  DnnContext *ctx = tf_model->ctx;
318  TaskItem task;
319  TFRequestItem *request;
320  DNNExecBaseParams exec_params = {
321  .input_name = input_name,
322  .output_names = &output_name,
323  .nb_output = 1,
324  .in_frame = NULL,
325  .out_frame = NULL,
326  };
327 
328  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
329  if (ret != 0) {
330  goto err;
331  }
332 
333  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
334  if (ret != 0) {
335  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
336  goto err;
337  }
338 
339  request = ff_safe_queue_pop_front(tf_model->request_queue);
340  if (!request) {
341  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
342  ret = AVERROR(EINVAL);
343  goto err;
344  }
345 
346  ret = execute_model_tf(request, tf_model->lltask_queue);
347  *output_width = task.out_frame->width;
348  *output_height = task.out_frame->height;
349 
350 err:
351  av_frame_free(&task.out_frame);
352  av_frame_free(&task.in_frame);
353  return ret;
354 }
355 
#define SPACE_CHARS " \t\r\n"
/* Decode a hex string into bytes, skipping whitespace between digits.
 * With data == NULL only the decoded length is computed.  Decoding stops
 * at the first character that is not a hex digit or whitespace.
 * Returns the number of complete bytes decoded. */
static int hex_to_data(uint8_t *data, const char *p)
{
    int len = 0;
    int v = 1; /* sentinel bit: after two nibbles it reaches bit 8 */

    for (;;) {
        int c;
        p += strspn(p, SPACE_CHARS);
        if (!*p)
            break;
        c = (unsigned char)*p++;
        if (c >= '0' && c <= '9')
            c -= '0';
        else if (c >= 'A' && c <= 'F')
            c -= 'A' - 10;
        else if (c >= 'a' && c <= 'f')
            c -= 'a' - 10;
        else
            break;
        v = (v << 4) | c;
        if (v & 0x100) {
            if (data)
                data[len] = v;
            len++;
            v = 1;
        }
    }
    return len;
}
385 
386 static int load_tf_model(TFModel *tf_model, const char *model_filename)
387 {
388  DnnContext *ctx = tf_model->ctx;
389  TF_Buffer *graph_def;
390  TF_ImportGraphDefOptions *graph_opts;
391  TF_SessionOptions *sess_opts;
392  const TF_Operation *init_op;
393  uint8_t *sess_config = NULL;
394  int sess_config_length = 0;
395 
396  // prepare the sess config data
397  if (ctx->tf_option.sess_config != NULL) {
398  const char *config;
399  /*
400  tf_model->ctx.options.sess_config is hex to present the serialized proto
401  required by TF_SetConfig below, so we need to first generate the serialized
402  proto in a python script, tools/python/tf_sess_config.py is a script example
403  to generate the configs of sess_config.
404  */
405  if (strncmp(ctx->tf_option.sess_config, "0x", 2) != 0) {
406  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
407  return AVERROR(EINVAL);
408  }
409  config = ctx->tf_option.sess_config + 2;
410  sess_config_length = hex_to_data(NULL, config);
411 
412  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
413  if (!sess_config) {
414  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
415  return AVERROR(ENOMEM);
416  }
417  if (hex_to_data(sess_config, config) < 0) {
418  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
419  return AVERROR(EINVAL);
420  }
421  }
422 
423  graph_def = read_graph(model_filename);
424  if (!graph_def){
425  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
426  av_freep(&sess_config);
427  return AVERROR(EINVAL);
428  }
429  tf_model->graph = TF_NewGraph();
430  tf_model->status = TF_NewStatus();
431  graph_opts = TF_NewImportGraphDefOptions();
432  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
433  TF_DeleteImportGraphDefOptions(graph_opts);
434  TF_DeleteBuffer(graph_def);
435  if (TF_GetCode(tf_model->status) != TF_OK){
436  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
437  av_freep(&sess_config);
438  return DNN_GENERIC_ERROR;
439  }
440 
441  init_op = TF_GraphOperationByName(tf_model->graph, "init");
442  sess_opts = TF_NewSessionOptions();
443 
444  if (sess_config) {
445  TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
446  av_freep(&sess_config);
447  if (TF_GetCode(tf_model->status) != TF_OK) {
448  TF_DeleteSessionOptions(sess_opts);
449  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
450  ctx->tf_option.sess_config);
451  return DNN_GENERIC_ERROR;
452  }
453  }
454 
455  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
456  TF_DeleteSessionOptions(sess_opts);
457  if (TF_GetCode(tf_model->status) != TF_OK)
458  {
459  av_freep(&sess_config);
460  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
461  return DNN_GENERIC_ERROR;
462  }
463 
464  // Run initialization operation with name "init" if it is present in graph
465  if (init_op){
466  TF_SessionRun(tf_model->session, NULL,
467  NULL, NULL, 0,
468  NULL, NULL, 0,
469  &init_op, 1, NULL, tf_model->status);
470  if (TF_GetCode(tf_model->status) != TF_OK)
471  {
472  av_freep(&sess_config);
473  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
474  return DNN_GENERIC_ERROR;
475  }
476  }
477 
478  return 0;
479 }
480 
/* Tear down a TF model: drain and free the request/lltask/task queues,
 * then release the TF session, graph and status objects.
 * NOTE(review): this excerpt elides the queue-pop lines declaring 'item'
 * in the first two loops (and a queue-destroy call) — confirm against
 * the full source. */
static void dnn_free_model_tf(DNNModel **model)
{
    TFModel *tf_model;

    if (!model || !*model)
        return;

    tf_model = (TFModel *)(*model);
    /* Fully destroy every pooled inference request. */
    while (ff_safe_queue_size(tf_model->request_queue) != 0) {
        destroy_request_item(&item);
    }

    while (ff_queue_size(tf_model->lltask_queue) != 0) {
        av_freep(&item);
    }
    ff_queue_destroy(tf_model->lltask_queue);

    /* Pending tasks still own their frames; free those first. */
    while (ff_queue_size(tf_model->task_queue) != 0) {
        TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
        av_frame_free(&item->in_frame);
        av_frame_free(&item->out_frame);
        av_freep(&item);
    }
    ff_queue_destroy(tf_model->task_queue);

    if (tf_model->graph){
        TF_DeleteGraph(tf_model->graph);
    }
    if (tf_model->session){
        /* Close the session before deleting it. */
        TF_CloseSession(tf_model->session, tf_model->status);
        TF_DeleteSession(tf_model->session, tf_model->status);
    }
    if (tf_model->status){
        TF_DeleteStatus(tf_model->status);
    }
    av_freep(&tf_model);
    *model = NULL;
}
522 
/* Body of dnn_load_model_tf().
 * NOTE(review): this excerpt elides the signature line and a few statements
 * inside the request-pool loop (the inference-request allocation assigned to
 * item->infer_request and the exec_module start/callback setup) — confirm
 * against the full source. */
{
    DNNModel *model = NULL;
    TFModel *tf_model = NULL;

    tf_model = av_mallocz(sizeof(TFModel));
    if (!tf_model)
        return NULL;
    model = &tf_model->model;
    tf_model->ctx = ctx;

    if (load_tf_model(tf_model, ctx->model_filename) != 0){
        av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", ctx->model_filename);
        goto err;
    }

    /* Default request-pool size: half the CPU count plus one. */
    if (ctx->nireq <= 0) {
        ctx->nireq = av_cpu_count() / 2 + 1;
    }

#if !HAVE_PTHREAD_CANCEL
    if (ctx->options.async) {
        ctx->options.async = 0;
        av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
    }
#endif

    tf_model->request_queue = ff_safe_queue_create();
    if (!tf_model->request_queue) {
        goto err;
    }

    /* Pre-allocate a pool of nireq inference requests. */
    for (int i = 0; i < ctx->nireq; i++) {
        TFRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }
        item->lltask = NULL;
        if (!item->infer_request) {
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
            av_freep(&item);
            goto err;
        }
        item->status = TF_NewStatus();
        item->exec_module.args = item;

        if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
            destroy_request_item(&item);
            goto err;
        }
    }

    tf_model->lltask_queue = ff_queue_create();
    if (!tf_model->lltask_queue) {
        goto err;
    }

    tf_model->task_queue = ff_queue_create();
    if (!tf_model->task_queue) {
        goto err;
    }

    model->get_input = &get_input_tf;
    model->get_output = &get_output_tf;
    model->filter_ctx = filter_ctx;
    model->func_type = func_type;

    return model;
err:
    dnn_free_model_tf(&model);
    return NULL;
}
598 
/* Pop the next lltask, bind it to the request, and prepare the TF
 * input/output bindings (TF_Output arrays plus the input tensor),
 * running the frame-to-tensor pre-processing where applicable.
 * NOTE(review): this excerpt elides several lines (the error-code
 * assignments on the operation-lookup failure paths, the
 * frame-to-tensor ioproc call and the DFT_ANALYTICS_DETECT case of the
 * switch), which is why some branches below look empty — confirm
 * against the full source. */
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
    DNNData input = { 0 };
    LastLevelTaskItem *lltask;
    TaskItem *task;
    TFInferRequest *infer_request = NULL;
    DnnContext *ctx = tf_model->ctx;
    int ret = 0;

    lltask = ff_queue_pop_front(tf_model->lltask_queue);
    av_assert0(lltask);
    task = lltask->task;
    request->lltask = lltask;

    ret = get_input_tf(&tf_model->model, &input, task->input_name);
    if (ret != 0) {
        goto err;
    }

    infer_request = request->infer_request;
    /* Override the model's (possibly dynamic) H/W with the frame's. */
    input.dims[1] = task->in_frame->height;
    input.dims[2] = task->in_frame->width;

    infer_request->tf_input = av_malloc(sizeof(TF_Output));
    if (!infer_request->tf_input) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
    if (!infer_request->tf_input->oper){
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
        goto err;
    }
    infer_request->tf_input->index = 0;

    infer_request->input_tensor = allocate_input_tensor(&input);
    if (!infer_request->input_tensor){
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    input.data = (float *)TF_TensorData(infer_request->input_tensor);

    switch (tf_model->model.func_type) {
    case DFT_PROCESS_FRAME:
        if (task->do_ioproc) {
            if (tf_model->model.frame_pre_proc != NULL) {
                tf_model->model.frame_pre_proc(task->in_frame, &input, tf_model->model.filter_ctx);
            } else {
            }
        }
        break;
        break;
    default:
        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model.func_type);
        break;
    }

    infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
    if (infer_request->tf_outputs == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
    if (!infer_request->output_tensors) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    /* Resolve each requested output operation in the graph. */
    for (int i = 0; i < task->nb_output; ++i) {
        infer_request->output_tensors[i] = NULL;
        infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
        if (!infer_request->tf_outputs[i].oper) {
            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
            goto err;
        }
        infer_request->tf_outputs[i].index = 0;
    }

    return 0;
err:
    tf_free_request(infer_request);
    return ret;
}
692 
/* Completion callback: wrap the output tensors into DNNData, run the
 * per-function-type post-processing, then return the request to the pool.
 * NOTE(review): this excerpt elides the left-hand sides of the
 * outputs[i].dims[...] assignments (the bare TF_Dim(...) statements are
 * the surviving halves), the dnn-to-frame ioproc call, the out_frame
 * dimension assignments, and the DFT_ANALYTICS_DETECT case label —
 * confirm against the full source. */
static void infer_completion_callback(void *args) {
    TFRequestItem *request = args;
    LastLevelTaskItem *lltask = request->lltask;
    TaskItem *task = lltask->task;
    DNNData *outputs;
    TFInferRequest *infer_request = request->infer_request;
    TFModel *tf_model = task->model;
    DnnContext *ctx = tf_model->ctx;

    outputs = av_calloc(task->nb_output, sizeof(*outputs));
    if (!outputs) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
        goto err;
    }

    /* Describe each produced tensor as DNNData for the post-proc hooks. */
    for (uint32_t i = 0; i < task->nb_output; ++i) {
        TF_Dim(infer_request->output_tensors[i], 1);
        TF_Dim(infer_request->output_tensors[i], 2);
        TF_Dim(infer_request->output_tensors[i], 3);
        outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
        outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
    }
    switch (tf_model->model.func_type) {
    case DFT_PROCESS_FRAME:
        //it only support 1 output if it's frame in & frame out
        if (task->do_ioproc) {
            if (tf_model->model.frame_post_proc != NULL) {
                tf_model->model.frame_post_proc(task->out_frame, outputs, tf_model->model.filter_ctx);
            } else {
            }
        } else {
            task->out_frame->width =
            task->out_frame->height =
        }
        break;
        if (!tf_model->model.detect_post_proc) {
            av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
            return;
        }
        tf_model->model.detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model.filter_ctx);
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
        goto err;
    }
    task->inference_done++;
err:
    /* Always release the TF resources and recycle the request. */
    tf_free_request(infer_request);
    av_freep(&outputs);

    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
        destroy_request_item(&request);
        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
    }
}
755 
756 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
757 {
758  TFModel *tf_model;
759  DnnContext *ctx;
760  LastLevelTaskItem *lltask;
761  TaskItem *task;
762  int ret = 0;
763 
764  if (ff_queue_size(lltask_queue) == 0) {
765  destroy_request_item(&request);
766  return 0;
767  }
768 
769  lltask = ff_queue_peek_front(lltask_queue);
770  task = lltask->task;
771  tf_model = task->model;
772  ctx = tf_model->ctx;
773 
774  ret = fill_model_input_tf(tf_model, request);
775  if (ret != 0) {
776  goto err;
777  }
778 
779  if (task->async) {
780  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
781  goto err;
782  }
783  return 0;
784  }
785  else {
786  ret = tf_start_inference(request);
787  if (ret != 0) {
788  goto err;
789  }
790  infer_completion_callback(request);
791  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
792  }
793 err:
794  tf_free_request(request->infer_request);
795  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
796  destroy_request_item(&request);
797  }
798 
799  return ret;
800 }
801 
802 static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
803 {
804  TFModel *tf_model = (TFModel *)model;
805  DnnContext *ctx = tf_model->ctx;
806  TaskItem *task;
807  TFRequestItem *request;
808  int ret = 0;
809 
810  ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
811  if (ret != 0) {
812  return ret;
813  }
814 
815  task = av_malloc(sizeof(*task));
816  if (!task) {
817  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
818  return AVERROR(ENOMEM);
819  }
820 
821  ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->async, 1);
822  if (ret != 0) {
823  av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
824  av_freep(&task);
825  return ret;
826  }
827 
828  if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
829  av_freep(&task);
830  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
831  return AVERROR(ENOMEM);
832  }
833 
834  ret = extract_lltask_from_task(task, tf_model->lltask_queue);
835  if (ret != 0) {
836  av_freep(&task);
837  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
838  return ret;
839  }
840 
841  request = ff_safe_queue_pop_front(tf_model->request_queue);
842  if (!request) {
843  av_freep(&task);
844  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
845  return AVERROR(EINVAL);
846  }
847  return execute_model_tf(request, tf_model->lltask_queue);
848 }
849 
851 {
852  TFModel *tf_model = (TFModel *)model;
853  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
854 }
855 
856 static int dnn_flush_tf(const DNNModel *model)
857 {
858  TFModel *tf_model = (TFModel *)model;
859  DnnContext *ctx = tf_model->ctx;
860  TFRequestItem *request;
861  int ret;
862 
863  if (ff_queue_size(tf_model->lltask_queue) == 0) {
864  // no pending task need to flush
865  return 0;
866  }
867 
868  request = ff_safe_queue_pop_front(tf_model->request_queue);
869  if (!request) {
870  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
871  return AVERROR(EINVAL);
872  }
873 
874  ret = fill_model_input_tf(tf_model, request);
875  if (ret != 0) {
876  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
877  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
878  destroy_request_item(&request);
879  }
880  return ret;
881  }
882 
883  return ff_dnn_start_inference_async(ctx, &request->exec_module);
884 }
885 
887  .clazz = DNN_DEFINE_CLASS(dnn_tensorflow),
888  .type = DNN_TF,
889  .load_model = dnn_load_model_tf,
890  .execute_model = dnn_execute_model_tf,
891  .get_result = dnn_get_result_tf,
892  .flush = dnn_flush_tf,
893  .free_model = dnn_free_model_tf,
894 };
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:215
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:53
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:54
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:756
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:68
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:41
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:55
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:65
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:56
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:184
int64_t
long long int64_t
Definition: coverity.c:34
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:30
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:162
LastLevelTaskItem
Definition: dnn_backend_common.h:57
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:389
AVFrame::width
int width
Definition: frame.h:461
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:429
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:110
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:57
data
const char data[16]
Definition: mxf.c:149
avio_open
int avio_open(AVIOContext **s, const char *filename, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:497
TFModel::model
DNNModel model
Definition: dnn_backend_tf.c:39
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:81
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:386
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:44
TaskItem
Definition: dnn_backend_common.h:43
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:77
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:67
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:170
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:99
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
dnn_get_width_idx_by_layout
static int dnn_get_width_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:197
TaskItem::model
void * model
Definition: dnn_backend_common.h:44
DnnContext
Definition: dnn_interface.h:143
get_input_tf
static int get_input_tf(DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:264
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:52
dnn_load_model_tf
static DNNModel * dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:523
DL_NHWC
@ DL_NHWC
Definition: dnn_interface.h:66
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:356
Queue
Linear double-ended data structure.
Definition: executor.c:51
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:36
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:209
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:599
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:64
float
float
Definition: af_crystalizer.c:122
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:58
TFModel::ctx
DnnContext * ctx
Definition: dnn_backend_tf.c:40
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:204
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
DNNData
Definition: dnn_interface.h:69
DNNModule::clazz
const AVClass clazz
Definition: dnn_interface.h:176
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:69
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:156
DNNModel::get_output
int(* get_output)(struct DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:106
ctx
AVFormatContext * ctx
Definition: movenc.c:49
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:52
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:182
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:423
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:113
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:119
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:86
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:46
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:693
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:45
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:43
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:91
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:221
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:49
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:53
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:115
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:101
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
dnn_flush_tf
static int dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:856
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
DNNDataType
DNNDataType
Definition: dnn_interface.h:41
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:357
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:41
get_output_tf
static int get_output_tf(DNNModel *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:312
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:140
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:50
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
DNN_DEFINE_CLASS
#define DNN_DEFINE_CLASS(fname)
Definition: dnn_backend_common.h:39
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
layout
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel layout
Definition: filter_design.txt:18
ff_dnn_backend_tf
const DNNModule ff_dnn_backend_tf
Definition: dnn_backend_tf.c:886
dnn_execute_model_tf
static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:802
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:59
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:63
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:55
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:42
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:61
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:70
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:83
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:227
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:48
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:56
ret
ret
Definition: filter_design.txt:187
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:41
TFModel
Definition: dnn_backend_tf.c:38
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
dnn_get_result_tf
static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:850
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:46
AVFrame::height
int height
Definition: frame.h:461
status
ov_status_e status
Definition: dnn_backend_openvino.c:100
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:237
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:62
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:612
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:136
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:46
AVFilterContext
An instance of a filter.
Definition: avfilter.h:457
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:105
DNNModel
Definition: dnn_interface.h:97
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
mem.h
dnn_get_height_idx_by_layout
static int dnn_get_height_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:202
dnn_free_model_tf
static void dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:481
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:47
dnn_get_channel_idx_by_layout
static int dnn_get_channel_idx_by_layout(DNNLayout layout)
Definition: dnn_interface.h:207
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: avio.c:649
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:79
DNNExecBaseParams
Definition: dnn_interface.h:80
DNNModel::get_input
int(* get_input)(struct DNNModel *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:104
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:50
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Underlying C type is a uint8_t* that is either NULL or points to a C string allocated with the av_mal...
Definition: opt.h:276
TFRequestItem
Definition: dnn_backend_tf.c:60
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:49
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:58
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:45
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:51
DNNModule
Definition: dnn_interface.h:175
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:42