FFmpeg
dnn_backend_tf.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN tensorflow backend implementation.
24  */
25 
26 #include "dnn_backend_tf.h"
27 #include "libavformat/avio.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/cpu.h"
31 #include "libavutil/opt.h"
32 #include "libavcodec/defs.h"
33 #include "../internal.h"
34 #include "dnn_io_proc.h"
35 #include "dnn_backend_common.h"
36 #include "safe_queue.h"
37 #include <tensorflow/c/c_api.h>
38 
/**
 * User-configurable options for the TensorFlow backend.
 */
typedef struct TFOptions{
    char *sess_config;  ///< hex string ("0x...") holding a serialized ConfigProto, decoded in load_tf_model()
    uint8_t async;      ///< non-zero: run inference asynchronously (forced to 0 when pthread cancel is unavailable)
    uint32_t nireq;     ///< number of parallel inference requests; <= 0 selects av_cpu_count()/2 + 1
} TFOptions;
44 
45 typedef struct TFContext {
46  const AVClass *class;
48 } TFContext;
49 
50 typedef struct TFModel{
53  TF_Graph *graph;
54  TF_Session *session;
55  TF_Status *status;
59 } TFModel;
60 
61 /**
62  * Stores execution parameters for single
63  * call to the TensorFlow C API
64  */
65 typedef struct TFInferRequest {
66  TF_Output *tf_outputs;
67  TF_Tensor **output_tensors;
68  TF_Output *tf_input;
69  TF_Tensor *input_tensor;
71 
72 typedef struct TFRequestItem {
75  TF_Status *status;
78 
79 #define OFFSET(x) offsetof(TFContext, x)
80 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
81 static const AVOption dnn_tensorflow_options[] = {
82  { "sess_config", "config for SessionOptions", OFFSET(options.sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
84  { NULL }
85 };
86 
87 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
88 
89 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
90 static void infer_completion_callback(void *args);
91 static inline void destroy_request_item(TFRequestItem **arg);
92 
/**
 * Deallocator installed as TF_Buffer.data_deallocator for the graph buffer
 * created in read_graph() (signature dictated by the TensorFlow C API).
 *
 * @param data   buffer to release (allocated with av_malloc in read_graph)
 * @param length unused, required by the callback signature
 */
static void free_buffer(void *data, size_t length)
{
    // av_freep() only NULLs the local copy of 'data'; effectively a plain free
    av_freep(&data);
}
97 
98 /**
99  * Free the contents of TensorFlow inference request.
100  * It does not free the TFInferRequest instance.
101  *
102  * @param request pointer to TFInferRequest instance.
103  * NULL pointer is allowed.
104  */
105 static void tf_free_request(TFInferRequest *request)
106 {
107  if (!request)
108  return;
109  if (request->input_tensor) {
110  TF_DeleteTensor(request->input_tensor);
111  request->input_tensor = NULL;
112  }
113  av_freep(&request->tf_input);
114  av_freep(&request->tf_outputs);
115  if (request->output_tensors) {
116  int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
117  for (uint32_t i = 0; i < nb_output; ++i) {
118  if (request->output_tensors[i]) {
119  TF_DeleteTensor(request->output_tensors[i]);
120  request->output_tensors[i] = NULL;
121  }
122  }
123  av_freep(&request->output_tensors);
124  }
125 }
126 
127 /**
128  * Create a TensorFlow inference request. All properties
129  * are initially unallocated and set as NULL.
130  *
131  * @return pointer to the allocated TFInferRequest instance.
132  */
134 {
135  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
136  if (!infer_request) {
137  return NULL;
138  }
139  infer_request->tf_outputs = NULL;
140  infer_request->tf_input = NULL;
141  infer_request->input_tensor = NULL;
142  infer_request->output_tensors = NULL;
143  return infer_request;
144 }
145 
146 /**
147  * Start synchronous inference for the TensorFlow model.
148  *
149  * @param request pointer to the TFRequestItem for inference
150  * @retval 0 if execution is successful
151  * @retval AVERROR(EINVAL) if request is NULL
152  * @retval DNN_GENERIC_ERROR if execution fails
153  */
154 static int tf_start_inference(void *args)
155 {
156  TFRequestItem *request = args;
157  TFInferRequest *infer_request = request->infer_request;
158  LastLevelTaskItem *lltask = request->lltask;
159  TaskItem *task = lltask->task;
160  TFModel *tf_model = task->model;
161 
162  if (!request) {
163  av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
164  return AVERROR(EINVAL);
165  }
166 
167  TF_SessionRun(tf_model->session, NULL,
168  infer_request->tf_input, &infer_request->input_tensor, 1,
169  infer_request->tf_outputs, infer_request->output_tensors,
170  task->nb_output, NULL, 0, NULL,
171  request->status);
172  if (TF_GetCode(request->status) != TF_OK) {
173  av_log(&tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
174  return DNN_GENERIC_ERROR;
175  }
176  return 0;
177 }
178 
179 /**
180  * Free the TFRequestItem completely.
181  *
182  * @param arg Address of the TFInferRequest instance.
183  */
184 static inline void destroy_request_item(TFRequestItem **arg) {
185  TFRequestItem *request;
186  if (!arg) {
187  return;
188  }
189  request = *arg;
190  tf_free_request(request->infer_request);
191  av_freep(&request->infer_request);
192  av_freep(&request->lltask);
193  TF_DeleteStatus(request->status);
195  av_freep(arg);
196 }
197 
198 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
199 {
200  TFModel *tf_model = task->model;
201  TFContext *ctx = &tf_model->ctx;
202  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
203  if (!lltask) {
204  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
205  return AVERROR(ENOMEM);
206  }
207  task->inference_todo = 1;
208  task->inference_done = 0;
209  lltask->task = task;
210  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
211  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
212  av_freep(&lltask);
213  return AVERROR(ENOMEM);
214  }
215  return 0;
216 }
217 
218 static TF_Buffer *read_graph(const char *model_filename)
219 {
220  TF_Buffer *graph_buf;
221  unsigned char *graph_data = NULL;
222  AVIOContext *model_file_context;
223  long size, bytes_read;
224 
225  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
226  return NULL;
227  }
228 
229  size = avio_size(model_file_context);
230 
231  graph_data = av_malloc(size);
232  if (!graph_data){
233  avio_closep(&model_file_context);
234  return NULL;
235  }
236  bytes_read = avio_read(model_file_context, graph_data, size);
237  avio_closep(&model_file_context);
238  if (bytes_read != size){
239  av_freep(&graph_data);
240  return NULL;
241  }
242 
243  graph_buf = TF_NewBuffer();
244  graph_buf->data = graph_data;
245  graph_buf->length = size;
246  graph_buf->data_deallocator = free_buffer;
247 
248  return graph_buf;
249 }
250 
251 static TF_Tensor *allocate_input_tensor(const DNNData *input)
252 {
253  TF_DataType dt;
254  size_t size;
255  int64_t input_dims[] = {1, input->height, input->width, input->channels};
256  switch (input->dt) {
257  case DNN_FLOAT:
258  dt = TF_FLOAT;
259  size = sizeof(float);
260  break;
261  case DNN_UINT8:
262  dt = TF_UINT8;
263  size = 1;
264  break;
265  default:
266  av_assert0(!"should not reach here");
267  }
268 
269  return TF_AllocateTensor(dt, input_dims, 4,
270  input_dims[1] * input_dims[2] * input_dims[3] * size);
271 }
272 
273 static int get_input_tf(void *model, DNNData *input, const char *input_name)
274 {
275  TFModel *tf_model = model;
276  TFContext *ctx = &tf_model->ctx;
277  TF_Status *status;
278  int64_t dims[4];
279 
280  TF_Output tf_output;
281  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
282  if (!tf_output.oper) {
283  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
284  return AVERROR(EINVAL);
285  }
286 
287  tf_output.index = 0;
288  input->dt = TF_OperationOutputType(tf_output);
289  input->order = DCO_RGB;
290 
291  status = TF_NewStatus();
292  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
293  if (TF_GetCode(status) != TF_OK){
294  TF_DeleteStatus(status);
295  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
296  return DNN_GENERIC_ERROR;
297  }
298  TF_DeleteStatus(status);
299 
300  // currently only NHWC is supported
301  av_assert0(dims[0] == 1 || dims[0] == -1);
302  input->height = dims[1];
303  input->width = dims[2];
304  input->channels = dims[3];
305 
306  return 0;
307 }
308 
309 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
310  const char *output_name, int *output_width, int *output_height)
311 {
312  int ret;
313  TFModel *tf_model = model;
314  TFContext *ctx = &tf_model->ctx;
315  TaskItem task;
316  TFRequestItem *request;
317  DNNExecBaseParams exec_params = {
318  .input_name = input_name,
319  .output_names = &output_name,
320  .nb_output = 1,
321  .in_frame = NULL,
322  .out_frame = NULL,
323  };
324 
325  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
326  if (ret != 0) {
327  goto err;
328  }
329 
330  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
331  if (ret != 0) {
332  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
333  goto err;
334  }
335 
336  request = ff_safe_queue_pop_front(tf_model->request_queue);
337  if (!request) {
338  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
339  ret = AVERROR(EINVAL);
340  goto err;
341  }
342 
343  ret = execute_model_tf(request, tf_model->lltask_queue);
344  *output_width = task.out_frame->width;
345  *output_height = task.out_frame->height;
346 
347 err:
348  av_frame_free(&task.out_frame);
349  av_frame_free(&task.in_frame);
350  return ret;
351 }
352 
353 #define SPACE_CHARS " \t\r\n"
354 static int hex_to_data(uint8_t *data, const char *p)
355 {
356  int c, len, v;
357 
358  len = 0;
359  v = 1;
360  for (;;) {
361  p += strspn(p, SPACE_CHARS);
362  if (*p == '\0')
363  break;
364  c = av_toupper((unsigned char) *p++);
365  if (c >= '0' && c <= '9')
366  c = c - '0';
367  else if (c >= 'A' && c <= 'F')
368  c = c - 'A' + 10;
369  else
370  break;
371  v = (v << 4) | c;
372  if (v & 0x100) {
373  if (data) {
374  data[len] = v;
375  }
376  len++;
377  v = 1;
378  }
379  }
380  return len;
381 }
382 
383 static int load_tf_model(TFModel *tf_model, const char *model_filename)
384 {
385  TFContext *ctx = &tf_model->ctx;
386  TF_Buffer *graph_def;
387  TF_ImportGraphDefOptions *graph_opts;
388  TF_SessionOptions *sess_opts;
389  const TF_Operation *init_op;
390  uint8_t *sess_config = NULL;
391  int sess_config_length = 0;
392 
393  // prepare the sess config data
394  if (tf_model->ctx.options.sess_config != NULL) {
395  const char *config;
396  /*
397  tf_model->ctx.options.sess_config is hex to present the serialized proto
398  required by TF_SetConfig below, so we need to first generate the serialized
399  proto in a python script, tools/python/tf_sess_config.py is a script example
400  to generate the configs of sess_config.
401  */
402  if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
403  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
404  return AVERROR(EINVAL);
405  }
406  config = tf_model->ctx.options.sess_config + 2;
407  sess_config_length = hex_to_data(NULL, config);
408 
409  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
410  if (!sess_config) {
411  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
412  return AVERROR(ENOMEM);
413  }
414  if (hex_to_data(sess_config, config) < 0) {
415  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
416  return AVERROR(EINVAL);
417  }
418  }
419 
420  graph_def = read_graph(model_filename);
421  if (!graph_def){
422  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
423  av_freep(&sess_config);
424  return AVERROR(EINVAL);
425  }
426  tf_model->graph = TF_NewGraph();
427  tf_model->status = TF_NewStatus();
428  graph_opts = TF_NewImportGraphDefOptions();
429  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
430  TF_DeleteImportGraphDefOptions(graph_opts);
431  TF_DeleteBuffer(graph_def);
432  if (TF_GetCode(tf_model->status) != TF_OK){
433  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
434  av_freep(&sess_config);
435  return DNN_GENERIC_ERROR;
436  }
437 
438  init_op = TF_GraphOperationByName(tf_model->graph, "init");
439  sess_opts = TF_NewSessionOptions();
440 
441  if (sess_config) {
442  TF_SetConfig(sess_opts, sess_config, sess_config_length,tf_model->status);
443  av_freep(&sess_config);
444  if (TF_GetCode(tf_model->status) != TF_OK) {
445  TF_DeleteSessionOptions(sess_opts);
446  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
447  tf_model->ctx.options.sess_config);
448  return DNN_GENERIC_ERROR;
449  }
450  }
451 
452  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
453  TF_DeleteSessionOptions(sess_opts);
454  if (TF_GetCode(tf_model->status) != TF_OK)
455  {
456  av_freep(&sess_config);
457  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
458  return DNN_GENERIC_ERROR;
459  }
460 
461  // Run initialization operation with name "init" if it is present in graph
462  if (init_op){
463  TF_SessionRun(tf_model->session, NULL,
464  NULL, NULL, 0,
465  NULL, NULL, 0,
466  &init_op, 1, NULL, tf_model->status);
467  if (TF_GetCode(tf_model->status) != TF_OK)
468  {
469  av_freep(&sess_config);
470  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
471  return DNN_GENERIC_ERROR;
472  }
473  }
474 
475  return 0;
476 }
477 
478 #define NAME_BUFFER_SIZE 256
479 
/**
 * Load a TensorFlow model (.pb GraphDef) and build the backend context:
 * option parsing, session creation, and the request/lltask/task queues.
 *
 * @param model_filename path of the serialized GraphDef
 * @param func_type      filter function type (frame processing, detection, ...)
 * @param options        option string parsed with "=" and "&" separators
 * @param filter_ctx     owning filter context, stored for pre/post-proc callbacks
 * @return the allocated DNNModel, or NULL on failure (partially-built state
 *         is released through ff_dnn_free_model_tf()).
 */
DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
{
    DNNModel *model = NULL;
    TFModel *tf_model = NULL;
    TFContext *ctx = NULL;

    model = av_mallocz(sizeof(DNNModel));
    if (!model){
        return NULL;
    }

    tf_model = av_mallocz(sizeof(TFModel));
    if (!tf_model){
        av_freep(&model);
        return NULL;
    }
    // generic model and backend-specific struct point at each other
    model->model = tf_model;
    tf_model->model = model;
    ctx = &tf_model->ctx;
    ctx->class = &dnn_tensorflow_class;

    //parse options
    /* NOTE(review): one line is elided here in this extraction —
       presumably av_opt_set_defaults(ctx); confirm against upstream. */
    if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
        goto err;
    }

    if (load_tf_model(tf_model, model_filename) != 0){
        av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", model_filename);
        goto err;
    }

    // default request count: half the CPU count plus one
    if (ctx->options.nireq <= 0) {
        ctx->options.nireq = av_cpu_count() / 2 + 1;
    }

#if !HAVE_PTHREAD_CANCEL
    // async execution needs cancellable threads; fall back to sync otherwise
    if (ctx->options.async) {
        ctx->options.async = 0;
        av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
    }
#endif

    tf_model->request_queue = ff_safe_queue_create();
    if (!tf_model->request_queue) {
        goto err;
    }

    // pre-allocate one request item (TF status + inference request) per nireq
    for (int i = 0; i < ctx->options.nireq; i++) {
        TFRequestItem *item = av_mallocz(sizeof(*item));
        if (!item) {
            goto err;
        }
        item->lltask = NULL;
        /* NOTE(review): one line is elided here in this extraction —
           presumably item->infer_request = tf_create_inference_request(). */
        if (!item->infer_request) {
            av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
            av_freep(&item);
            goto err;
        }
        item->status = TF_NewStatus();
        /* NOTE(review): lines elided here in this extraction — presumably the
           exec_module.start_inference / exec_module.callback assignments. */
        item->exec_module.args = item;

        if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
            destroy_request_item(&item);
            goto err;
        }
    }

    tf_model->lltask_queue = ff_queue_create();
    if (!tf_model->lltask_queue) {
        goto err;
    }

    tf_model->task_queue = ff_queue_create();
    if (!tf_model->task_queue) {
        goto err;
    }

    model->get_input = &get_input_tf;
    model->get_output = &get_output_tf;
    model->options = options;
    model->filter_ctx = filter_ctx;
    model->func_type = func_type;

    return model;
err:
    ff_dnn_free_model_tf(&model);
    return NULL;
}
573 
/**
 * Pop the next last-level task and populate the inference request: look up
 * the input/output operations in the graph, allocate the input tensor, and
 * run the frame pre-processing into it.
 *
 * On failure the request contents are released via tf_free_request(); the
 * TFRequestItem itself stays owned by the caller.
 *
 * @return 0 on success, a negative AVERROR on failure.
 */
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
    DNNData input;
    LastLevelTaskItem *lltask;
    TaskItem *task;
    TFInferRequest *infer_request;
    TFContext *ctx = &tf_model->ctx;
    int ret = 0;

    lltask = ff_queue_pop_front(tf_model->lltask_queue);
    av_assert0(lltask); // caller guarantees the queue is non-empty
    task = lltask->task;
    request->lltask = lltask;

    // query the element type / layout of the model's input operation
    /* NOTE(review): if this fails, the 'goto err' below reaches
       tf_free_request(infer_request) with infer_request still
       uninitialized — verify against upstream. */
    ret = get_input_tf(tf_model, &input, task->input_name);
    if (ret != 0) {
        goto err;
    }

    infer_request = request->infer_request;
    // the actual frame dimensions override whatever the graph declared
    input.height = task->in_frame->height;
    input.width = task->in_frame->width;

    infer_request->tf_input = av_malloc(sizeof(TF_Output));
    if (!infer_request->tf_input) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
    if (!infer_request->tf_input->oper){
        av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
        /* NOTE(review): one line is elided here in this extraction —
           presumably an assignment of an error code to ret. */
        goto err;
    }
    infer_request->tf_input->index = 0;

    infer_request->input_tensor = allocate_input_tensor(&input);
    if (!infer_request->input_tensor){
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }
    // pre-processing writes the frame data straight into the TF tensor
    input.data = (float *)TF_TensorData(infer_request->input_tensor);

    switch (tf_model->model->func_type) {
    case DFT_PROCESS_FRAME:
        if (task->do_ioproc) {
            if (tf_model->model->frame_pre_proc != NULL) {
                tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
            } else {
                /* NOTE(review): one line is elided here in this extraction —
                   presumably the default ff_proc_from_frame_to_dnn() call. */
            }
        }
        break;
        /* NOTE(review): a case label and its body are elided here in this
           extraction — presumably DFT_ANALYTICS_DETECT handling. */
        break;
    default:
        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
        break;
    }

    infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
    if (infer_request->tf_outputs == NULL) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    // output tensors are produced by TF_SessionRun(); start out cleared
    infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
    if (!infer_request->output_tensors) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
        ret = AVERROR(ENOMEM);
        goto err;
    }

    for (int i = 0; i < task->nb_output; ++i) {
        infer_request->output_tensors[i] = NULL;
        infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
        if (!infer_request->tf_outputs[i].oper) {
            av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
            /* NOTE(review): one line is elided here in this extraction —
               presumably an assignment of an error code to ret. */
            goto err;
        }
        infer_request->tf_outputs[i].index = 0;
    }

    return 0;
err:
    tf_free_request(infer_request);
    return ret;
}
667 
/**
 * Completion callback: wrap the output tensors in DNNData descriptors, run
 * the appropriate post-processing for the model's function type, then
 * recycle the request item back onto the request queue.
 */
static void infer_completion_callback(void *args) {
    TFRequestItem *request = args;
    LastLevelTaskItem *lltask = request->lltask;
    TaskItem *task = lltask->task;
    DNNData *outputs;
    TFInferRequest *infer_request = request->infer_request;
    TFModel *tf_model = task->model;
    TFContext *ctx = &tf_model->ctx;

    outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
    if (!outputs) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
        goto err;
    }

    // NHWC layout: dims 1/2/3 are height/width/channels
    for (uint32_t i = 0; i < task->nb_output; ++i) {
        outputs[i].height = TF_Dim(infer_request->output_tensors[i], 1);
        outputs[i].width = TF_Dim(infer_request->output_tensors[i], 2);
        outputs[i].channels = TF_Dim(infer_request->output_tensors[i], 3);
        outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
        outputs[i].dt = TF_TensorType(infer_request->output_tensors[i]);
    }
    switch (tf_model->model->func_type) {
    case DFT_PROCESS_FRAME:
        //it only support 1 output if it's frame in & frame out
        if (task->do_ioproc) {
            if (tf_model->model->frame_post_proc != NULL) {
                tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
            } else {
                /* NOTE(review): one line is elided here in this extraction —
                   presumably the default ff_proc_from_dnn_to_frame() call. */
            }
        } else {
            // get_output path: only the output dimensions are wanted
            task->out_frame->width = outputs[0].width;
            task->out_frame->height = outputs[0].height;
        }
        break;
        /* NOTE(review): a case label is elided here in this extraction —
           presumably DFT_ANALYTICS_DETECT. */
        if (!tf_model->model->detect_post_proc) {
            av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
            // NOTE(review): this early return skips the request recycling
            // below, so the request and tensors are not released — confirm
            // whether that is intentional upstream
            return;
        }
        tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
        goto err;
    }
    task->inference_done++;
err:
    tf_free_request(infer_request);
    av_freep(&outputs);

    // hand the request item back for reuse; if that fails, destroy it
    if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
        destroy_request_item(&request);
        av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
    }
}
725 
726 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
727 {
728  TFModel *tf_model;
729  TFContext *ctx;
730  LastLevelTaskItem *lltask;
731  TaskItem *task;
732  int ret = 0;
733 
734  if (ff_queue_size(lltask_queue) == 0) {
735  destroy_request_item(&request);
736  return 0;
737  }
738 
739  lltask = ff_queue_peek_front(lltask_queue);
740  task = lltask->task;
741  tf_model = task->model;
742  ctx = &tf_model->ctx;
743 
744  ret = fill_model_input_tf(tf_model, request);
745  if (ret != 0) {
746  goto err;
747  }
748 
749  if (task->async) {
750  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
751  goto err;
752  }
753  return 0;
754  }
755  else {
756  ret = tf_start_inference(request);
757  if (ret != 0) {
758  goto err;
759  }
760  infer_completion_callback(request);
761  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
762  }
763 err:
764  tf_free_request(request->infer_request);
765  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
766  destroy_request_item(&request);
767  }
768  ff_dnn_free_model_tf(&tf_model->model);
769  return ret;
770 }
771 
772 int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
773 {
774  TFModel *tf_model = model->model;
775  TFContext *ctx = &tf_model->ctx;
776  TaskItem *task;
777  TFRequestItem *request;
778  int ret = 0;
779 
780  ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
781  if (ret != 0) {
782  return ret;
783  }
784 
785  task = av_malloc(sizeof(*task));
786  if (!task) {
787  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
788  return AVERROR(ENOMEM);
789  }
790 
791  ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
792  if (ret != 0) {
793  av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
794  av_freep(&task);
795  return ret;
796  }
797 
798  if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
799  av_freep(&task);
800  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
801  return AVERROR(ENOMEM);
802  }
803 
804  ret = extract_lltask_from_task(task, tf_model->lltask_queue);
805  if (ret != 0) {
806  av_freep(&task);
807  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
808  return ret;
809  }
810 
811  request = ff_safe_queue_pop_front(tf_model->request_queue);
812  if (!request) {
813  av_freep(&task);
814  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
815  return AVERROR(EINVAL);
816  }
817  return execute_model_tf(request, tf_model->lltask_queue);
818 }
819 
821 {
822  TFModel *tf_model = model->model;
823  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
824 }
825 
826 int ff_dnn_flush_tf(const DNNModel *model)
827 {
828  TFModel *tf_model = model->model;
829  TFContext *ctx = &tf_model->ctx;
830  TFRequestItem *request;
831  int ret;
832 
833  if (ff_queue_size(tf_model->lltask_queue) == 0) {
834  // no pending task need to flush
835  return 0;
836  }
837 
838  request = ff_safe_queue_pop_front(tf_model->request_queue);
839  if (!request) {
840  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
841  return AVERROR(EINVAL);
842  }
843 
844  ret = fill_model_input_tf(tf_model, request);
845  if (ret != 0) {
846  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
847  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
848  destroy_request_item(&request);
849  }
850  return ret;
851  }
852 
853  return ff_dnn_start_inference_async(ctx, &request->exec_module);
854 }
855 
857 {
858  TFModel *tf_model;
859 
860  if (*model){
861  tf_model = (*model)->model;
862  while (ff_safe_queue_size(tf_model->request_queue) != 0) {
864  destroy_request_item(&item);
865  }
867 
868  while (ff_queue_size(tf_model->lltask_queue) != 0) {
870  av_freep(&item);
871  }
872  ff_queue_destroy(tf_model->lltask_queue);
873 
874  while (ff_queue_size(tf_model->task_queue) != 0) {
875  TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
876  av_frame_free(&item->in_frame);
877  av_frame_free(&item->out_frame);
878  av_freep(&item);
879  }
880  ff_queue_destroy(tf_model->task_queue);
881 
882  if (tf_model->graph){
883  TF_DeleteGraph(tf_model->graph);
884  }
885  if (tf_model->session){
886  TF_CloseSession(tf_model->session, tf_model->status);
887  TF_DeleteSession(tf_model->session, tf_model->status);
888  }
889  if (tf_model->status){
890  TF_DeleteStatus(tf_model->status);
891  }
892  av_freep(&tf_model);
893  av_freep(model);
894  }
895 }
AVFILTER_DEFINE_CLASS
AVFILTER_DEFINE_CLASS(dnn_tensorflow)
ff_dnn_get_result_tf
DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
Definition: dnn_backend_tf.c:820
TFOptions::sess_config
char * sess_config
Definition: dnn_backend_tf.c:40
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
TFInferRequest
Stores execution parameters for single call to the TensorFlow C API.
Definition: dnn_backend_tf.c:65
TFInferRequest::tf_outputs
TF_Output * tf_outputs
Definition: dnn_backend_tf.c:66
execute_model_tf
static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
Definition: dnn_backend_tf.c:726
FLAGS
#define FLAGS
Definition: dnn_backend_tf.c:80
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
TFModel::graph
TF_Graph * graph
Definition: dnn_backend_tf.c:53
av_opt_set_defaults
void av_opt_set_defaults(void *s)
Set the values of all AVOption fields to their default values.
Definition: opt.c:1459
ff_safe_queue_pop_front
void * ff_safe_queue_pop_front(SafeQueue *sq)
Remove and free first element from the queue in SafeQueue.
Definition: safe_queue.c:105
out
FILE * out
Definition: movenc.c:54
TFModel::ctx
TFContext ctx
Definition: dnn_backend_tf.c:51
DNNAsyncExecModule
Common Async Execution Mechanism for the DNN Backends.
Definition: dnn_backend_common.h:58
DNNFunctionType
DNNFunctionType
Definition: dnn_interface.h:52
extract_lltask_from_task
static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
Definition: dnn_backend_tf.c:198
ff_queue_pop_front
void * ff_queue_pop_front(Queue *q)
Remove and free first element from the Queue.
Definition: queue.c:151
ff_check_exec_params
int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func_type, DNNExecBaseParams *exec_params)
Definition: dnn_backend_common.c:29
ff_queue_size
size_t ff_queue_size(Queue *q)
Return the length of the Queue.
Definition: queue.c:88
DNN_GENERIC_ERROR
#define DNN_GENERIC_ERROR
Definition: dnn_interface.h:33
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:100
LastLevelTaskItem
Definition: dnn_backend_common.h:50
test::height
int height
Definition: vc1dsp.c:39
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
AVFrame::width
int width
Definition: frame.h:402
SafeQueue
Double-ended queue with mutex locks ensuring data consistency while multithreading.
Definition: safe_queue.c:46
AVOption
AVOption.
Definition: opt.h:251
DNNModel::frame_pre_proc
FramePrePostProc frame_pre_proc
Definition: dnn_interface.h:101
TFInferRequest::input_tensor
TF_Tensor * input_tensor
Definition: dnn_backend_tf.c:69
data
const char data[16]
Definition: mxf.c:148
DNNExecBaseParams::input_name
const char * input_name
Definition: dnn_interface.h:68
load_tf_model
static int load_tf_model(TFModel *tf_model, const char *model_filename)
Definition: dnn_backend_tf.c:383
dnn_io_proc.h
TFModel::request_queue
SafeQueue * request_queue
Definition: dnn_backend_tf.c:56
TaskItem
Definition: dnn_backend_common.h:36
DNNAsyncExecModule::callback
void(* callback)(void *args)
Completion Callback for the backend.
Definition: dnn_backend_common.h:70
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:344
tf_sess_config.config
config
Definition: tf_sess_config.py:33
OFFSET
#define OFFSET(x)
Definition: dnn_backend_tf.c:79
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
destroy_request_item
static void destroy_request_item(TFRequestItem **arg)
Free the TFRequestItem completely.
Definition: dnn_backend_tf.c:184
DNNModel::filter_ctx
AVFilterContext * filter_ctx
Definition: dnn_interface.h:90
ff_queue_create
Queue * ff_queue_create(void)
Create a Queue instance.
Definition: queue.c:47
ff_dnn_execute_model_tf
int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
Definition: dnn_backend_tf.c:772
TaskItem::model
void * model
Definition: dnn_backend_common.h:37
filter_ctx
static FilteringContext * filter_ctx
Definition: transcode.c:51
get_input_tf
static int get_input_tf(void *model, DNNData *input, const char *input_name)
Definition: dnn_backend_tf.c:273
SPACE_CHARS
#define SPACE_CHARS
Definition: dnn_backend_tf.c:353
Queue
Linear double-ended data structure.
Definition: queue.c:33
ff_queue_push_back
int ff_queue_push_back(Queue *q, void *v)
Add data to the tail of the queue.
Definition: queue.c:130
avassert.h
DNN_BACKEND_COMMON_OPTIONS
#define DNN_BACKEND_COMMON_OPTIONS
Definition: dnn_backend_common.h:31
DNN_TF
@ DNN_TF
Definition: dnn_interface.h:35
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
fill_model_input_tf
static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request)
Definition: dnn_backend_tf.c:574
TFRequestItem::exec_module
DNNAsyncExecModule exec_module
Definition: dnn_backend_tf.c:76
float
float
Definition: af_crystalizer.c:122
LastLevelTaskItem::task
TaskItem * task
Definition: dnn_backend_common.h:51
read_graph
static TF_Buffer * read_graph(const char *model_filename)
Definition: dnn_backend_tf.c:218
ff_queue_destroy
void ff_queue_destroy(Queue *q)
Destroy the Queue instance.
Definition: queue.c:72
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
DNNData
Definition: dnn_interface.h:59
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:172
dnn_tensorflow_options
static const AVOption dnn_tensorflow_options[]
Definition: dnn_backend_tf.c:81
ff_dnn_fill_gettingoutput_task
int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
Allocate input and output frames and fill the Task with execution parameters.
Definition: dnn_backend_common.c:162
ctx
AVFormatContext * ctx
Definition: movenc.c:48
TaskItem::inference_todo
uint32_t inference_todo
Definition: dnn_backend_common.h:45
arg
const char * arg
Definition: jacosubdec.c:67
ff_safe_queue_size
size_t ff_safe_queue_size(SafeQueue *sq)
Return the length of the SafeQueue.
Definition: safe_queue.c:80
ff_proc_from_frame_to_dnn
int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:100
ff_dnn_flush_tf
int ff_dnn_flush_tf(const DNNModel *model)
Definition: dnn_backend_tf.c:826
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_frame_to_dnn_detect
int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
Definition: dnn_io_proc.c:254
NULL
#define NULL
Definition: coverity.c:32
ff_safe_queue_create
SafeQueue * ff_safe_queue_create(void)
Create and initialize a SafeQueue instance.
Definition: safe_queue.c:52
DNNModel::frame_post_proc
FramePrePostProc frame_post_proc
Definition: dnn_interface.h:104
av_opt_set_from_string
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1667
tf_create_inference_request
static TFInferRequest * tf_create_inference_request(void)
Create a TensorFlow inference request.
Definition: dnn_backend_tf.c:133
ff_dnn_async_module_cleanup
int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
Join the Async Execution thread and set module pointers to NULL.
Definition: dnn_backend_common.c:92
TFModel::task_queue
Queue * task_queue
Definition: dnn_backend_tf.c:58
infer_completion_callback
static void infer_completion_callback(void *args)
Definition: dnn_backend_tf.c:668
TaskItem::in_frame
AVFrame * in_frame
Definition: dnn_backend_common.h:38
TFModel::status
TF_Status * status
Definition: dnn_backend_tf.c:55
tf_free_request
static void tf_free_request(TFInferRequest *request)
Free the contents of TensorFlow inference request.
Definition: dnn_backend_tf.c:105
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_cpu_count
int av_cpu_count(void)
Definition: cpu.c:206
options
const OptionDef options[]
test::width
int width
Definition: vc1dsp.c:38
AVIOContext
Bytestream IO Context.
Definition: avio.h:166
TaskItem::async
uint8_t async
Definition: dnn_backend_common.h:42
TaskItem::inference_done
uint32_t inference_done
Definition: dnn_backend_common.h:46
cpu.h
DNNModel::detect_post_proc
DetectPostProc detect_post_proc
Definition: dnn_interface.h:106
size
int size
Definition: twinvq_data.h:10344
avio.h
DNNModel::func_type
DNNFunctionType func_type
Definition: dnn_interface.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
TFOptions::nireq
uint32_t nireq
Definition: dnn_backend_tf.c:42
ff_safe_queue_destroy
void ff_safe_queue_destroy(SafeQueue *sq)
Destroy the SafeQueue instance.
Definition: safe_queue.c:69
hex_to_data
static int hex_to_data(uint8_t *data, const char *p)
Definition: dnn_backend_tf.c:354
DNN_FLOAT
@ DNN_FLOAT
Definition: dnn_interface.h:37
tf_start_inference
static int tf_start_inference(void *args)
Start synchronous inference for the TensorFlow model.
Definition: dnn_backend_tf.c:154
ff_dnn_fill_task
int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc)
Fill the Task for Backend Execution.
Definition: dnn_backend_common.c:56
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
ff_dnn_load_model_tf
DNNModel * ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
Definition: dnn_backend_tf.c:480
ff_safe_queue_push_back
int ff_safe_queue_push_back(SafeQueue *sq, void *v)
Add data to the tail of queue in the SafeQueue after locking mutex.
Definition: safe_queue.c:95
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1280
TFContext::options
TFOptions options
Definition: dnn_backend_tf.c:47
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
DFT_ANALYTICS_DETECT
@ DFT_ANALYTICS_DETECT
Definition: dnn_interface.h:55
TFRequestItem::status
TF_Status * status
Definition: dnn_backend_tf.c:75
TFInferRequest::output_tensors
TF_Tensor ** output_tensors
Definition: dnn_backend_tf.c:67
TFModel::session
TF_Session * session
Definition: dnn_backend_tf.c:54
TFRequestItem::infer_request
TFInferRequest * infer_request
Definition: dnn_backend_tf.c:73
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
DNNAsyncExecModule::start_inference
int(* start_inference)(void *request)
Synchronous inference function for the backend with corresponding request item as the argument.
Definition: dnn_backend_common.h:63
DNNAsyncExecModule::args
void * args
Argument for the execution functions.
Definition: dnn_backend_common.h:76
av_toupper
static av_const int av_toupper(int c)
Locale-independent conversion of ASCII characters to uppercase.
Definition: avstring.h:228
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
TFOptions::async
uint8_t async
Definition: dnn_backend_tf.c:41
safe_queue.h
TaskItem::output_names
const char ** output_names
Definition: dnn_backend_common.h:41
len
int len
Definition: vorbis_enc_data.h:426
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
TFInferRequest::tf_input
TF_Output * tf_input
Definition: dnn_backend_tf.c:68
TFContext
Definition: dnn_backend_tf.c:45
ret
ret
Definition: filter_design.txt:187
DNNModel::get_input
int(* get_input)(void *model, DNNData *input, const char *input_name)
Definition: dnn_interface.h:95
DNN_UINT8
@ DNN_UINT8
Definition: dnn_interface.h:37
TFModel::model
DNNModel * model
Definition: dnn_backend_tf.c:52
TFModel
Definition: dnn_backend_tf.c:50
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
TaskItem::out_frame
AVFrame * out_frame
Definition: dnn_backend_common.h:39
AVFrame::height
int height
Definition: frame.h:402
allocate_input_tensor
static TF_Tensor * allocate_input_tensor(const DNNData *input)
Definition: dnn_backend_tf.c:251
dnn_backend_common.h
TFRequestItem::lltask
LastLevelTaskItem * lltask
Definition: dnn_backend_tf.c:74
defs.h
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:633
ff_dnn_get_result_common
DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
Extract input and output frame from the Task Queue after asynchronous inference.
Definition: dnn_backend_common.c:142
ff_queue_peek_front
void * ff_queue_peek_front(Queue *q)
Return a pointer to the data at the head of the queue.
Definition: queue.c:93
DCO_RGB
@ DCO_RGB
Definition: dnn_interface.h:42
avio_open
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1215
AVFilterContext
An instance of a filter.
Definition: avfilter.h:397
ff_dnn_free_model_tf
void ff_dnn_free_model_tf(DNNModel **model)
Definition: dnn_backend_tf.c:856
ff_dnn_start_inference_async
int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
Start asynchronous inference routine for the TensorFlow model on a detached thread.
Definition: dnn_backend_common.c:111
DNNModel
Definition: dnn_interface.h:84
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:623
TaskItem::input_name
const char * input_name
Definition: dnn_backend_common.h:40
DNNModel::options
const char * options
Definition: dnn_interface.h:88
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
TFOptions
Definition: dnn_backend_tf.c:39
free_buffer
static void free_buffer(void *data, size_t length)
Definition: dnn_backend_tf.c:93
get_output_tf
static int get_output_tf(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_backend_tf.c:309
DNNExecBaseParams
Definition: dnn_interface.h:67
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
TaskItem::do_ioproc
uint8_t do_ioproc
Definition: dnn_backend_common.h:43
DNNModel::get_output
int(* get_output)(void *model, const char *input_name, int input_width, int input_height, const char *output_name, int *output_width, int *output_height)
Definition: dnn_interface.h:97
avstring.h
AV_OPT_TYPE_STRING
@ AV_OPT_TYPE_STRING
Definition: opt.h:229
TFRequestItem
Definition: dnn_backend_tf.c:72
DNNAsyncStatusType
DNNAsyncStatusType
Definition: dnn_interface.h:45
dnn_backend_tf.h
DFT_PROCESS_FRAME
@ DFT_PROCESS_FRAME
Definition: dnn_interface.h:54
TFModel::lltask_queue
Queue * lltask_queue
Definition: dnn_backend_tf.c:57
TaskItem::nb_output
uint32_t nb_output
Definition: dnn_backend_common.h:44
DNNModel::model
void * model
Definition: dnn_interface.h:86
ff_proc_from_dnn_to_frame
int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
Definition: dnn_io_proc.c:27