FFmpeg
dnn_backend_tf.c
1 /*
2  * Copyright (c) 2018 Sergey Lavrushkin
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * DNN TensorFlow backend implementation.
24  */
25 
26 #include "libavformat/avio.h"
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/cpu.h"
30 #include "libavutil/opt.h"
31 #include "libavcodec/defs.h"
32 #include "../internal.h"
33 #include "dnn_io_proc.h"
34 #include "dnn_backend_common.h"
35 #include "safe_queue.h"
36 #include <tensorflow/c/c_api.h>
37 
38 typedef struct TFOptions{
39  char *sess_config;
40  uint8_t async;
41  uint32_t nireq;
42 } TFOptions;
43 
44 typedef struct TFContext {
45  const AVClass *class;
46  TFOptions options;
47 } TFContext;
48 
49 typedef struct TFModel{
50  TFContext ctx;
51  DNNModel *model;
52  TF_Graph *graph;
53  TF_Session *session;
54  TF_Status *status;
55  SafeQueue *request_queue;
56  Queue *lltask_queue;
57  Queue *task_queue;
58 } TFModel;
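/*
 * Queue roles, as used later in this file: request_queue holds the reusable
 * TFRequestItem instances created in dnn_load_model_tf (one per parallel
 * inference request), lltask_queue holds the LastLevelTaskItem entries
 * extracted from tasks, and task_queue keeps TaskItem entries until their
 * results are collected by dnn_get_result_tf.
 */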
59 
60 /**
61  * Stores execution parameters for a single
62  * call to the TensorFlow C API.
63  */
64 typedef struct TFInferRequest {
65  TF_Output *tf_outputs;
66  TF_Tensor **output_tensors;
67  TF_Output *tf_input;
68  TF_Tensor *input_tensor;
69 } TFInferRequest;
70 
71 typedef struct TFRequestItem {
72  TFInferRequest *infer_request;
73  LastLevelTaskItem *lltask;
74  TF_Status *status;
75  DNNAsyncExecModule exec_module;
76 } TFRequestItem;
77 
78 #define OFFSET(x) offsetof(TFContext, x)
79 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
80 static const AVOption dnn_tensorflow_options[] = {
81  { "sess_config", "config for SessionOptions", OFFSET(options.sess_config), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
82  DNN_BACKEND_COMMON_OPTIONS
83  { NULL }
84 };
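/*
 * Illustrative use of these options (a sketch based on the FFmpeg filter
 * documentation, not code in this file; the dnn_processing filter and its
 * backend_configs option are assumptions here, and the sess_config value is a
 * placeholder generated by tools/python/tf_sess_config.py):
 *
 *   ffmpeg -i input.png -vf \
 *     dnn_processing=dnn_backend=tensorflow:model=srcnn.pb:input=x:output=y:backend_configs=sess_config=0x<hex> \
 *     output.png
 */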
85 
86 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
87 
88 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
89 static void infer_completion_callback(void *args);
90 static inline void destroy_request_item(TFRequestItem **arg);
91 
92 static void free_buffer(void *data, size_t length)
93 {
94  av_freep(&data);
95 }
96 
97 /**
98  * Free the contents of TensorFlow inference request.
99  * It does not free the TFInferRequest instance.
100  *
101  * @param request pointer to TFInferRequest instance.
102  * NULL pointer is allowed.
103  */
104 static void tf_free_request(TFInferRequest *request)
105 {
106  if (!request)
107  return;
108  if (request->input_tensor) {
109  TF_DeleteTensor(request->input_tensor);
110  request->input_tensor = NULL;
111  }
112  av_freep(&request->tf_input);
113  av_freep(&request->tf_outputs);
114  if (request->output_tensors) {
115  int nb_output = sizeof(*request->output_tensors)/sizeof(request->output_tensors[0]);
116  for (uint32_t i = 0; i < nb_output; ++i) {
117  if (request->output_tensors[i]) {
118  TF_DeleteTensor(request->output_tensors[i]);
119  request->output_tensors[i] = NULL;
120  }
121  }
122  av_freep(&request->output_tensors);
123  }
124 }
125 
126 /**
127  * Create a TensorFlow inference request. All properties
128  * are initially unallocated and set as NULL.
129  *
130  * @return pointer to the allocated TFInferRequest instance.
131  */
132 static TFInferRequest *tf_create_inference_request(void)
133 {
134  TFInferRequest *infer_request = av_malloc(sizeof(TFInferRequest));
135  if (!infer_request) {
136  return NULL;
137  }
138  infer_request->tf_outputs = NULL;
139  infer_request->tf_input = NULL;
140  infer_request->input_tensor = NULL;
141  infer_request->output_tensors = NULL;
142  return infer_request;
143 }
144 
145 /**
146  * Start synchronous inference for the TensorFlow model.
147  *
148  * @param request pointer to the TFRequestItem for inference
149  * @retval 0 if execution is successful
150  * @retval AVERROR(EINVAL) if request is NULL
151  * @retval DNN_GENERIC_ERROR if execution fails
152  */
153 static int tf_start_inference(void *args)
154 {
155  TFRequestItem *request = args;
156  TFInferRequest *infer_request = request->infer_request;
157  LastLevelTaskItem *lltask = request->lltask;
158  TaskItem *task = lltask->task;
159  TFModel *tf_model = task->model;
160 
161  if (!request) {
162  av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
163  return AVERROR(EINVAL);
164  }
165 
166  TF_SessionRun(tf_model->session, NULL,
167  infer_request->tf_input, &infer_request->input_tensor, 1,
168  infer_request->tf_outputs, infer_request->output_tensors,
169  task->nb_output, NULL, 0, NULL,
170  request->status);
171  if (TF_GetCode(request->status) != TF_OK) {
172  av_log(&tf_model->ctx, AV_LOG_ERROR, "%s", TF_Message(request->status));
173  return DNN_GENERIC_ERROR;
174  }
175  return 0;
176 }
177 
178 /**
179  * Free the TFRequestItem completely.
180  *
181  * @param arg Address of the TFRequestItem instance.
182  */
183 static inline void destroy_request_item(TFRequestItem **arg) {
184  TFRequestItem *request;
185  if (!arg) {
186  return;
187  }
188  request = *arg;
189  tf_free_request(request->infer_request);
190  av_freep(&request->infer_request);
191  av_freep(&request->lltask);
192  TF_DeleteStatus(request->status);
193  ff_dnn_async_module_cleanup(&request->exec_module);
194  av_freep(arg);
195 }
196 
197 static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
198 {
199  TFModel *tf_model = task->model;
200  TFContext *ctx = &tf_model->ctx;
201  LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
202  if (!lltask) {
203  av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
204  return AVERROR(ENOMEM);
205  }
206  task->inference_todo = 1;
207  task->inference_done = 0;
208  lltask->task = task;
209  if (ff_queue_push_back(lltask_queue, lltask) < 0) {
210  av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
211  av_freep(&lltask);
212  return AVERROR(ENOMEM);
213  }
214  return 0;
215 }
216 
217 static TF_Buffer *read_graph(const char *model_filename)
218 {
219  TF_Buffer *graph_buf;
220  unsigned char *graph_data = NULL;
221  AVIOContext *model_file_context;
222  long size, bytes_read;
223 
224  if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
225  return NULL;
226  }
227 
228  size = avio_size(model_file_context);
229 
230  graph_data = av_malloc(size);
231  if (!graph_data){
232  avio_closep(&model_file_context);
233  return NULL;
234  }
235  bytes_read = avio_read(model_file_context, graph_data, size);
236  avio_closep(&model_file_context);
237  if (bytes_read != size){
238  av_freep(&graph_data);
239  return NULL;
240  }
241 
242  graph_buf = TF_NewBuffer();
243  graph_buf->data = graph_data;
244  graph_buf->length = size;
245  graph_buf->data_deallocator = free_buffer;
246 
247  return graph_buf;
248 }
249 
250 static TF_Tensor *allocate_input_tensor(const DNNData *input)
251 {
252  TF_DataType dt;
253  size_t size;
254  int64_t input_dims[4] = { 0 };
255 
256  input_dims[0] = 1;
257  input_dims[1] = input->dims[dnn_get_height_idx_by_layout(input->layout)];
258  input_dims[2] = input->dims[dnn_get_width_idx_by_layout(input->layout)];
259  input_dims[3] = input->dims[dnn_get_channel_idx_by_layout(input->layout)];
260  switch (input->dt) {
261  case DNN_FLOAT:
262  dt = TF_FLOAT;
263  size = sizeof(float);
264  break;
265  case DNN_UINT8:
266  dt = TF_UINT8;
267  size = 1;
268  break;
269  default:
270  av_assert0(!"should not reach here");
271  }
272 
273  return TF_AllocateTensor(dt, input_dims, 4,
274  input_dims[1] * input_dims[2] * input_dims[3] * size);
275 }
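/*
 * Worked example for the size passed to TF_AllocateTensor above (illustrative
 * numbers, not taken from this file): a 1x224x224x3 NHWC float input needs
 * 224 * 224 * 3 * sizeof(float) = 602112 bytes; the batch dimension
 * input_dims[0] is fixed to 1 and therefore does not appear in the product.
 */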
276 
277 static int get_input_tf(void *model, DNNData *input, const char *input_name)
278 {
279  TFModel *tf_model = model;
280  TFContext *ctx = &tf_model->ctx;
281  TF_Status *status;
282  TF_DataType dt;
283  int64_t dims[4];
284 
285  TF_Output tf_output;
286  tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
287  if (!tf_output.oper) {
288  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
289  return AVERROR(EINVAL);
290  }
291 
292  tf_output.index = 0;
293  dt = TF_OperationOutputType(tf_output);
294  switch (dt) {
295  case TF_FLOAT:
296  input->dt = DNN_FLOAT;
297  break;
298  case TF_UINT8:
299  input->dt = DNN_UINT8;
300  break;
301  default:
302  av_log(ctx, AV_LOG_ERROR, "Unsupported output type %d in model\n", dt);
303  return AVERROR(EINVAL);
304  }
305  input->order = DCO_RGB;
306 
307  status = TF_NewStatus();
308  TF_GraphGetTensorShape(tf_model->graph, tf_output, dims, 4, status);
309  if (TF_GetCode(status) != TF_OK){
310  TF_DeleteStatus(status);
311  av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
312  return DNN_GENERIC_ERROR;
313  }
314  TF_DeleteStatus(status);
315 
316  // currently only NHWC is supported
317  av_assert0(dims[0] == 1 || dims[0] == -1);
318  for (int i = 0; i < 4; i++)
319  input->dims[i] = dims[i];
320  input->layout = DL_NHWC;
321 
322  return 0;
323 }
324 
325 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
326  const char *output_name, int *output_width, int *output_height)
327 {
328  int ret;
329  TFModel *tf_model = model;
330  TFContext *ctx = &tf_model->ctx;
331  TaskItem task;
332  TFRequestItem *request;
333  DNNExecBaseParams exec_params = {
334  .input_name = input_name,
335  .output_names = &output_name,
336  .nb_output = 1,
337  .in_frame = NULL,
338  .out_frame = NULL,
339  };
340 
341  ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
342  if (ret != 0) {
343  goto err;
344  }
345 
346  ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
347  if (ret != 0) {
348  av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
349  goto err;
350  }
351 
352  request = ff_safe_queue_pop_front(tf_model->request_queue);
353  if (!request) {
354  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
355  ret = AVERROR(EINVAL);
356  goto err;
357  }
358 
359  ret = execute_model_tf(request, tf_model->lltask_queue);
360  *output_width = task.out_frame->width;
361  *output_height = task.out_frame->height;
362 
363 err:
364  av_frame_free(&task.out_frame);
365  av_frame_free(&task.in_frame);
366  return ret;
367 }
368 
369 #define SPACE_CHARS " \t\r\n"
370 static int hex_to_data(uint8_t *data, const char *p)
371 {
372  int c, len, v;
373 
374  len = 0;
375  v = 1;
376  for (;;) {
377  p += strspn(p, SPACE_CHARS);
378  if (*p == '\0')
379  break;
380  c = av_toupper((unsigned char) *p++);
381  if (c >= '0' && c <= '9')
382  c = c - '0';
383  else if (c >= 'A' && c <= 'F')
384  c = c - 'A' + 10;
385  else
386  break;
387  v = (v << 4) | c;
388  if (v & 0x100) {
389  if (data) {
390  data[len] = v;
391  }
392  len++;
393  v = 1;
394  }
395  }
396  return len;
397 }
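/*
 * Usage sketch for hex_to_data() (the buffer and literal are illustrative):
 * a first pass with data == NULL only counts the decoded bytes, a second pass
 * fills the buffer, which is exactly how load_tf_model() below sizes and then
 * fills sess_config.
 *
 *   uint8_t buf[2];
 *   int n = hex_to_data(NULL, "0a10");  // n == 2
 *   hex_to_data(buf, "0a10");           // buf[0] == 0x0A, buf[1] == 0x10
 */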
398 
399 static int load_tf_model(TFModel *tf_model, const char *model_filename)
400 {
401  TFContext *ctx = &tf_model->ctx;
402  TF_Buffer *graph_def;
403  TF_ImportGraphDefOptions *graph_opts;
404  TF_SessionOptions *sess_opts;
405  const TF_Operation *init_op;
406  uint8_t *sess_config = NULL;
407  int sess_config_length = 0;
408 
409  // prepare the sess config data
410  if (tf_model->ctx.options.sess_config != NULL) {
411  const char *config;
412  /*
413  tf_model->ctx.options.sess_config is a hex string representing the serialized
414  proto required by TF_SetConfig below, so the serialized proto must first be
415  generated with a Python script; tools/python/tf_sess_config.py is an example
416  script that generates such a sess_config value.
417  */
418  if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
419  av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
420  return AVERROR(EINVAL);
421  }
422  config = tf_model->ctx.options.sess_config + 2;
423  sess_config_length = hex_to_data(NULL, config);
424 
425  sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
426  if (!sess_config) {
427  av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
428  return AVERROR(ENOMEM);
429  }
430  if (hex_to_data(sess_config, config) < 0) {
431  av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
432  return AVERROR(EINVAL);
433  }
434  }
435 
436  graph_def = read_graph(model_filename);
437  if (!graph_def){
438  av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
439  av_freep(&sess_config);
440  return AVERROR(EINVAL);
441  }
442  tf_model->graph = TF_NewGraph();
443  tf_model->status = TF_NewStatus();
444  graph_opts = TF_NewImportGraphDefOptions();
445  TF_GraphImportGraphDef(tf_model->graph, graph_def, graph_opts, tf_model->status);
446  TF_DeleteImportGraphDefOptions(graph_opts);
447  TF_DeleteBuffer(graph_def);
448  if (TF_GetCode(tf_model->status) != TF_OK){
449  av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
450  av_freep(&sess_config);
451  return DNN_GENERIC_ERROR;
452  }
453 
454  init_op = TF_GraphOperationByName(tf_model->graph, "init");
455  sess_opts = TF_NewSessionOptions();
456 
457  if (sess_config) {
458  TF_SetConfig(sess_opts, sess_config, sess_config_length, tf_model->status);
459  av_freep(&sess_config);
460  if (TF_GetCode(tf_model->status) != TF_OK) {
461  TF_DeleteSessionOptions(sess_opts);
462  av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
463  tf_model->ctx.options.sess_config);
464  return DNN_GENERIC_ERROR;
465  }
466  }
467 
468  tf_model->session = TF_NewSession(tf_model->graph, sess_opts, tf_model->status);
469  TF_DeleteSessionOptions(sess_opts);
470  if (TF_GetCode(tf_model->status) != TF_OK)
471  {
472  av_freep(&sess_config);
473  av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
474  return DNN_GENERIC_ERROR;
475  }
476 
477  // Run initialization operation with name "init" if it is present in graph
478  if (init_op){
479  TF_SessionRun(tf_model->session, NULL,
480  NULL, NULL, 0,
481  NULL, NULL, 0,
482  &init_op, 1, NULL, tf_model->status);
483  if (TF_GetCode(tf_model->status) != TF_OK)
484  {
485  av_freep(&sess_config);
486  av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
487  return DNN_GENERIC_ERROR;
488  }
489  }
490 
491  return 0;
492 }
493 
494 static void dnn_free_model_tf(DNNModel **model)
495 {
496  TFModel *tf_model;
497 
498  if (*model){
499  tf_model = (*model)->model;
500  while (ff_safe_queue_size(tf_model->request_queue) != 0) {
501  TFRequestItem *item = ff_safe_queue_pop_front(tf_model->request_queue);
502  destroy_request_item(&item);
503  }
504  ff_safe_queue_destroy(tf_model->request_queue);
505 
506  while (ff_queue_size(tf_model->lltask_queue) != 0) {
507  LastLevelTaskItem *item = ff_queue_pop_front(tf_model->lltask_queue);
508  av_freep(&item);
509  }
510  ff_queue_destroy(tf_model->lltask_queue);
511 
512  while (ff_queue_size(tf_model->task_queue) != 0) {
513  TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
514  av_frame_free(&item->in_frame);
515  av_frame_free(&item->out_frame);
516  av_freep(&item);
517  }
518  ff_queue_destroy(tf_model->task_queue);
519 
520  if (tf_model->graph){
521  TF_DeleteGraph(tf_model->graph);
522  }
523  if (tf_model->session){
524  TF_CloseSession(tf_model->session, tf_model->status);
525  TF_DeleteSession(tf_model->session, tf_model->status);
526  }
527  if (tf_model->status){
528  TF_DeleteStatus(tf_model->status);
529  }
530  av_freep(&tf_model);
531  av_freep(model);
532  }
533 }
534 
535 static DNNModel *dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
536 {
537  DNNModel *model = NULL;
538  TFModel *tf_model = NULL;
539  TFContext *ctx = NULL;
540 
541  model = av_mallocz(sizeof(DNNModel));
542  if (!model){
543  return NULL;
544  }
545 
546  tf_model = av_mallocz(sizeof(TFModel));
547  if (!tf_model){
548  av_freep(&model);
549  return NULL;
550  }
551  model->model = tf_model;
552  tf_model->model = model;
553  ctx = &tf_model->ctx;
554  ctx->class = &dnn_tensorflow_class;
555 
556  // parse options
557  av_opt_set_defaults(ctx);
558  if (av_opt_set_from_string(ctx, options, NULL, "=", "&") < 0) {
559  av_log(ctx, AV_LOG_ERROR, "Failed to parse options \"%s\"\n", options);
560  goto err;
561  }
562 
563  if (load_tf_model(tf_model, model_filename) != 0){
564  av_log(ctx, AV_LOG_ERROR, "Failed to load TensorFlow model: \"%s\"\n", model_filename);
565  goto err;
566  }
567 
568  if (ctx->options.nireq <= 0) {
569  ctx->options.nireq = av_cpu_count() / 2 + 1;
570  }
571 
572 #if !HAVE_PTHREAD_CANCEL
573  if (ctx->options.async) {
574  ctx->options.async = 0;
575  av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
576  }
577 #endif
578 
579  tf_model->request_queue = ff_safe_queue_create();
580  if (!tf_model->request_queue) {
581  goto err;
582  }
583 
584  for (int i = 0; i < ctx->options.nireq; i++) {
585  TFRequestItem *item = av_mallocz(sizeof(*item));
586  if (!item) {
587  goto err;
588  }
589  item->lltask = NULL;
590  item->infer_request = tf_create_inference_request();
591  if (!item->infer_request) {
592  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for TensorFlow inference request\n");
593  av_freep(&item);
594  goto err;
595  }
596  item->status = TF_NewStatus();
597  item->exec_module.start_inference = &tf_start_inference;
598  item->exec_module.callback = &infer_completion_callback;
599  item->exec_module.args = item;
600 
601  if (ff_safe_queue_push_back(tf_model->request_queue, item) < 0) {
602  destroy_request_item(&item);
603  goto err;
604  }
605  }
606 
607  tf_model->lltask_queue = ff_queue_create();
608  if (!tf_model->lltask_queue) {
609  goto err;
610  }
611 
612  tf_model->task_queue = ff_queue_create();
613  if (!tf_model->task_queue) {
614  goto err;
615  }
616 
617  model->get_input = &get_input_tf;
618  model->get_output = &get_output_tf;
619  model->options = options;
620  model->filter_ctx = filter_ctx;
621  model->func_type = func_type;
622 
623  return model;
624 err:
625  dnn_free_model_tf(&model);
626  return NULL;
627 }
628 
629 static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
630  DNNData input = { 0 };
631  LastLevelTaskItem *lltask;
632  TaskItem *task;
633  TFInferRequest *infer_request = NULL;
634  TFContext *ctx = &tf_model->ctx;
635  int ret = 0;
636 
637  lltask = ff_queue_pop_front(tf_model->lltask_queue);
638  av_assert0(lltask);
639  task = lltask->task;
640  request->lltask = lltask;
641 
642  ret = get_input_tf(tf_model, &input, task->input_name);
643  if (ret != 0) {
644  goto err;
645  }
646 
647  infer_request = request->infer_request;
648  input.dims[1] = task->in_frame->height;
649  input.dims[2] = task->in_frame->width;
650 
651  infer_request->tf_input = av_malloc(sizeof(TF_Output));
652  if (!infer_request->tf_input) {
653  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
654  ret = AVERROR(ENOMEM);
655  goto err;
656  }
657 
658  infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
659  if (!infer_request->tf_input->oper){
660  av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
661  ret = DNN_GENERIC_ERROR;
662  goto err;
663  }
664  infer_request->tf_input->index = 0;
665 
666  infer_request->input_tensor = allocate_input_tensor(&input);
667  if (!infer_request->input_tensor){
668  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
669  ret = AVERROR(ENOMEM);
670  goto err;
671  }
672  input.data = (float *)TF_TensorData(infer_request->input_tensor);
673 
674  switch (tf_model->model->func_type) {
675  case DFT_PROCESS_FRAME:
676  if (task->do_ioproc) {
677  if (tf_model->model->frame_pre_proc != NULL) {
678  tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
679  } else {
680  ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
681  }
682  }
683  break;
684  case DFT_ANALYTICS_DETECT:
685  ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
686  break;
687  default:
688  avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
689  break;
690  }
691 
692  infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
693  if (infer_request->tf_outputs == NULL) {
694  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
695  ret = AVERROR(ENOMEM);
696  goto err;
697  }
698 
699  infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
700  if (!infer_request->output_tensors) {
701  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
702  ret = AVERROR(ENOMEM);
703  goto err;
704  }
705 
706  for (int i = 0; i < task->nb_output; ++i) {
707  infer_request->output_tensors[i] = NULL;
708  infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
709  if (!infer_request->tf_outputs[i].oper) {
710  av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
711  ret = DNN_GENERIC_ERROR;
712  goto err;
713  }
714  infer_request->tf_outputs[i].index = 0;
715  }
716 
717  return 0;
718 err:
719  tf_free_request(infer_request);
720  return ret;
721 }
722 
723 static void infer_completion_callback(void *args) {
724  TFRequestItem *request = args;
725  LastLevelTaskItem *lltask = request->lltask;
726  TaskItem *task = lltask->task;
727  DNNData *outputs;
728  TFInferRequest *infer_request = request->infer_request;
729  TFModel *tf_model = task->model;
730  TFContext *ctx = &tf_model->ctx;
731 
732  outputs = av_calloc(task->nb_output, sizeof(*outputs));
733  if (!outputs) {
734  av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
735  goto err;
736  }
737 
738  for (uint32_t i = 0; i < task->nb_output; ++i) {
739  outputs[i].dims[dnn_get_height_idx_by_layout(outputs[i].layout)] =
740  TF_Dim(infer_request->output_tensors[i], 1);
741  outputs[i].dims[dnn_get_width_idx_by_layout(outputs[i].layout)] =
742  TF_Dim(infer_request->output_tensors[i], 2);
743  outputs[i].dims[dnn_get_channel_idx_by_layout(outputs[i].layout)] =
744  TF_Dim(infer_request->output_tensors[i], 3);
745  outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
746  outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
747  }
748  switch (tf_model->model->func_type) {
749  case DFT_PROCESS_FRAME:
750  // it only supports one output for frame-in, frame-out processing
751  if (task->do_ioproc) {
752  if (tf_model->model->frame_post_proc != NULL) {
753  tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
754  } else {
755  ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
756  }
757  } else {
758  task->out_frame->width =
759  outputs[0].dims[dnn_get_width_idx_by_layout(outputs[0].layout)];
760  task->out_frame->height =
761  outputs[0].dims[dnn_get_height_idx_by_layout(outputs[0].layout)];
762  }
763  break;
764  case DFT_ANALYTICS_DETECT:
765  if (!tf_model->model->detect_post_proc) {
766  av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
767  goto err;
768  }
769  tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
770  break;
771  default:
772  av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
773  goto err;
774  }
775  task->inference_done++;
776 err:
777  tf_free_request(infer_request);
778  av_freep(&outputs);
779 
780  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
781  destroy_request_item(&request);
782  av_log(ctx, AV_LOG_ERROR, "Failed to push back request_queue.\n");
783  }
784 }
785 
786 static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
787 {
788  TFModel *tf_model;
789  TFContext *ctx;
790  LastLevelTaskItem *lltask;
791  TaskItem *task;
792  int ret = 0;
793 
794  if (ff_queue_size(lltask_queue) == 0) {
795  destroy_request_item(&request);
796  return 0;
797  }
798 
799  lltask = ff_queue_peek_front(lltask_queue);
800  task = lltask->task;
801  tf_model = task->model;
802  ctx = &tf_model->ctx;
803 
804  ret = fill_model_input_tf(tf_model, request);
805  if (ret != 0) {
806  goto err;
807  }
808 
809  if (task->async) {
810  if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
811  goto err;
812  }
813  return 0;
814  }
815  else {
816  ret = tf_start_inference(request);
817  if (ret != 0) {
818  goto err;
819  }
820  infer_completion_callback(request);
821  return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
822  }
823 err:
824  tf_free_request(request->infer_request);
825  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
826  destroy_request_item(&request);
827  }
828  dnn_free_model_tf(&tf_model->model);
829  return ret;
830 }
831 
832 static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
833 {
834  TFModel *tf_model = model->model;
835  TFContext *ctx = &tf_model->ctx;
836  TaskItem *task;
837  TFRequestItem *request;
838  int ret = 0;
839 
840  ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
841  if (ret != 0) {
842  return ret;
843  }
844 
845  task = av_malloc(sizeof(*task));
846  if (!task) {
847  av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
848  return AVERROR(ENOMEM);
849  }
850 
851  ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
852  if (ret != 0) {
853  av_log(ctx, AV_LOG_ERROR, "Fill task with invalid parameter(s).\n");
854  av_freep(&task);
855  return ret;
856  }
857 
858  if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
859  av_freep(&task);
860  av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
861  return AVERROR(ENOMEM);
862  }
863 
864  ret = extract_lltask_from_task(task, tf_model->lltask_queue);
865  if (ret != 0) {
866  av_freep(&task);
867  av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
868  return ret;
869  }
870 
871  request = ff_safe_queue_pop_front(tf_model->request_queue);
872  if (!request) {
873  av_freep(&task);
874  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
875  return AVERROR(EINVAL);
876  }
877  return execute_model_tf(request, tf_model->lltask_queue);
878 }
879 
880 static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
881 {
882  TFModel *tf_model = model->model;
883  return ff_dnn_get_result_common(tf_model->task_queue, in, out);
884 }
885 
886 static int dnn_flush_tf(const DNNModel *model)
887 {
888  TFModel *tf_model = model->model;
889  TFContext *ctx = &tf_model->ctx;
890  TFRequestItem *request;
891  int ret;
892 
893  if (ff_queue_size(tf_model->lltask_queue) == 0) {
894  // no pending task needs to be flushed
895  return 0;
896  }
897 
898  request = ff_safe_queue_pop_front(tf_model->request_queue);
899  if (!request) {
900  av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
901  return AVERROR(EINVAL);
902  }
903 
904  ret = fill_model_input_tf(tf_model, request);
905  if (ret != 0) {
906  av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
907  if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
908  destroy_request_item(&request);
909  }
910  return ret;
911  }
912 
913  return ff_dnn_start_inference_async(ctx, &request->exec_module);
914 }
915 
916 const DNNModule ff_dnn_backend_tf = {
917  .load_model = dnn_load_model_tf,
918  .execute_model = dnn_execute_model_tf,
919  .get_result = dnn_get_result_tf,
920  .flush = dnn_flush_tf,
921  .free_model = dnn_free_model_tf,
922 };
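/*
 * Usage sketch for this backend through the DNNModule interface (a simplified
 * sketch, not code from FFmpeg; the model file, node names, filter_ctx and the
 * DAST_SUCCESS constant from dnn_interface.h are assumptions for illustration):
 *
 *   const DNNModule *module = &ff_dnn_backend_tf;
 *   DNNModel *model = module->load_model("srcnn.pb", DFT_PROCESS_FRAME,
 *                                         "sess_config=0x<hex>", filter_ctx);
 *   DNNExecBaseParams params = {
 *       .input_name   = "x",
 *       .output_names = (const char *[]){ "y" },
 *       .nb_output    = 1,
 *       .in_frame     = in_frame,
 *       .out_frame    = out_frame,
 *   };
 *   module->execute_model(model, &params);
 *   // later, once the (possibly asynchronous) inference has finished:
 *   if (module->get_result(model, &in_frame, &out_frame) == DAST_SUCCESS) {
 *       // out_frame now holds the processed result
 *   }
 *   module->free_model(&model);
 */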