FFmpeg
vf_scale_qsv.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * scale video filter - QSV
 */
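/*
 * Illustrative usage only (not part of this file; exact options depend on the
 * build and the surrounding filter graph): the filter consumes and produces
 * AV_PIX_FMT_QSV hardware frames, so it is normally combined with a QSV
 * decoder/encoder, e.g.
 *
 *     ffmpeg -hwaccel qsv -c:v h264_qsv -i in.mp4 \
 *            -vf scale_qsv=w=1280:h=720:format=nv12 -c:v h264_qsv out.mp4
 */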

#include <mfx/mfxvideo.h>

#include <stdio.h>
#include <string.h>

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavfilter/qsvvpp.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
static const char *const var_names[] = {
    "PI",
    "PHI",
    "E",
    "in_w",   "iw",
    "in_h",   "ih",
    "out_w",  "ow",
    "out_h",  "oh",
    "a", "dar",
    "sar",
    NULL
};

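/* Indices into the var_values[] array used when evaluating the "w"/"h"
 * expressions; the order must match var_names[] above. */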
enum var_name {
    VAR_PI,
    VAR_PHI,
    VAR_E,
    VAR_IN_W,   VAR_IW,
    VAR_IN_H,   VAR_IH,
    VAR_OUT_W,  VAR_OW,
    VAR_OUT_H,  VAR_OH,
    VAR_A, VAR_DAR,
    VAR_SAR,
    VARS_NB
};

#define QSV_HAVE_SCALING_CONFIG QSV_VERSION_ATLEAST(1, 19)
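/* mfxExtVPPScaling (the scaling quality/power hint behind the "mode" option
 * below) only exists from libmfx API 1.19 on, so the option is a no-op with
 * older SDKs. */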

typedef struct QSVScaleContext {
    const AVClass *class;

    /* a clone of the main session, used internally for scaling */
    mfxSession   session;

    mfxMemId *mem_ids_in;
    int nb_mem_ids_in;

    mfxMemId *mem_ids_out;
    int nb_mem_ids_out;

    mfxFrameSurface1 **surface_ptrs_in;
    int             nb_surface_ptrs_in;

    mfxFrameSurface1 **surface_ptrs_out;
    int             nb_surface_ptrs_out;

    mfxExtOpaqueSurfaceAlloc opaque_alloc;

#if QSV_HAVE_SCALING_CONFIG
    mfxExtVPPScaling         scale_conf;
#endif
    int                      mode;

    mfxExtBuffer             *ext_buffers[1 + QSV_HAVE_SCALING_CONFIG];
    int                      num_ext_buf;

    int shift_width, shift_height;

    /**
     * New dimensions. Special values are:
     *   0 = original width/height
     *  -1 = keep original aspect
     */
    int w, h;

    /**
     * Output sw format. AV_PIX_FMT_NONE for no conversion.
     */
    enum AVPixelFormat format;

    char *w_expr;               ///< width  expression string
    char *h_expr;               ///< height expression string
    char *format_str;
} QSVScaleContext;

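/* Filter init: only the "format" option string is parsed here ("same" keeps
 * the input surface format); session and pool setup happen later in
 * qsvscale_config_props(), once the input hw frames context is known. */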
static av_cold int qsvscale_init(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (!strcmp(s->format_str, "same")) {
        s->format = AV_PIX_FMT_NONE;
    } else {
        s->format = av_get_pix_fmt(s->format_str);
        if (s->format == AV_PIX_FMT_NONE) {
            av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str);
            return AVERROR(EINVAL);
        }
    }

    return 0;
}

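/* Close the private VPP session and drop the allocator bookkeeping arrays.
 * Also used by init_scale_session() to reset the filter before
 * reconfiguration. */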
static av_cold void qsvscale_uninit(AVFilterContext *ctx)
{
    QSVScaleContext *s = ctx->priv;

    if (s->session) {
        MFXClose(s->session);
        s->session = NULL;
    }

    av_freep(&s->mem_ids_in);
    av_freep(&s->mem_ids_out);
    s->nb_mem_ids_in  = 0;
    s->nb_mem_ids_out = 0;

    av_freep(&s->surface_ptrs_in);
    av_freep(&s->surface_ptrs_out);
    s->nb_surface_ptrs_in  = 0;
    s->nb_surface_ptrs_out = 0;
}

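/* Both pads only take AV_PIX_FMT_QSV hardware frames; the underlying software
 * format is carried by the hw frames context rather than negotiated here. */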
static int qsvscale_query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_formats[] = {
        AV_PIX_FMT_QSV, AV_PIX_FMT_NONE,
    };
    AVFilterFormats *pix_fmts = ff_make_format_list(pixel_formats);
    int ret;

    if ((ret = ff_set_common_formats(ctx, pix_fmts)) < 0)
        return ret;

    return 0;
}

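/* Allocate the output hw frame pool on the same device as the input, with the
 * allocated dimensions aligned to 16 and the visible size recorded in the
 * CropW/CropH of every surface. */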
static int init_out_pool(AVFilterContext *ctx,
                         int out_width, int out_height)
{
    QSVScaleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    AVHWFramesContext *in_frames_ctx;
    AVHWFramesContext *out_frames_ctx;
    AVQSVFramesContext *in_frames_hwctx;
    AVQSVFramesContext *out_frames_hwctx;
    enum AVPixelFormat in_format;
    enum AVPixelFormat out_format;
    int i, ret;

    /* check that we have a hw context */
    if (!ctx->inputs[0]->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
        return AVERROR(EINVAL);
    }
    in_frames_ctx   = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    in_frames_hwctx = in_frames_ctx->hwctx;

    in_format  = in_frames_ctx->sw_format;
    out_format = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;

    outlink->hw_frames_ctx = av_hwframe_ctx_alloc(in_frames_ctx->device_ref);
    if (!outlink->hw_frames_ctx)
        return AVERROR(ENOMEM);
    out_frames_ctx   = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
    out_frames_hwctx = out_frames_ctx->hwctx;

    out_frames_ctx->format            = AV_PIX_FMT_QSV;
    out_frames_ctx->width             = FFALIGN(out_width,  16);
    out_frames_ctx->height            = FFALIGN(out_height, 16);
    out_frames_ctx->sw_format         = out_format;
    out_frames_ctx->initial_pool_size = 4;

    out_frames_hwctx->frame_type = in_frames_hwctx->frame_type;

    ret = ff_filter_init_hw_frames(ctx, outlink, 32);
    if (ret < 0)
        return ret;

    ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
    if (ret < 0)
        return ret;

    for (i = 0; i < out_frames_hwctx->nb_surfaces; i++) {
        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
        info->CropW = out_width;
        info->CropH = out_height;
    }

    return 0;
}

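/* Minimal mfxFrameAllocator callbacks for the private session: the surfaces
 * already exist in the input/output AVHWFramesContext pools, so Alloc() just
 * hands back the stored MemIds and GetHDL() returns the MemId itself; locking
 * frames into system memory is never needed and is left unsupported. */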
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    AVFilterContext *ctx = pthis;
    QSVScaleContext   *s = ctx->priv;

    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
        return MFX_ERR_UNSUPPORTED;

    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
        resp->mids           = s->mem_ids_in;
        resp->NumFrameActual = s->nb_mem_ids_in;
    } else {
        resp->mids           = s->mem_ids_out;
        resp->NumFrameActual = s->nb_mem_ids_out;
    }

    return MFX_ERR_NONE;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

static const mfxHandleType handle_types[] = {
    MFX_HANDLE_VA_DISPLAY,
    MFX_HANDLE_D3D9_DEVICE_MANAGER,
    MFX_HANDLE_D3D11_DEVICE,
};

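/* Create the private "slave" VPP session: query the implementation, version
 * and device handle of the master session from the device context, open a new
 * session with the same properties, and configure it either for opaque
 * surfaces or with the external frame allocator above before calling
 * MFXVideoVPP_Init(). */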
static int init_out_session(AVFilterContext *ctx)
{

    QSVScaleContext                   *s = ctx->priv;
    AVHWFramesContext     *in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
    AVHWFramesContext    *out_frames_ctx = (AVHWFramesContext*)ctx->outputs[0]->hw_frames_ctx->data;
    AVQSVFramesContext  *in_frames_hwctx = in_frames_ctx->hwctx;
    AVQSVFramesContext *out_frames_hwctx = out_frames_ctx->hwctx;
    AVQSVDeviceContext     *device_hwctx = in_frames_ctx->device_ctx->hwctx;

    int opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);

    mfxHDL handle = NULL;
    mfxHandleType handle_type;
    mfxVersion ver;
    mfxIMPL impl;
    mfxVideoParam par;
    mfxStatus err;
    int i;

    s->num_ext_buf = 0;

    /* extract the properties of the "master" session given to us */
    err = MFXQueryIMPL(device_hwctx->session, &impl);
    if (err == MFX_ERR_NONE)
        err = MFXQueryVersion(device_hwctx->session, &ver);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(handle_types); i++) {
        err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_types[i], &handle);
        if (err == MFX_ERR_NONE) {
            handle_type = handle_types[i];
            break;
        }
    }

    if (err < 0)
        return ff_qsvvpp_print_error(ctx, err, "Error getting the session handle");
    else if (err > 0) {
        ff_qsvvpp_print_warning(ctx, err, "Warning in getting the session handle");
        return AVERROR_UNKNOWN;
    }

    /* create a "slave" session with those same properties, to be used for
     * actual scaling */
    err = MFXInit(impl, &ver, &s->session);
    if (err != MFX_ERR_NONE) {
        av_log(ctx, AV_LOG_ERROR, "Error initializing a session for scaling\n");
        return AVERROR_UNKNOWN;
    }

    if (handle) {
        err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
        err = MFXJoinSession(device_hwctx->session, s->session);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;
    }

    memset(&par, 0, sizeof(par));

    if (opaque) {
        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                              sizeof(*s->surface_ptrs_in));
        if (!s->surface_ptrs_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;

        s->surface_ptrs_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                               sizeof(*s->surface_ptrs_out));
        if (!s->surface_ptrs_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;

        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;

        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;

        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);

        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;

        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
    } else {
        mfxFrameAllocator frame_allocator = {
            .pthis  = ctx,
            .Alloc  = frame_alloc,
            .Lock   = frame_lock,
            .Unlock = frame_unlock,
            .GetHDL = frame_get_hdl,
            .Free   = frame_free,
        };

        s->mem_ids_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
                                         sizeof(*s->mem_ids_in));
        if (!s->mem_ids_in)
            return AVERROR(ENOMEM);
        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_in = in_frames_hwctx->nb_surfaces;

        s->mem_ids_out = av_mallocz_array(out_frames_hwctx->nb_surfaces,
                                          sizeof(*s->mem_ids_out));
        if (!s->mem_ids_out)
            return AVERROR(ENOMEM);
        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
        s->nb_mem_ids_out = out_frames_hwctx->nb_surfaces;

        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
        if (err != MFX_ERR_NONE)
            return AVERROR_UNKNOWN;

        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
    }

#if QSV_HAVE_SCALING_CONFIG
    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
    s->scale_conf.Header.BufferId = MFX_EXTBUFF_VPP_SCALING;
    s->scale_conf.Header.BufferSz = sizeof(mfxExtVPPScaling);
    s->scale_conf.ScalingMode     = s->mode;
    s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
#endif

    par.ExtParam    = s->ext_buffers;
    par.NumExtParam = s->num_ext_buf;

    par.AsyncDepth = 1;    // TODO async

    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;

    /* Apparently VPP requires the frame rate to be set to some value, otherwise
     * init will fail (probably for the framerate conversion filter). Since we
     * are only doing scaling here, we just invent an arbitrary
     * value */
    par.vpp.In.FrameRateExtN  = 25;
    par.vpp.In.FrameRateExtD  = 1;
    par.vpp.Out.FrameRateExtN = 25;
    par.vpp.Out.FrameRateExtD = 1;

    /* Print input memory mode */
    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0x0F, "VPP");
    /* Print output memory mode */
    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0xF0, "VPP");
    err = MFXVideoVPP_Init(s->session, &par);
    if (err < 0)
        return ff_qsvvpp_print_error(ctx, err,
                                     "Error opening the VPP for scaling");
    else if (err > 0) {
        ff_qsvvpp_print_warning(ctx, err,
                                "Warning in VPP initialization");
        return AVERROR_UNKNOWN;
    }

    return 0;
}

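/* Tear down any previous state, then build the output frame pool and the VPP
 * session for the given output size. Called from qsvscale_config_props(). */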
static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
                              int out_width, int out_height)
{
    int ret;

    qsvscale_uninit(ctx);

    ret = init_out_pool(ctx, out_width, out_height);
    if (ret < 0)
        return ret;

    ret = init_out_session(ctx);
    if (ret < 0)
        return ret;

    return 0;
}

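/* Output link setup: evaluate the "w"/"h" expressions against the input link
 * (iw/ih, a, sar, dar, ...), resolve the 0/-1 special values (e.g. w=iw/2:h=-1
 * halves the width while keeping the aspect), then initialize the scaling
 * session and propagate the sample aspect ratio. */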
static int qsvscale_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    QSVScaleContext  *s = ctx->priv;
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_PI]    = M_PI;
    var_values[VAR_PHI]   = M_PHI;
    var_values[VAR_E]     = M_E;
    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = s->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate again the width, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->w = res;

    w = s->w;
    h = s->h;

    /* sanity check params */
    if (w < -1 || h < -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        s->w = s->h = 0;

    if (!(w = s->w))
        w = inlink->w;
    if (!(h = s->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
           inlink->w, inlink->h, outlink->w, outlink->h);

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
                                                             outlink->w*inlink->h},
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}

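/* Per-frame callback: submit the mfxFrameSurface1 stored in data[3] of the
 * input and output frames to MFXVideoVPP_RunFrameVPPAsync(), retry while the
 * device is busy, wait on the sync point, then forward the scaled frame with
 * a recomputed sample aspect ratio. */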
static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
{
    AVFilterContext  *ctx = link->dst;
    QSVScaleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    mfxSyncPoint sync = NULL;
    mfxStatus err;

    AVFrame *out = NULL;
    int ret = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    do {
        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
                                           (mfxFrameSurface1*)in->data[3],
                                           (mfxFrameSurface1*)out->data[3],
                                           NULL, &sync);
        if (err == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (err == MFX_WRN_DEVICE_BUSY);

    if (err < 0) {
        ret = ff_qsvvpp_print_error(ctx, err, "Error during scaling");
        goto fail;
    }

    if (!sync) {
        av_log(ctx, AV_LOG_ERROR, "No sync during scaling\n");
        ret = AVERROR_UNKNOWN;
        goto fail;
    }

    do {
        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
    } while (err == MFX_WRN_IN_EXECUTION);
    if (err < 0) {
        ret = ff_qsvvpp_print_error(ctx, err, "Error synchronizing the operation");
        goto fail;
    }

    ret = av_frame_copy_props(out, in);
    if (ret < 0)
        goto fail;

    out->width  = outlink->w;
    out->height = outlink->h;

    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
              INT_MAX);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
fail:
    av_frame_free(&in);
    av_frame_free(&out);
    return ret;
}

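/* Options: "w" and "h" accept the expression variables listed in var_names[]
 * above and default to the input size; "format" defaults to "same" (keep the
 * input surface format). Illustrative example, assuming a libmfx new enough
 * for mfxExtVPPScaling:
 *     scale_qsv=w=iw/2:h=-1:mode=hq
 */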
#define OFFSET(x) offsetof(QSVScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "w",      "Output video width",  OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
    { "h",      "Output video height", OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
    { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },

#if QSV_HAVE_SCALING_CONFIG
    { "mode",      "set scaling mode",  OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = MFX_SCALING_MODE_DEFAULT},  MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode"},
    { "low_power", "low power mode",    0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "mode"},
    { "hq",        "high quality mode", 0,            AV_OPT_TYPE_CONST, { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "mode"},
#else
    { "mode",      "(not supported)",   OFFSET(mode), AV_OPT_TYPE_INT,   { .i64 = 0}, 0, INT_MAX, FLAGS, "mode"},
    { "low_power", "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0,       FLAGS, "mode"},
    { "hq",        "",                  0,            AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0,       FLAGS, "mode"},
#endif

    { NULL },
};

static const AVClass qsvscale_class = {
    .class_name = "scale_qsv",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad qsvscale_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = qsvscale_filter_frame,
    },
    { NULL }
};

static const AVFilterPad qsvscale_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = qsvscale_config_props,
    },
    { NULL }
};

AVFilter ff_vf_scale_qsv = {
    .name        = "scale_qsv",
    .description = NULL_IF_CONFIG_SMALL("QuickSync video scaling and format conversion"),

    .init          = qsvscale_init,
    .uninit        = qsvscale_uninit,
    .query_formats = qsvscale_query_formats,

    .priv_size  = sizeof(QSVScaleContext),
    .priv_class = &qsvscale_class,

    .inputs  = qsvscale_inputs,
    .outputs = qsvscale_outputs,

    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};