FFmpeg
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "cpu.h"
24 #include "dict.h"
25 #include "frame.h"
26 #include "imgutils.h"
27 #include "mem.h"
28 #include "samplefmt.h"
29 #include "hwcontext.h"
30 
31 static const AVSideDataDescriptor sd_props[] = {
32  [AV_FRAME_DATA_PANSCAN] = { "AVPanScan" },
33  [AV_FRAME_DATA_A53_CC] = { "ATSC A53 Part 4 Closed Captions" },
34  [AV_FRAME_DATA_MATRIXENCODING] = { "AVMatrixEncoding" },
35  [AV_FRAME_DATA_DOWNMIX_INFO] = { "Metadata relevant to a downmix procedure" },
36  [AV_FRAME_DATA_AFD] = { "Active format description" },
37  [AV_FRAME_DATA_MOTION_VECTORS] = { "Motion vectors" },
38  [AV_FRAME_DATA_SKIP_SAMPLES] = { "Skip samples" },
39  [AV_FRAME_DATA_GOP_TIMECODE] = { "GOP timecode" },
40  [AV_FRAME_DATA_S12M_TIMECODE] = { "SMPTE 12-1 timecode" },
41  [AV_FRAME_DATA_DYNAMIC_HDR_PLUS] = { "HDR Dynamic Metadata SMPTE2094-40 (HDR10+)" },
42  [AV_FRAME_DATA_DYNAMIC_HDR_VIVID] = { "HDR Dynamic Metadata CUVA 005.1 2021 (Vivid)" },
43  [AV_FRAME_DATA_REGIONS_OF_INTEREST] = { "Regions Of Interest" },
44  [AV_FRAME_DATA_VIDEO_ENC_PARAMS] = { "Video encoding parameters" },
45  [AV_FRAME_DATA_FILM_GRAIN_PARAMS] = { "Film grain parameters" },
46  [AV_FRAME_DATA_DETECTION_BBOXES] = { "Bounding boxes for object detection and classification" },
47  [AV_FRAME_DATA_DOVI_RPU_BUFFER] = { "Dolby Vision RPU Data" },
48  [AV_FRAME_DATA_DOVI_METADATA] = { "Dolby Vision Metadata" },
49  [AV_FRAME_DATA_LCEVC] = { "LCEVC NAL data" },
50  [AV_FRAME_DATA_STEREO3D] = { "Stereo 3D", AV_SIDE_DATA_PROP_GLOBAL },
51  [AV_FRAME_DATA_REPLAYGAIN] = { "AVReplayGain", AV_SIDE_DATA_PROP_GLOBAL },
52  [AV_FRAME_DATA_DISPLAYMATRIX] = { "3x3 displaymatrix", AV_SIDE_DATA_PROP_GLOBAL },
53  [AV_FRAME_DATA_AUDIO_SERVICE_TYPE] = { "Audio service type", AV_SIDE_DATA_PROP_GLOBAL },
54  [AV_FRAME_DATA_MASTERING_DISPLAY_METADATA] = { "Mastering display metadata", AV_SIDE_DATA_PROP_GLOBAL },
55  [AV_FRAME_DATA_CONTENT_LIGHT_LEVEL] = { "Content light level metadata", AV_SIDE_DATA_PROP_GLOBAL },
56  [AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT] = { "Ambient viewing environment", AV_SIDE_DATA_PROP_GLOBAL },
57  [AV_FRAME_DATA_SPHERICAL] = { "Spherical Mapping", AV_SIDE_DATA_PROP_GLOBAL },
58  [AV_FRAME_DATA_ICC_PROFILE] = { "ICC profile", AV_SIDE_DATA_PROP_GLOBAL },
59  [AV_FRAME_DATA_SEI_UNREGISTERED] = { "H.26[45] User Data Unregistered SEI message", AV_SIDE_DATA_PROP_MULTI },
60 };
61 
62 static void get_frame_defaults(AVFrame *frame)
63 {
64  memset(frame, 0, sizeof(*frame));
65 
66  frame->pts =
67  frame->pkt_dts = AV_NOPTS_VALUE;
68  frame->best_effort_timestamp = AV_NOPTS_VALUE;
69  frame->duration = 0;
70 #if FF_API_FRAME_PKT
71 FF_DISABLE_DEPRECATION_WARNINGS
72  frame->pkt_pos = -1;
73  frame->pkt_size = -1;
74 FF_ENABLE_DEPRECATION_WARNINGS
75 #endif
76  frame->time_base = (AVRational){ 0, 1 };
77  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
78  frame->format = -1; /* unknown */
79  frame->extended_data = frame->data;
80  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
81  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
82  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
83  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
84  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
85  frame->flags = 0;
86 }
87 
88 static void free_side_data(AVFrameSideData **ptr_sd)
89 {
90  AVFrameSideData *sd = *ptr_sd;
91 
92  av_buffer_unref(&sd->buf);
93  av_dict_free(&sd->metadata);
94  av_freep(ptr_sd);
95 }
96 
97 static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
98 {
99  for (int i = 0; i < *nb_side_data; i++) {
100  free_side_data(&((*sd)[i]));
101  }
102  *nb_side_data = 0;
103 
104  av_freep(sd);
105 }
106 
107 static void frame_side_data_wipe(AVFrame *frame)
108 {
109  wipe_side_data(&frame->side_data, &frame->nb_side_data);
110 }
111 
112 void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
113 {
114  wipe_side_data(sd, nb_sd);
115 }
116 
117 static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data,
118  const enum AVFrameSideDataType type)
119 {
120  for (int i = *nb_side_data - 1; i >= 0; i--) {
121  AVFrameSideData *entry = ((*sd)[i]);
122  if (entry->type != type)
123  continue;
124 
125  free_side_data(&entry);
126 
127  ((*sd)[i]) = ((*sd)[*nb_side_data - 1]);
128  (*nb_side_data)--;
129  }
130 }
131 
132 static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd,
133  const AVFrameSideData *target)
134 {
135  for (int i = *nb_sd - 1; i >= 0; i--) {
136  AVFrameSideData *entry = ((*sd)[i]);
137  if (entry != target)
138  continue;
139 
140  free_side_data(&entry);
141 
142  ((*sd)[i]) = ((*sd)[*nb_sd - 1]);
143  (*nb_sd)--;
144 
145  return;
146  }
147 }
148 
149 AVFrame *av_frame_alloc(void)
150 {
151  AVFrame *frame = av_malloc(sizeof(*frame));
152 
153  if (!frame)
154  return NULL;
155 
156  get_frame_defaults(frame);
157 
158  return frame;
159 }
160 
161 void av_frame_free(AVFrame **frame)
162 {
163  if (!frame || !*frame)
164  return;
165 
166  av_frame_unref(*frame);
167  av_freep(frame);
168 }
169 
170 #define ALIGN (HAVE_SIMD_ALIGN_64 ? 64 : 32)
171 
172 static int get_video_buffer(AVFrame *frame, int align)
173 {
174  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
175  int ret, padded_height, total_size;
176  int plane_padding = FFMAX(16 + 16/*STRIDE_ALIGN*/, align);
177  ptrdiff_t linesizes[4];
178  size_t sizes[4];
179 
180  if (!desc)
181  return AVERROR(EINVAL);
182 
183  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
184  return ret;
185 
186  if (!frame->linesize[0]) {
187  if (align <= 0)
188  align = ALIGN;
189 
190  for (int i = 1; i <= align; i += i) {
191  ret = av_image_fill_linesizes(frame->linesize, frame->format,
192  FFALIGN(frame->width, i));
193  if (ret < 0)
194  return ret;
195  if (!(frame->linesize[0] & (align-1)))
196  break;
197  }
198 
199  for (int i = 0; i < 4 && frame->linesize[i]; i++)
200  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
201  }
202 
203  for (int i = 0; i < 4; i++)
204  linesizes[i] = frame->linesize[i];
205 
206  padded_height = FFALIGN(frame->height, 32);
207  if ((ret = av_image_fill_plane_sizes(sizes, frame->format,
208  padded_height, linesizes)) < 0)
209  return ret;
210 
211  total_size = 4*plane_padding;
212  for (int i = 0; i < 4; i++) {
213  if (sizes[i] > INT_MAX - total_size)
214  return AVERROR(EINVAL);
215  total_size += sizes[i];
216  }
217 
218  frame->buf[0] = av_buffer_alloc(total_size);
219  if (!frame->buf[0]) {
220  ret = AVERROR(ENOMEM);
221  goto fail;
222  }
223 
224  if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height,
225  frame->buf[0]->data, frame->linesize)) < 0)
226  goto fail;
227 
228  for (int i = 1; i < 4; i++) {
229  if (frame->data[i])
230  frame->data[i] += i * plane_padding;
231  }
232 
233  frame->extended_data = frame->data;
234 
235  return 0;
236 fail:
237  av_frame_unref(frame);
238  return ret;
239 }
240 
241 static int get_audio_buffer(AVFrame *frame, int align)
242 {
243  int planar = av_sample_fmt_is_planar(frame->format);
244  int channels, planes;
245  int ret;
246 
247  channels = frame->ch_layout.nb_channels;
248  planes = planar ? channels : 1;
249  if (!frame->linesize[0]) {
250  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
251  frame->nb_samples, frame->format,
252  align);
253  if (ret < 0)
254  return ret;
255  }
256 
257  if (planes > AV_NUM_DATA_POINTERS) {
258  frame->extended_data = av_calloc(planes,
259  sizeof(*frame->extended_data));
260  frame->extended_buf = av_calloc(planes - AV_NUM_DATA_POINTERS,
261  sizeof(*frame->extended_buf));
262  if (!frame->extended_data || !frame->extended_buf) {
263  av_freep(&frame->extended_data);
264  av_freep(&frame->extended_buf);
265  return AVERROR(ENOMEM);
266  }
267  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
268  } else
269  frame->extended_data = frame->data;
270 
271  for (int i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
272  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
273  if (!frame->buf[i]) {
274  av_frame_unref(frame);
275  return AVERROR(ENOMEM);
276  }
277  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
278  }
279  for (int i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
280  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
281  if (!frame->extended_buf[i]) {
282  av_frame_unref(frame);
283  return AVERROR(ENOMEM);
284  }
285  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
286  }
287  return 0;
288 
289 }
290 
291 int av_frame_get_buffer(AVFrame *frame, int align)
292 {
293  if (frame->format < 0)
294  return AVERROR(EINVAL);
295 
296  if (frame->width > 0 && frame->height > 0)
297  return get_video_buffer(frame, align);
298  else if (frame->nb_samples > 0 &&
299  (av_channel_layout_check(&frame->ch_layout)))
300  return get_audio_buffer(frame, align);
301 
302  return AVERROR(EINVAL);
303 }
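/* Editor's note: a minimal usage sketch, not part of frame.c. It assumes the
 * public libavutil headers (frame.h, pixfmt.h, error.h) are available and
 * shows the intended call order for av_frame_get_buffer(): set format and
 * geometry (or nb_samples/ch_layout for audio) first, then ask for buffers. */
static int alloc_video_frame_sketch(AVFrame **out, int w, int h)
{
    AVFrame *f = av_frame_alloc();
    if (!f)
        return AVERROR(ENOMEM);
    f->format = AV_PIX_FMT_YUV420P; /* any valid pixel (or sample) format */
    f->width  = w;
    f->height = h;
    /* align == 0 lets get_video_buffer() pick ALIGN (32 or 64) automatically */
    int ret = av_frame_get_buffer(f, 0);
    if (ret < 0) {
        av_frame_free(&f);
        return ret;
    }
    *out = f;
    return 0;
}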
304 
305 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
306 {
307  int ret;
308 
309 #if FF_API_FRAME_KEY
310 FF_DISABLE_DEPRECATION_WARNINGS
311  dst->key_frame = src->key_frame;
312 FF_ENABLE_DEPRECATION_WARNINGS
313 #endif
314  dst->pict_type = src->pict_type;
315  dst->sample_aspect_ratio = src->sample_aspect_ratio;
316  dst->crop_top = src->crop_top;
317  dst->crop_bottom = src->crop_bottom;
318  dst->crop_left = src->crop_left;
319  dst->crop_right = src->crop_right;
320  dst->pts = src->pts;
321  dst->duration = src->duration;
322  dst->repeat_pict = src->repeat_pict;
323 #if FF_API_INTERLACED_FRAME
324 FF_DISABLE_DEPRECATION_WARNINGS
325  dst->interlaced_frame = src->interlaced_frame;
326  dst->top_field_first = src->top_field_first;
327 FF_ENABLE_DEPRECATION_WARNINGS
328 #endif
329 #if FF_API_PALETTE_HAS_CHANGED
330 FF_DISABLE_DEPRECATION_WARNINGS
331  dst->palette_has_changed = src->palette_has_changed;
332 FF_ENABLE_DEPRECATION_WARNINGS
333 #endif
334  dst->sample_rate = src->sample_rate;
335  dst->opaque = src->opaque;
336  dst->pkt_dts = src->pkt_dts;
337 #if FF_API_FRAME_PKT
338 FF_DISABLE_DEPRECATION_WARNINGS
339  dst->pkt_pos = src->pkt_pos;
340  dst->pkt_size = src->pkt_size;
341 FF_ENABLE_DEPRECATION_WARNINGS
342 #endif
343  dst->time_base = src->time_base;
344  dst->quality = src->quality;
345  dst->best_effort_timestamp = src->best_effort_timestamp;
346  dst->flags = src->flags;
347  dst->decode_error_flags = src->decode_error_flags;
348  dst->color_primaries = src->color_primaries;
349  dst->color_trc = src->color_trc;
350  dst->colorspace = src->colorspace;
351  dst->color_range = src->color_range;
352  dst->chroma_location = src->chroma_location;
353 
354  av_dict_copy(&dst->metadata, src->metadata, 0);
355 
356  for (int i = 0; i < src->nb_side_data; i++) {
357  const AVFrameSideData *sd_src = src->side_data[i];
358  AVFrameSideData *sd_dst;
359  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
360  && (src->width != dst->width || src->height != dst->height))
361  continue;
362  if (force_copy) {
363  sd_dst = av_frame_new_side_data(dst, sd_src->type,
364  sd_src->size);
365  if (!sd_dst) {
366  frame_side_data_wipe(dst);
367  return AVERROR(ENOMEM);
368  }
369  memcpy(sd_dst->data, sd_src->data, sd_src->size);
370  } else {
371  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
372  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
373  if (!sd_dst) {
374  av_buffer_unref(&ref);
375  frame_side_data_wipe(dst);
376  return AVERROR(ENOMEM);
377  }
378  }
379  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
380  }
381 
382  ret = av_buffer_replace(&dst->opaque_ref, src->opaque_ref);
383  ret |= av_buffer_replace(&dst->private_ref, src->private_ref);
384  return ret;
385 }
386 
387 int av_frame_ref(AVFrame *dst, const AVFrame *src)
388 {
389  int ret = 0;
390 
391  av_assert1(dst->width == 0 && dst->height == 0);
392  av_assert1(dst->ch_layout.nb_channels == 0 &&
393  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
394 
395  dst->format = src->format;
396  dst->width = src->width;
397  dst->height = src->height;
398  dst->nb_samples = src->nb_samples;
399 
400  ret = frame_copy_props(dst, src, 0);
401  if (ret < 0)
402  goto fail;
403 
404  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
405  if (ret < 0)
406  goto fail;
407 
408  /* duplicate the frame data if it's not refcounted */
409  if (!src->buf[0]) {
410  ret = av_frame_get_buffer(dst, 0);
411  if (ret < 0)
412  goto fail;
413 
414  ret = av_frame_copy(dst, src);
415  if (ret < 0)
416  goto fail;
417 
418  return 0;
419  }
420 
421  /* ref the buffers */
422  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
423  if (!src->buf[i])
424  continue;
425  dst->buf[i] = av_buffer_ref(src->buf[i]);
426  if (!dst->buf[i]) {
427  ret = AVERROR(ENOMEM);
428  goto fail;
429  }
430  }
431 
432  if (src->extended_buf) {
433  dst->extended_buf = av_calloc(src->nb_extended_buf,
434  sizeof(*dst->extended_buf));
435  if (!dst->extended_buf) {
436  ret = AVERROR(ENOMEM);
437  goto fail;
438  }
439  dst->nb_extended_buf = src->nb_extended_buf;
440 
441  for (int i = 0; i < src->nb_extended_buf; i++) {
442  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
443  if (!dst->extended_buf[i]) {
444  ret = AVERROR(ENOMEM);
445  goto fail;
446  }
447  }
448  }
449 
450  if (src->hw_frames_ctx) {
451  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
452  if (!dst->hw_frames_ctx) {
453  ret = AVERROR(ENOMEM);
454  goto fail;
455  }
456  }
457 
458  /* duplicate extended data */
459  if (src->extended_data != src->data) {
460  int ch = dst->ch_layout.nb_channels;
461 
462  if (!ch) {
463  ret = AVERROR(EINVAL);
464  goto fail;
465  }
466 
467  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
468  if (!dst->extended_data) {
469  ret = AVERROR(ENOMEM);
470  goto fail;
471  }
472  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
473  } else
474  dst->extended_data = dst->data;
475 
476  memcpy(dst->data, src->data, sizeof(src->data));
477  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
478 
479  return 0;
480 
481 fail:
482  av_frame_unref(dst);
483  return ret;
484 }
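/* Editor's note: an illustrative sketch, not part of frame.c, of the
 * reference semantics implemented above. av_frame_ref() shares src's
 * AVBufferRefs, so both frames see the same data until one is unreferenced;
 * the destination must be freshly allocated or previously unreferenced. */
static int share_frame_sketch(const AVFrame *src)
{
    AVFrame *dst = av_frame_alloc();
    if (!dst)
        return AVERROR(ENOMEM);
    int ret = av_frame_ref(dst, src);   /* refs src->buf[], copies props */
    if (ret < 0) {
        av_frame_free(&dst);
        return ret;
    }
    /* ... read through dst->data ... */
    av_frame_free(&dst);                /* unrefs the buffers, frees the shell */
    return 0;
}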
485 
486 int av_frame_replace(AVFrame *dst, const AVFrame *src)
487 {
488  int ret = 0;
489 
490  if (dst == src)
491  return AVERROR(EINVAL);
492 
493  if (!src->buf[0]) {
494  av_frame_unref(dst);
495 
496  /* duplicate the frame data if it's not refcounted */
497  if ( src->data[0] || src->data[1]
498  || src->data[2] || src->data[3])
499  return av_frame_ref(dst, src);
500 
501  ret = frame_copy_props(dst, src, 0);
502  if (ret < 0)
503  goto fail;
504  }
505 
506  dst->format = src->format;
507  dst->width = src->width;
508  dst->height = src->height;
509  dst->nb_samples = src->nb_samples;
510 
511  ret = av_channel_layout_copy(&dst->ch_layout, &src->ch_layout);
512  if (ret < 0)
513  goto fail;
514 
515  frame_side_data_wipe(dst);
516  av_dict_free(&dst->metadata);
517  ret = frame_copy_props(dst, src, 0);
518  if (ret < 0)
519  goto fail;
520 
521  /* replace the buffers */
522  for (int i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
523  ret = av_buffer_replace(&dst->buf[i], src->buf[i]);
524  if (ret < 0)
525  goto fail;
526  }
527 
528  if (src->extended_buf) {
529  if (dst->nb_extended_buf != src->nb_extended_buf) {
530  int nb_extended_buf = FFMIN(dst->nb_extended_buf, src->nb_extended_buf);
531  void *tmp;
532 
533  for (int i = nb_extended_buf; i < dst->nb_extended_buf; i++)
534  av_buffer_unref(&dst->extended_buf[i]);
535 
536  tmp = av_realloc_array(dst->extended_buf, sizeof(*dst->extended_buf),
537  src->nb_extended_buf);
538  if (!tmp) {
539  ret = AVERROR(ENOMEM);
540  goto fail;
541  }
542  dst->extended_buf = tmp;
543  dst->nb_extended_buf = src->nb_extended_buf;
544 
545  memset(&dst->extended_buf[nb_extended_buf], 0,
546  (src->nb_extended_buf - nb_extended_buf) * sizeof(*dst->extended_buf));
547  }
548 
549  for (int i = 0; i < src->nb_extended_buf; i++) {
550  ret = av_buffer_replace(&dst->extended_buf[i], src->extended_buf[i]);
551  if (ret < 0)
552  goto fail;
553  }
554  } else if (dst->extended_buf) {
555  for (int i = 0; i < dst->nb_extended_buf; i++)
556  av_buffer_unref(&dst->extended_buf[i]);
557  av_freep(&dst->extended_buf);
558  }
559 
560  ret = av_buffer_replace(&dst->hw_frames_ctx, src->hw_frames_ctx);
561  if (ret < 0)
562  goto fail;
563 
564  if (dst->extended_data != dst->data)
565  av_freep(&dst->extended_data);
566 
567  if (src->extended_data != src->data) {
568  int ch = dst->ch_layout.nb_channels;
569 
570  if (!ch) {
571  ret = AVERROR(EINVAL);
572  goto fail;
573  }
574 
575  if (ch > SIZE_MAX / sizeof(*dst->extended_data))
576  goto fail;
577 
578  dst->extended_data = av_memdup(src->extended_data, sizeof(*dst->extended_data) * ch);
579  if (!dst->extended_data) {
580  ret = AVERROR(ENOMEM);
581  goto fail;
582  }
583  } else
584  dst->extended_data = dst->data;
585 
586  memcpy(dst->data, src->data, sizeof(src->data));
587  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
588 
589  return 0;
590 
591 fail:
592  av_frame_unref(dst);
593  return ret;
594 }
595 
596 AVFrame *av_frame_clone(const AVFrame *src)
597 {
598  AVFrame *ret = av_frame_alloc();
599 
600  if (!ret)
601  return NULL;
602 
603  if (av_frame_ref(ret, src) < 0)
604  av_frame_free(&ret);
605 
606  return ret;
607 }
608 
609 void av_frame_unref(AVFrame *frame)
610 {
611  if (!frame)
612  return;
613 
614  frame_side_data_wipe(frame);
615 
616  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
617  av_buffer_unref(&frame->buf[i]);
618  for (int i = 0; i < frame->nb_extended_buf; i++)
619  av_buffer_unref(&frame->extended_buf[i]);
620  av_freep(&frame->extended_buf);
621  av_dict_free(&frame->metadata);
622 
623  av_buffer_unref(&frame->hw_frames_ctx);
624 
625  av_buffer_unref(&frame->opaque_ref);
626  av_buffer_unref(&frame->private_ref);
627 
628  if (frame->extended_data != frame->data)
629  av_freep(&frame->extended_data);
630 
631  av_channel_layout_uninit(&frame->ch_layout);
632 
633  get_frame_defaults(frame);
634 }
635 
636 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
637 {
638  av_assert1(dst->width == 0 && dst->height == 0);
639  av_assert1(dst->ch_layout.nb_channels == 0 &&
640  dst->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC);
641 
642  *dst = *src;
643  if (src->extended_data == src->data)
644  dst->extended_data = dst->data;
645  get_frame_defaults(src);
646 }
647 
648 int av_frame_is_writable(AVFrame *frame)
649 {
650  int ret = 1;
651 
652  /* assume non-refcounted frames are not writable */
653  if (!frame->buf[0])
654  return 0;
655 
656  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
657  if (frame->buf[i])
658  ret &= !!av_buffer_is_writable(frame->buf[i]);
659  for (int i = 0; i < frame->nb_extended_buf; i++)
660  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
661 
662  return ret;
663 }
664 
665 int av_frame_make_writable(AVFrame *frame)
666 {
667  AVFrame tmp;
668  int ret;
669 
670  if (av_frame_is_writable(frame))
671  return 0;
672 
673  memset(&tmp, 0, sizeof(tmp));
674  tmp.format = frame->format;
675  tmp.width = frame->width;
676  tmp.height = frame->height;
677  tmp.nb_samples = frame->nb_samples;
678  ret = av_channel_layout_copy(&tmp.ch_layout, &frame->ch_layout);
679  if (ret < 0) {
680  av_frame_unref(&tmp);
681  return ret;
682  }
683 
684  if (frame->hw_frames_ctx)
685  ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
686  else
687  ret = av_frame_get_buffer(&tmp, 0);
688  if (ret < 0)
689  return ret;
690 
691  ret = av_frame_copy(&tmp, frame);
692  if (ret < 0) {
693  av_frame_unref(&tmp);
694  return ret;
695  }
696 
697  ret = av_frame_copy_props(&tmp, frame);
698  if (ret < 0) {
699  av_frame_unref(&tmp);
700  return ret;
701  }
702 
703  av_frame_unref(frame);
704 
705  *frame = tmp;
706  if (tmp.data == tmp.extended_data)
707  frame->extended_data = frame->data;
708 
709  return 0;
710 }
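/* Editor's note: a hedged sketch, not part of frame.c, of the copy-on-write
 * pattern av_frame_make_writable() enables: call it before modifying pixel or
 * sample data in place so that shared buffers are replaced by a private copy. */
static int clear_first_byte_sketch(AVFrame *frame)
{
    int ret = av_frame_make_writable(frame);
    if (ret < 0)
        return ret;             /* frame is left untouched on failure */
    frame->data[0][0] = 0;      /* now safe: no other reference is affected */
    return 0;
}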
711 
712 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
713 {
714  return frame_copy_props(dst, src, 1);
715 }
716 
717 AVBufferRef *av_frame_get_plane_buffer(const AVFrame *frame, int plane)
718 {
719  uint8_t *data;
720  int planes;
721 
722  if (frame->nb_samples) {
723  int channels = frame->ch_layout.nb_channels;
724  if (!channels)
725  return NULL;
726  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
727  } else
728  planes = 4;
729 
730  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
731  return NULL;
732  data = frame->extended_data[plane];
733 
734  for (int i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
735  AVBufferRef *buf = frame->buf[i];
736  if (data >= buf->data && data < buf->data + buf->size)
737  return buf;
738  }
739  for (int i = 0; i < frame->nb_extended_buf; i++) {
740  AVBufferRef *buf = frame->extended_buf[i];
741  if (data >= buf->data && data < buf->data + buf->size)
742  return buf;
743  }
744  return NULL;
745 }
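/* Editor's note: an illustrative sketch, not part of frame.c. For a planar
 * format, av_frame_get_plane_buffer() reports which AVBufferRef actually backs
 * a given plane; an unused or out-of-range plane yields NULL. */
static size_t plane1_buffer_size_sketch(const AVFrame *frame)
{
    /* which buffer backs the second plane (e.g. U for planar YUV)? */
    AVBufferRef *buf = av_frame_get_plane_buffer(frame, 1);
    return buf ? buf->size : 0;
}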
746 
747 static AVFrameSideData *add_side_data_from_buf_ext(AVFrameSideData ***sd,
748  int *nb_sd,
749  enum AVFrameSideDataType type,
750  AVBufferRef *buf, uint8_t *data,
751  size_t size)
752 {
753  AVFrameSideData *ret, **tmp;
754 
755  // *nb_sd + 1 needs to fit into an int and a size_t.
756  if ((unsigned)*nb_sd >= FFMIN(INT_MAX, SIZE_MAX))
757  return NULL;
758 
759  tmp = av_realloc_array(*sd, sizeof(**sd), *nb_sd + 1);
760  if (!tmp)
761  return NULL;
762  *sd = tmp;
763 
764  ret = av_mallocz(sizeof(*ret));
765  if (!ret)
766  return NULL;
767 
768  ret->buf = buf;
769  ret->data = data;
770  ret->size = size;
771  ret->type = type;
772 
773  (*sd)[(*nb_sd)++] = ret;
774 
775  return ret;
776 }
777 
778 static AVFrameSideData *add_side_data_from_buf(AVFrameSideData ***sd,
779  int *nb_sd,
780  enum AVFrameSideDataType type,
781  AVBufferRef *buf)
782 {
783  if (!buf)
784  return NULL;
785 
786  return add_side_data_from_buf_ext(sd, nb_sd, type, buf, buf->data, buf->size);
787 }
788 
789 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
790  enum AVFrameSideDataType type,
791  AVBufferRef *buf)
792 {
793  return
794  add_side_data_from_buf(
795  &frame->side_data, &frame->nb_side_data, type, buf);
796 }
797 
798 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
799  enum AVFrameSideDataType type,
800  size_t size)
801 {
802  AVFrameSideData *ret;
803  AVBufferRef *buf = av_buffer_alloc(size);
804  ret = av_frame_new_side_data_from_buf(frame, type, buf);
805  if (!ret)
806  av_buffer_unref(&buf);
807  return ret;
808 }
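/* Editor's note: a minimal sketch, not part of frame.c, showing how side data
 * is typically attached. av_display_rotation_set() comes from
 * libavutil/display.h, which frame.c itself does not include; it is used here
 * only to fill the 3x3 display matrix. */
static int tag_rotation_sketch(AVFrame *frame)
{
    AVFrameSideData *sd = av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
                                                 9 * sizeof(int32_t));
    if (!sd)
        return AVERROR(ENOMEM);
    av_display_rotation_set((int32_t *)sd->data, 90.0); /* rotate by 90 degrees */
    return 0;
}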
809 
810 static AVFrameSideData *replace_side_data_from_buf(AVFrameSideData *dst,
811  AVBufferRef *buf, int flags)
812 {
813  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
814  return NULL;
815 
816  av_dict_free(&dst->metadata);
817  av_buffer_unref(&dst->buf);
818  dst->buf = buf;
819  dst->data = buf->data;
820  dst->size = buf->size;
821  return dst;
822 }
823 
824 AVFrameSideData *av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd,
825  enum AVFrameSideDataType type,
826  size_t size, unsigned int flags)
827 {
828  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
829  AVBufferRef *buf = av_buffer_alloc(size);
830  AVFrameSideData *ret = NULL;
831 
832  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
833  remove_side_data(sd, nb_sd, type);
834  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
835  (ret = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
836  ret = replace_side_data_from_buf(ret, buf, flags);
837  if (!ret)
838  av_buffer_unref(&buf);
839  return ret;
840  }
841 
842  ret = add_side_data_from_buf(sd, nb_sd, type, buf);
843  if (!ret)
844  av_buffer_unref(&buf);
845 
846  return ret;
847 }
848 
849 AVFrameSideData *av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd,
850  enum AVFrameSideDataType type,
851  AVBufferRef **pbuf, unsigned int flags)
852 {
853  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
854  AVFrameSideData *sd_dst = NULL;
855  AVBufferRef *buf = *pbuf;
856 
857  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
858  remove_side_data(sd, nb_sd, type);
859  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
860  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, type))) {
861  sd_dst = replace_side_data_from_buf(sd_dst, buf, flags);
862  if (sd_dst)
863  *pbuf = NULL;
864  return sd_dst;
865  }
866 
867  sd_dst = add_side_data_from_buf(sd, nb_sd, type, buf);
868  if (!sd_dst)
869  return NULL;
870 
871  *pbuf = NULL;
872  return sd_dst;
873 }
874 
875 int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd,
876  const AVFrameSideData *src, unsigned int flags)
877 {
878  const AVSideDataDescriptor *desc;
879  AVBufferRef *buf = NULL;
880  AVFrameSideData *sd_dst = NULL;
881  int ret = AVERROR_BUG;
882 
883  if (!sd || !src || !nb_sd || (*nb_sd && !*sd))
884  return AVERROR(EINVAL);
885 
886  desc = av_frame_side_data_desc(src->type);
887  if (flags & AV_FRAME_SIDE_DATA_FLAG_UNIQUE)
888  remove_side_data(sd, nb_sd, src->type);
889  if ((!desc || !(desc->props & AV_SIDE_DATA_PROP_MULTI)) &&
890  (sd_dst = (AVFrameSideData *)av_frame_side_data_get(*sd, *nb_sd, src->type))) {
891  AVDictionary *dict = NULL;
892 
893  if (!(flags & AV_FRAME_SIDE_DATA_FLAG_REPLACE))
894  return AVERROR(EEXIST);
895 
896  ret = av_dict_copy(&dict, src->metadata, 0);
897  if (ret < 0)
898  return ret;
899 
900  ret = av_buffer_replace(&sd_dst->buf, src->buf);
901  if (ret < 0) {
902  av_dict_free(&dict);
903  return ret;
904  }
905 
906  av_dict_free(&sd_dst->metadata);
907  sd_dst->metadata = dict;
908  sd_dst->data = src->data;
909  sd_dst->size = src->size;
910  return 0;
911  }
912 
913  buf = av_buffer_ref(src->buf);
914  if (!buf)
915  return AVERROR(ENOMEM);
916 
917  sd_dst = add_side_data_from_buf_ext(sd, nb_sd, src->type, buf,
918  src->data, src->size);
919  if (!sd_dst) {
920  av_buffer_unref(&buf);
921  return AVERROR(ENOMEM);
922  }
923 
924  ret = av_dict_copy(&sd_dst->metadata, src->metadata, 0);
925  if (ret < 0) {
926  remove_side_data_by_entry(sd, nb_sd, sd_dst);
927  return ret;
928  }
929 
930  return 0;
931 }
932 
933 const AVFrameSideData *av_frame_side_data_get_c(const AVFrameSideData *const *sd,
934  const int nb_sd,
935  enum AVFrameSideDataType type)
936 {
937  for (int i = 0; i < nb_sd; i++) {
938  if (sd[i]->type == type)
939  return sd[i];
940  }
941  return NULL;
942 }
943 
944 void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd,
945  enum AVFrameSideDataType type)
946 {
947  remove_side_data(sd, nb_sd, type);
948 }
949 
950 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
951  enum AVFrameSideDataType type)
952 {
953  return (AVFrameSideData *)av_frame_side_data_get(
954  frame->side_data, frame->nb_side_data,
955  type
956  );
957 }
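/* Editor's note: illustrative counterpart to the rotation-tagging sketch
 * above, not part of frame.c. The returned side data pointer is owned by the
 * frame and stays valid until the frame is unreferenced.
 * av_display_rotation_get() is from libavutil/display.h. */
static double read_rotation_sketch(const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
    return sd ? av_display_rotation_get((const int32_t *)sd->data) : 0.0;
}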
958 
959 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
960 {
961  int planes;
962 
963  if (dst->width < src->width ||
964  dst->height < src->height)
965  return AVERROR(EINVAL);
966 
967  if (src->hw_frames_ctx || dst->hw_frames_ctx)
968  return av_hwframe_transfer_data(dst, src, 0);
969 
970  planes = av_pix_fmt_count_planes(dst->format);
971  for (int i = 0; i < planes; i++)
972  if (!dst->data[i] || !src->data[i])
973  return AVERROR(EINVAL);
974 
975  av_image_copy2(dst->data, dst->linesize,
976  src->data, src->linesize,
977  dst->format, src->width, src->height);
978 
979  return 0;
980 }
981 
982 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
983 {
984  int planar = av_sample_fmt_is_planar(dst->format);
985  int channels = dst->ch_layout.nb_channels;
986  int planes = planar ? channels : 1;
987 
988  if (dst->nb_samples != src->nb_samples ||
989  av_channel_layout_compare(&dst->ch_layout, &src->ch_layout))
990  return AVERROR(EINVAL);
991 
992  for (int i = 0; i < planes; i++)
993  if (!dst->extended_data[i] || !src->extended_data[i])
994  return AVERROR(EINVAL);
995 
996  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
997  dst->nb_samples, channels, dst->format);
998 
999  return 0;
1000 }
1001 
1002 int av_frame_copy(AVFrame *dst, const AVFrame *src)
1003 {
1004  if (dst->format != src->format || dst->format < 0)
1005  return AVERROR(EINVAL);
1006 
1007  if (dst->width > 0 && dst->height > 0)
1008  return frame_copy_video(dst, src);
1009  else if (dst->nb_samples > 0 &&
1010  (av_channel_layout_check(&dst->ch_layout)))
1011  return frame_copy_audio(dst, src);
1012 
1013  return AVERROR(EINVAL);
1014 }
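/* Editor's note: a hedged sketch, not part of frame.c, of a full deep copy of
 * a video frame, mirroring what av_frame_ref() does internally for
 * non-refcounted sources. dst is assumed to be freshly allocated; for audio
 * the nb_samples and ch_layout fields would be copied instead of width/height. */
static int deep_copy_sketch(AVFrame *dst, const AVFrame *src)
{
    dst->format = src->format;
    dst->width  = src->width;
    dst->height = src->height;
    int ret = av_frame_get_buffer(dst, 0);
    if (ret < 0)
        return ret;
    ret = av_frame_copy(dst, src);        /* pixel/sample data only */
    if (ret < 0)
        return ret;
    return av_frame_copy_props(dst, src); /* timestamps, metadata, side data */
}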
1015 
1016 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
1017 {
1018  remove_side_data(&frame->side_data, &frame->nb_side_data, type);
1019 }
1020 
1021 const AVSideDataDescriptor *av_frame_side_data_desc(enum AVFrameSideDataType type)
1022 {
1023  unsigned t = type;
1024  if (t < FF_ARRAY_ELEMS(sd_props) && sd_props[t].name)
1025  return &sd_props[t];
1026  return NULL;
1027 }
1028 
1029 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
1030 {
1031  const AVSideDataDescriptor *desc = av_frame_side_data_desc(type);
1032  return desc ? desc->name : NULL;
1033 }
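/* Editor's note: trivial sketch, not part of frame.c, showing how the
 * sd_props[] table at the top of this file is surfaced through the public
 * descriptor helpers. */
static void side_data_descriptor_sketch(void)
{
    const AVSideDataDescriptor *d = av_frame_side_data_desc(AV_FRAME_DATA_SPHERICAL);
    /* d->name is "Spherical Mapping" and d->props contains
     * AV_SIDE_DATA_PROP_GLOBAL, exactly as listed in sd_props[] above */
    (void)d;
}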
1034 
1035 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
1036  const AVPixFmtDescriptor *desc)
1037 {
1038  for (int i = 0; frame->data[i]; i++) {
1039  const AVComponentDescriptor *comp = NULL;
1040  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
1041  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
1042 
1043  if (desc->flags & AV_PIX_FMT_FLAG_PAL && i == 1) {
1044  offsets[i] = 0;
1045  break;
1046  }
1047 
1048  /* find any component descriptor for this plane */
1049  for (int j = 0; j < desc->nb_components; j++) {
1050  if (desc->comp[j].plane == i) {
1051  comp = &desc->comp[j];
1052  break;
1053  }
1054  }
1055  if (!comp)
1056  return AVERROR_BUG;
1057 
1058  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
1059  (frame->crop_left >> shift_x) * comp->step;
1060  }
1061 
1062  return 0;
1063 }
1064 
1065 int av_frame_apply_cropping(AVFrame *frame, int flags)
1066 {
1067  const AVPixFmtDescriptor *desc;
1068  size_t offsets[4];
1069 
1070  if (!(frame->width > 0 && frame->height > 0))
1071  return AVERROR(EINVAL);
1072 
1073  if (frame->crop_left >= INT_MAX - frame->crop_right ||
1074  frame->crop_top >= INT_MAX - frame->crop_bottom ||
1075  (frame->crop_left + frame->crop_right) >= frame->width ||
1076  (frame->crop_top + frame->crop_bottom) >= frame->height)
1077  return AVERROR(ERANGE);
1078 
1079  desc = av_pix_fmt_desc_get(frame->format);
1080  if (!desc)
1081  return AVERROR_BUG;
1082 
1083  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
1084  * formats cannot be easily handled here either (and corresponding decoders
1085  * should not export any cropping anyway), so do the same for those as well.
1086  * */
1087  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
1088  frame->width -= frame->crop_right;
1089  frame->height -= frame->crop_bottom;
1090  frame->crop_right = 0;
1091  frame->crop_bottom = 0;
1092  return 0;
1093  }
1094 
1095  /* calculate the offsets for each plane */
1096  calc_cropping_offsets(offsets, frame, desc);
1097 
1098  /* adjust the offsets to avoid breaking alignment */
1099  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
1100  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
1101  int min_log2_align = INT_MAX;
1102 
1103  for (int i = 0; frame->data[i]; i++) {
1104  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
1105  min_log2_align = FFMIN(log2_align, min_log2_align);
1106  }
1107 
1108  /* we assume, and it should always be true, that the data alignment is
1109  * related to the cropping alignment by a constant power-of-2 factor */
1110  if (log2_crop_align < min_log2_align)
1111  return AVERROR_BUG;
1112 
1113  if (min_log2_align < 5 && log2_crop_align != INT_MAX) {
1114  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
1115  calc_cropping_offsets(offsets, frame, desc);
1116  }
1117  }
1118 
1119  for (int i = 0; frame->data[i]; i++)
1120  frame->data[i] += offsets[i];
1121 
1122  frame->width -= (frame->crop_left + frame->crop_right);
1123  frame->height -= (frame->crop_top + frame->crop_bottom);
1124  frame->crop_left = 0;
1125  frame->crop_right = 0;
1126  frame->crop_top = 0;
1127  frame->crop_bottom = 0;
1128 
1129  return 0;
1130 }
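/* Editor's note: a minimal sketch, not part of frame.c. Decoders may export
 * crop_{left,right,top,bottom} instead of pre-cropping the picture; callers
 * apply the cropping once before reading frame->data. */
static int crop_decoded_frame_sketch(AVFrame *frame)
{
    /* pass AV_FRAME_CROP_UNALIGNED instead of 0 to crop exactly, at the cost
     * of possibly misaligned data[] pointers */
    return av_frame_apply_cropping(frame, 0);
}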
av_samples_copy
int av_samples_copy(uint8_t *const *dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:222
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:73
free_side_data
static void free_side_data(AVFrameSideData **ptr_sd)
Definition: frame.c:88
AVERROR
#define AVERROR(e)
Convert a standard (POSIX) error code into a negative FFmpeg error code.
Definition: error.h
get_video_buffer
static int get_video_buffer(AVFrame *frame, int align)
Definition: frame.c:172
sd_props
static const AVSideDataDescriptor sd_props[]
Definition: frame.c:31
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:291
ff_ctz
#define ff_ctz
Definition: intmath.h:107
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:950
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:798
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
add_side_data_from_buf_ext
static AVFrameSideData * add_side_data_from_buf_ext(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef *buf, uint8_t *data, size_t size)
Definition: frame.c:747
AV_FRAME_DATA_DOVI_METADATA
@ AV_FRAME_DATA_DOVI_METADATA
Parsed Dolby Vision metadata, suitable for passing to a software implementation.
Definition: frame.h:208
AV_FRAME_DATA_FILM_GRAIN_PARAMS
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:188
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:161
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:380
av_frame_make_writable
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:665
AVFrameSideData::buf
AVBufferRef * buf
Definition: frame.h:261
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:583
AV_FRAME_DATA_DOVI_RPU_BUFFER
@ AV_FRAME_DATA_DOVI_RPU_BUFFER
Dolby Vision RPU raw data, suitable for passing to x265 or other libraries.
Definition: frame.h:201
frame_copy_props
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
Definition: frame.c:305
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
get_audio_buffer
static int get_audio_buffer(AVFrame *frame, int align)
Definition: frame.c:241
AVDictionary
Definition: dict.c:34
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
av_frame_side_data_clone
int av_frame_side_data_clone(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *src, unsigned int flags)
Add a new side data entry to an array based on existing side data, taking a reference towards the con...
Definition: frame.c:875
frame_copy_video
static int frame_copy_video(AVFrame *dst, const AVFrame *src)
Definition: frame.c:959
av_frame_apply_cropping
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields.
Definition: frame.c:1065
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:304
av_pix_fmt_count_planes
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:3005
ALIGN
#define ALIGN
Definition: frame.c:170
AV_FRAME_DATA_MATRIXENCODING
@ AV_FRAME_DATA_MATRIXENCODING
The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
Definition: frame.h:68
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
samplefmt.h
av_image_fill_pointers
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:145
AVFrameSideDataType
AVFrameSideDataType
Definition: frame.h:49
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:149
get_frame_defaults
static void get_frame_defaults(AVFrame *frame)
Definition: frame.c:62
avassert.h
AV_FRAME_SIDE_DATA_FLAG_UNIQUE
#define AV_FRAME_SIDE_DATA_FLAG_UNIQUE
Remove existing entries before adding new ones.
Definition: frame.h:1051
AVFrameSideData::size
size_t size
Definition: frame.h:259
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
remove_side_data_by_entry
static void remove_side_data_by_entry(AVFrameSideData ***sd, int *nb_sd, const AVFrameSideData *target)
Definition: frame.c:132
av_image_fill_linesizes
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:217
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:116
AV_FRAME_CROP_UNALIGNED
@ AV_FRAME_CROP_UNALIGNED
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:1005
AV_FRAME_DATA_AUDIO_SERVICE_TYPE
@ AV_FRAME_DATA_AUDIO_SERVICE_TYPE
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:114
replace_side_data_from_buf
static AVFrameSideData * replace_side_data_from_buf(AVFrameSideData *dst, AVBufferRef *buf, int flags)
Definition: frame.c:810
av_sample_fmt_is_planar
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:114
wipe_side_data
static void wipe_side_data(AVFrameSideData ***sd, int *nb_side_data)
Definition: frame.c:97
AV_SIDE_DATA_PROP_MULTI
@ AV_SIDE_DATA_PROP_MULTI
Multiple instances of this side data type can be meaningfully present in a single side data array.
Definition: frame.h:276
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:596
AV_SIDE_DATA_PROP_GLOBAL
@ AV_SIDE_DATA_PROP_GLOBAL
The side data type can be used in stream-global structures.
Definition: frame.h:270
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:558
AV_FRAME_DATA_DYNAMIC_HDR_VIVID
@ AV_FRAME_DATA_DYNAMIC_HDR_VIVID
HDR Vivid dynamic metadata associated with a video frame.
Definition: frame.h:215
frame_copy_audio
static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
Definition: frame.c:982
AV_FRAME_DATA_SPHERICAL
@ AV_FRAME_DATA_SPHERICAL
The data represents the AVSphericalMapping structure defined in libavutil/spherical....
Definition: frame.h:131
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:712
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AVComponentDescriptor
Definition: pixdesc.h:30
av_image_fill_plane_sizes
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
av_frame_side_data_remove
void av_frame_side_data_remove(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type from an array.
Definition: frame.c:944
AV_FRAME_DATA_ICC_PROFILE
@ AV_FRAME_DATA_ICC_PROFILE
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:144
AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
@ AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
Mastering display metadata associated with a video frame.
Definition: frame.h:120
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:789
AV_FRAME_SIDE_DATA_FLAG_REPLACE
#define AV_FRAME_SIDE_DATA_FLAG_REPLACE
Don't add a new entry if another of the same type exists.
Definition: frame.h:1056
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
av_frame_get_plane_buffer
AVBufferRef * av_frame_get_plane_buffer(const AVFrame *frame, int plane)
Get the buffer reference a given data plane is stored in.
Definition: frame.c:717
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:652
AV_FRAME_DATA_SEI_UNREGISTERED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
Definition: frame.h:178
AV_FRAME_DATA_REPLAYGAIN
@ AV_FRAME_DATA_REPLAYGAIN
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:77
AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT
@ AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT
Ambient viewing environment metadata, as defined by H.274.
Definition: frame.h:220
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:387
av_frame_copy
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:1002
cpu.h
AV_FRAME_DATA_LCEVC
@ AV_FRAME_DATA_LCEVC
Raw LCEVC payload data, as a uint8_t array, with NAL emulation bytes intact.
Definition: frame.h:236
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:381
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AV_PIX_FMT_FLAG_BITSTREAM
#define AV_PIX_FMT_FLAG_BITSTREAM
All values of a component are bit-wise packed end to end.
Definition: pixdesc.h:124
AVFrameSideData::data
uint8_t * data
Definition: frame.h:258
av_frame_is_writable
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:648
AVCHROMA_LOC_UNSPECIFIED
@ AVCHROMA_LOC_UNSPECIFIED
Definition: pixfmt.h:706
frame.h
buffer.h
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:1016
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
av_buffer_alloc
AVBufferRef * av_buffer_alloc(size_t size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:77
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:804
AVBufferRef::size
size_t size
Size of data in bytes.
Definition: buffer.h:94
AV_FRAME_DATA_SKIP_SAMPLES
@ AV_FRAME_DATA_SKIP_SAMPLES
Recommends skipping the specified number of samples.
Definition: frame.h:109
AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
@ AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
Content light level (based on CTA-861.3).
Definition: frame.h:137
av_frame_side_data_free
void av_frame_side_data_free(AVFrameSideData ***sd, int *nb_sd)
Free all side data entries and their contents, then zeroes out the values which the pointers are poin...
Definition: frame.c:112
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
common.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AV_FRAME_DATA_STEREO3D
@ AV_FRAME_DATA_STEREO3D
Stereoscopic 3d metadata.
Definition: frame.h:64
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:636
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:609
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
remove_side_data
static void remove_side_data(AVFrameSideData ***sd, int *nb_side_data, const enum AVFrameSideDataType type)
Definition: frame.c:117
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
av_buffer_is_writable
int av_buffer_is_writable(const AVBufferRef *buf)
Definition: buffer.c:147
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:778
dict.h
av_hwframe_transfer_data
int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags)
Copy data to or from a hw surface.
Definition: hwcontext.c:433
av_frame_replace
int av_frame_replace(AVFrame *dst, const AVFrame *src)
Ensure the destination frame refers to the same data described by the source frame,...
Definition: frame.c:486
AV_FRAME_DATA_DYNAMIC_HDR_PLUS
@ AV_FRAME_DATA_DYNAMIC_HDR_PLUS
HDR dynamic metadata associated with a video frame.
Definition: frame.h:159
channel_layout.h
av_frame_side_data_new
AVFrameSideData * av_frame_side_data_new(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, size_t size, unsigned int flags)
Add new side data entry to an array.
Definition: frame.c:824
AV_FRAME_DATA_VIDEO_ENC_PARAMS
@ AV_FRAME_DATA_VIDEO_ENC_PARAMS
Encoding parameters for a video frame, as described by AVVideoEncParams.
Definition: frame.h:170
av_image_copy2
static void av_image_copy2(uint8_t *const dst_data[4], const int dst_linesizes[4], uint8_t *const src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Wrapper around av_image_copy() to workaround the limitation that the conversion from uint8_t * const ...
Definition: imgutils.h:184
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:437
AVFrameSideData::type
enum AVFrameSideDataType type
Definition: frame.h:257
AVSideDataDescriptor
This struct describes the properties of a side data type.
Definition: frame.h:283
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:444
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:72
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:256
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
av_frame_side_data_desc
const AVSideDataDescriptor * av_frame_side_data_desc(enum AVFrameSideDataType type)
Definition: frame.c:1021
calc_cropping_offsets
static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame, const AVPixFmtDescriptor *desc)
Definition: frame.c:1035
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
add_side_data_from_buf
static AVFrameSideData * add_side_data_from_buf(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef *buf)
Definition: frame.c:778
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:237
av_frame_side_data_name
const char * av_frame_side_data_name(enum AVFrameSideDataType type)
Definition: frame.c:1029
av_frame_side_data_get
static const AVFrameSideData * av_frame_side_data_get(AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Wrapper around av_frame_side_data_get_c() to workaround the limitation that for any type T the conver...
Definition: frame.h:1149
AV_FRAME_DATA_REGIONS_OF_INTEREST
@ AV_FRAME_DATA_REGIONS_OF_INTEREST
Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of array element is ...
Definition: frame.h:165
imgutils.h
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrameSideData::metadata
AVDictionary * metadata
Definition: frame.h:260
av_frame_side_data_get_c
const AVFrameSideData * av_frame_side_data_get_c(const AVFrameSideData *const *sd, const int nb_sd, enum AVFrameSideDataType type)
Get a side data entry of a specific type from an array.
Definition: frame.c:933
AV_FRAME_DATA_MOTION_VECTORS
@ AV_FRAME_DATA_MOTION_VECTORS
Motion vectors exported by some codecs (on demand through the export_mvs flag set in the libavcodec A...
Definition: frame.h:97
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
frame_side_data_wipe
static void frame_side_data_wipe(AVFrame *frame)
Definition: frame.c:107
AV_PIX_FMT_FLAG_PAL
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:120
av_hwframe_get_buffer
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:491
AV_FRAME_DATA_DOWNMIX_INFO
@ AV_FRAME_DATA_DOWNMIX_INFO
Metadata relevant to a downmix procedure.
Definition: frame.h:73
av_frame_side_data_add
AVFrameSideData * av_frame_side_data_add(AVFrameSideData ***sd, int *nb_sd, enum AVFrameSideDataType type, AVBufferRef **pbuf, unsigned int flags)
Add a new side data entry to an array from an existing AVBufferRef.
Definition: frame.c:849
AV_FRAME_DATA_DETECTION_BBOXES
@ AV_FRAME_DATA_DETECTION_BBOXES
Bounding boxes for object detection and classification, as described by AVDetectionBBoxHeader.
Definition: frame.h:194