FFmpeg
frame.c
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "channel_layout.h"
#include "avassert.h"
#include "buffer.h"
#include "common.h"
#include "dict.h"
#include "frame.h"
#include "imgutils.h"
#include "mem.h"
#include "samplefmt.h"


static AVFrameSideData *frame_new_side_data(AVFrame *frame,
                                            enum AVFrameSideDataType type,
                                            AVBufferRef *buf);

MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
MAKE_ACCESSORS(AVFrame, frame, int, channels)
MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)

#define CHECK_CHANNELS_CONSISTENCY(frame) \
    av_assert2(!(frame)->channel_layout || \
               (frame)->channels == \
               av_get_channel_layout_nb_channels((frame)->channel_layout))

AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame) {return &frame->metadata;};

#if FF_API_FRAME_QP
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
{
    av_buffer_unref(&f->qp_table_buf);

    f->qp_table_buf = buf;

FF_DISABLE_DEPRECATION_WARNINGS
    f->qscale_table = buf->data;
    f->qstride      = stride;
    f->qscale_type  = qp_type;
FF_ENABLE_DEPRECATION_WARNINGS

    return 0;
}

int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
{
FF_DISABLE_DEPRECATION_WARNINGS
    *stride = f->qstride;
    *type   = f->qscale_type;
FF_ENABLE_DEPRECATION_WARNINGS

    if (!f->qp_table_buf)
        return NULL;

    return f->qp_table_buf->data;
}
#endif

const char *av_get_colorspace_name(enum AVColorSpace val)
{
    static const char * const name[] = {
        [AVCOL_SPC_RGB]       = "GBR",
        [AVCOL_SPC_BT709]     = "bt709",
        [AVCOL_SPC_FCC]       = "fcc",
        [AVCOL_SPC_BT470BG]   = "bt470bg",
        [AVCOL_SPC_SMPTE170M] = "smpte170m",
        [AVCOL_SPC_SMPTE240M] = "smpte240m",
        [AVCOL_SPC_YCOCG]     = "YCgCo",
    };
    if ((unsigned)val >= FF_ARRAY_ELEMS(name))
        return NULL;
    return name[val];
}

static void get_frame_defaults(AVFrame *frame)
{
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);

    memset(frame, 0, sizeof(*frame));

    frame->pts                   =
    frame->pkt_dts               = AV_NOPTS_VALUE;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts               = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->best_effort_timestamp = AV_NOPTS_VALUE;
    frame->pkt_duration        = 0;
    frame->pkt_pos             = -1;
    frame->pkt_size            = -1;
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational){ 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
    frame->color_primaries     = AVCOL_PRI_UNSPECIFIED;
    frame->color_trc           = AVCOL_TRC_UNSPECIFIED;
    frame->colorspace          = AVCOL_SPC_UNSPECIFIED;
    frame->color_range         = AVCOL_RANGE_UNSPECIFIED;
    frame->chroma_location     = AVCHROMA_LOC_UNSPECIFIED;
    frame->flags               = 0;
}

static void free_side_data(AVFrameSideData **ptr_sd)
{
    AVFrameSideData *sd = *ptr_sd;

    av_buffer_unref(&sd->buf);
    av_dict_free(&sd->metadata);
    av_freep(ptr_sd);
}

static void wipe_side_data(AVFrame *frame)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        free_side_data(&frame->side_data[i]);
    }
    frame->nb_side_data = 0;

    av_freep(&frame->side_data);
}

AVFrame *av_frame_alloc(void)
{
    AVFrame *frame = av_mallocz(sizeof(*frame));

    if (!frame)
        return NULL;

    frame->extended_data = NULL;
    get_frame_defaults(frame);

    return frame;
}

void av_frame_free(AVFrame **frame)
{
    if (!frame || !*frame)
        return;

    av_frame_unref(*frame);
    av_freep(frame);
}
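
/*
 * Minimal usage sketch of the allocation pair above. av_frame_alloc() only
 * creates the AVFrame struct; data buffers are attached later (by a decoder
 * or by av_frame_get_buffer()), and av_frame_free() drops the struct together
 * with every reference it still holds. Error handling is abbreviated.
 *
 *     #include <libavutil/frame.h>
 *
 *     AVFrame *frame = av_frame_alloc();
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     // ... fill parameters, attach buffers, hand the frame to other APIs ...
 *     av_frame_free(&frame);   // frame is set to NULL afterwards
 */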

static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        if (align <= 0)
            align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */

        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(AVPALETTE_SIZE);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

static int get_audio_buffer(AVFrame *frame, int align)
{
    int channels;
    int planar   = av_sample_fmt_is_planar(frame->format);
    int planes;
    int ret, i;

    if (!frame->channels)
        return AVERROR(EINVAL);

    channels = frame->channels;
    planes   = planar ? channels : 1;

    CHECK_CHANNELS_CONSISTENCY(frame);
    if (!frame->linesize[0]) {
        ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
                                         frame->nb_samples, frame->format,
                                         align);
        if (ret < 0)
            return ret;
    }

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz_array(planes,
                                          sizeof(*frame->extended_data));
        frame->extended_buf  = av_mallocz_array((planes - AV_NUM_DATA_POINTERS),
                                          sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
        frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
        if (!frame->extended_buf[i]) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }
    return 0;

}

int av_frame_get_buffer(AVFrame *frame, int align)
{
    if (frame->format < 0)
        return AVERROR(EINVAL);

    if (frame->width > 0 && frame->height > 0)
        return get_video_buffer(frame, align);
    else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
        return get_audio_buffer(frame, align);

    return AVERROR(EINVAL);
}
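
/*
 * Sketch of allocating data with av_frame_get_buffer(). The caller must set
 * format plus width/height (video) or nb_samples and channels/channel_layout
 * (audio) first; for video an align of 0 or less falls back to the 32-byte
 * STRIDE_ALIGN default used in get_video_buffer() above. The pixel format and
 * dimensions below are only an example.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *
 *     frame->format = AV_PIX_FMT_YUV420P;
 *     frame->width  = 1920;
 *     frame->height = 1080;
 *
 *     if (av_frame_get_buffer(frame, 0) < 0)
 *         av_frame_free(&frame);
 */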

static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->crop_top               = src->crop_top;
    dst->crop_bottom            = src->crop_bottom;
    dst->crop_left              = src->crop_left;
    dst->crop_right             = src->crop_right;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    dst->pkt_pts                = src->pkt_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->color_primaries        = src->color_primaries;
    dst->color_trc              = src->color_trc;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;
    dst->chroma_location        = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(dst->error, src->error, sizeof(dst->error));
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            sd_dst = frame_new_side_data(dst, sd_src->type, av_buffer_ref(sd_src->buf));
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

#if FF_API_FRAME_QP
FF_DISABLE_DEPRECATION_WARNINGS
    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    av_buffer_unref(&dst->qp_table_buf);
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    av_buffer_unref(&dst->opaque_ref);
    if (src->opaque_ref) {
        dst->opaque_ref = av_buffer_ref(src->opaque_ref);
        if (!dst->opaque_ref)
            return AVERROR(ENOMEM);
    }

    return 0;
}

int av_frame_ref(AVFrame *dst, const AVFrame *src)
{
    int i, ret = 0;

    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    dst->format         = src->format;
    dst->width          = src->width;
    dst->height         = src->height;
    dst->channels       = src->channels;
    dst->channel_layout = src->channel_layout;
    dst->nb_samples     = src->nb_samples;

    ret = frame_copy_props(dst, src, 0);
    if (ret < 0)
        return ret;

    /* duplicate the frame data if it's not refcounted */
    if (!src->buf[0]) {
        ret = av_frame_get_buffer(dst, 32);
        if (ret < 0)
            return ret;

        ret = av_frame_copy(dst, src);
        if (ret < 0)
            av_frame_unref(dst);

        return ret;
    }

    /* ref the buffers */
    for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
        if (!src->buf[i])
            continue;
        dst->buf[i] = av_buffer_ref(src->buf[i]);
        if (!dst->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (src->extended_buf) {
        dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
                                             src->nb_extended_buf);
        if (!dst->extended_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        dst->nb_extended_buf = src->nb_extended_buf;

        for (i = 0; i < src->nb_extended_buf; i++) {
            dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
            if (!dst->extended_buf[i]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    if (src->hw_frames_ctx) {
        dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
        if (!dst->hw_frames_ctx) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* duplicate extended data */
    if (src->extended_data != src->data) {
        int ch = src->channels;

        if (!ch) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        CHECK_CHANNELS_CONSISTENCY(src);

        dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
        if (!dst->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
    } else
        dst->extended_data = dst->data;

    memcpy(dst->data,     src->data,     sizeof(src->data));
    memcpy(dst->linesize, src->linesize, sizeof(src->linesize));

    return 0;

fail:
    av_frame_unref(dst);
    return ret;
}

AVFrame *av_frame_clone(const AVFrame *src)
{
    AVFrame *ret = av_frame_alloc();

    if (!ret)
        return NULL;

    if (av_frame_ref(ret, src) < 0)
        av_frame_free(&ret);

    return ret;
}
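
/*
 * Reference-counting sketch: when the source is refcounted (buf[0] is set),
 * av_frame_ref() and av_frame_clone() only take new references on the same
 * AVBufferRefs, so no pixel or sample data is copied; a non-refcounted source
 * is duplicated through av_frame_get_buffer() + av_frame_copy() as above.
 * "decoded" is a hypothetical frame obtained elsewhere.
 *
 *     AVFrame *copy = av_frame_clone(decoded);   // shares decoded's buffers
 *     if (!copy)
 *         return AVERROR(ENOMEM);
 *     // ...
 *     av_frame_free(&copy);                      // drops only this reference
 */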

void av_frame_unref(AVFrame *frame)
{
    int i;

    if (!frame)
        return;

    wipe_side_data(frame);

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        av_buffer_unref(&frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        av_buffer_unref(&frame->extended_buf[i]);
    av_freep(&frame->extended_buf);
    av_dict_free(&frame->metadata);
#if FF_API_FRAME_QP
    av_buffer_unref(&frame->qp_table_buf);
#endif

    av_buffer_unref(&frame->hw_frames_ctx);

    av_buffer_unref(&frame->opaque_ref);

    get_frame_defaults(frame);
}

void av_frame_move_ref(AVFrame *dst, AVFrame *src)
{
    av_assert1(dst->width == 0 && dst->height == 0);
    av_assert1(dst->channels == 0);

    *dst = *src;
    if (src->extended_data == src->data)
        dst->extended_data = dst->data;
    memset(src, 0, sizeof(*src));
    get_frame_defaults(src);
}
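
/*
 * Sketch of av_frame_move_ref(): ownership moves without touching any
 * reference counts; src is reset to the defaults so it can be reused, while
 * dst must be clean (freshly allocated or unreferenced) before the call.
 *
 *     AVFrame *dst = av_frame_alloc();
 *     if (dst)
 *         av_frame_move_ref(dst, src);   // dst owns the data, src is blank
 */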

int av_frame_is_writable(AVFrame *frame)
{
    int i, ret = 1;

    /* assume non-refcounted frames are not writable */
    if (!frame->buf[0])
        return 0;

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
        if (frame->buf[i])
            ret &= !!av_buffer_is_writable(frame->buf[i]);
    for (i = 0; i < frame->nb_extended_buf; i++)
        ret &= !!av_buffer_is_writable(frame->extended_buf[i]);

    return ret;
}

int av_frame_make_writable(AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    if (!frame->buf[0])
        return AVERROR(EINVAL);

    if (av_frame_is_writable(frame))
        return 0;

    memset(&tmp, 0, sizeof(tmp));
    tmp.format         = frame->format;
    tmp.width          = frame->width;
    tmp.height         = frame->height;
    tmp.channels       = frame->channels;
    tmp.channel_layout = frame->channel_layout;
    tmp.nb_samples     = frame->nb_samples;
    ret = av_frame_get_buffer(&tmp, 32);
    if (ret < 0)
        return ret;

    ret = av_frame_copy(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    ret = av_frame_copy_props(&tmp, frame);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_frame_unref(frame);

    *frame = tmp;
    if (tmp.data == tmp.extended_data)
        frame->extended_data = frame->data;

    return 0;
}
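
/*
 * Copy-on-write sketch: before modifying data in place, make sure this frame
 * is the sole owner of its buffers; if other references exist,
 * av_frame_make_writable() reallocates and copies as implemented above.
 *
 *     int ret = av_frame_make_writable(frame);
 *     if (ret < 0)
 *         return ret;
 *     frame->data[0][0] = 0;   // safe now: no other reference sees this write
 */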

int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    return frame_copy_props(dst, src, 1);
}

AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
{
    uint8_t *data;
    int planes, i;

    if (frame->nb_samples) {
        int channels = frame->channels;
        if (!channels)
            return NULL;
        CHECK_CHANNELS_CONSISTENCY(frame);
        planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
    } else
        planes = 4;

    if (plane < 0 || plane >= planes || !frame->extended_data[plane])
        return NULL;
    data = frame->extended_data[plane];

    for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
        AVBufferRef *buf = frame->buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        AVBufferRef *buf = frame->extended_buf[i];
        if (data >= buf->data && data < buf->data + buf->size)
            return buf;
    }
    return NULL;
}

static AVFrameSideData *frame_new_side_data(AVFrame *frame,
                                            enum AVFrameSideDataType type,
                                            AVBufferRef *buf)
{
    AVFrameSideData *ret, **tmp;

    if (!buf)
        return NULL;

    if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
        goto fail;

    tmp = av_realloc(frame->side_data,
                     (frame->nb_side_data + 1) * sizeof(*frame->side_data));
    if (!tmp)
        goto fail;
    frame->side_data = tmp;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        goto fail;

    ret->buf  = buf;
    ret->data = ret->buf->data;
    ret->size = buf->size;
    ret->type = type;

    frame->side_data[frame->nb_side_data++] = ret;

    return ret;
fail:
    av_buffer_unref(&buf);
    return NULL;
}

AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
                                        enum AVFrameSideDataType type,
                                        int size)
{

    return frame_new_side_data(frame, type, av_buffer_alloc(size));
}

AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
                                        enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == type)
            return frame->side_data[i];
    }
    return NULL;
}
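
/*
 * Side-data sketch: av_frame_new_side_data() attaches a freshly allocated
 * buffer of the requested size to the frame, and av_frame_get_side_data()
 * looks it up by type. A display matrix, for instance, is nine int32_t values
 * filled via av_display_rotation_set() from libavutil/display.h.
 *
 *     AVFrameSideData *sd =
 *         av_frame_new_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX,
 *                                9 * sizeof(int32_t));
 *     if (sd)
 *         av_display_rotation_set((int32_t *)sd->data, 90.0);
 *
 *     sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
 */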

static int frame_copy_video(AVFrame *dst, const AVFrame *src)
{
    const uint8_t *src_data[4];
    int i, planes;

    if (dst->width  < src->width ||
        dst->height < src->height)
        return AVERROR(EINVAL);

    planes = av_pix_fmt_count_planes(dst->format);
    for (i = 0; i < planes; i++)
        if (!dst->data[i] || !src->data[i])
            return AVERROR(EINVAL);

    memcpy(src_data, src->data, sizeof(src_data));
    av_image_copy(dst->data, dst->linesize,
                  src_data, src->linesize,
                  dst->format, src->width, src->height);

    return 0;
}

static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
{
    int planar   = av_sample_fmt_is_planar(dst->format);
    int channels = dst->channels;
    int planes   = planar ? channels : 1;
    int i;

    if (dst->nb_samples     != src->nb_samples ||
        dst->channels       != src->channels ||
        dst->channel_layout != src->channel_layout)
        return AVERROR(EINVAL);

    CHECK_CHANNELS_CONSISTENCY(dst);

    for (i = 0; i < planes; i++)
        if (!dst->extended_data[i] || !src->extended_data[i])
            return AVERROR(EINVAL);

    av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
                    dst->nb_samples, channels, dst->format);

    return 0;
}

int av_frame_copy(AVFrame *dst, const AVFrame *src)
{
    if (dst->format != src->format || dst->format < 0)
        return AVERROR(EINVAL);

    if (dst->width > 0 && dst->height > 0)
        return frame_copy_video(dst, src);
    else if (dst->nb_samples > 0 && dst->channels > 0)
        return frame_copy_audio(dst, src);

    return AVERROR(EINVAL);
}
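
/*
 * Sketch of a full data copy: av_frame_copy() expects dst to already have a
 * matching format, matching dimensions or audio parameters, and allocated
 * buffers; it copies only the data, so metadata is carried over separately
 * with av_frame_copy_props().
 *
 *     AVFrame *dst = av_frame_alloc();
 *     if (!dst)
 *         return AVERROR(ENOMEM);
 *     dst->format = src->format;
 *     dst->width  = src->width;
 *     dst->height = src->height;
 *     if (av_frame_get_buffer(dst, 0) >= 0 &&
 *         av_frame_copy(dst, src)     >= 0)
 *         av_frame_copy_props(dst, src);
 */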

void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
{
    int i;

    for (i = 0; i < frame->nb_side_data; i++) {
        AVFrameSideData *sd = frame->side_data[i];
        if (sd->type == type) {
            free_side_data(&frame->side_data[i]);
            frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
            frame->nb_side_data--;
        }
    }
}

const char *av_frame_side_data_name(enum AVFrameSideDataType type)
{
    switch(type) {
    case AV_FRAME_DATA_PANSCAN:         return "AVPanScan";
    case AV_FRAME_DATA_A53_CC:          return "ATSC A53 Part 4 Closed Captions";
    case AV_FRAME_DATA_STEREO3D:        return "Stereoscopic 3d metadata";
    case AV_FRAME_DATA_MATRIXENCODING:  return "AVMatrixEncoding";
    case AV_FRAME_DATA_DOWNMIX_INFO:    return "Metadata relevant to a downmix procedure";
    case AV_FRAME_DATA_REPLAYGAIN:      return "AVReplayGain";
    case AV_FRAME_DATA_DISPLAYMATRIX:   return "3x3 displaymatrix";
    case AV_FRAME_DATA_AFD:             return "Active format description";
    case AV_FRAME_DATA_MOTION_VECTORS:  return "Motion vectors";
    case AV_FRAME_DATA_SKIP_SAMPLES:    return "Skip samples";
    case AV_FRAME_DATA_AUDIO_SERVICE_TYPE:          return "Audio service type";
    case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA:  return "Mastering display metadata";
    case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL:         return "Content light level metadata";
    case AV_FRAME_DATA_GOP_TIMECODE:                return "GOP timecode";
    case AV_FRAME_DATA_ICC_PROFILE:                 return "ICC profile";
    }
    return NULL;
}

static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
                                 const AVPixFmtDescriptor *desc)
{
    int i, j;

    for (i = 0; frame->data[i]; i++) {
        const AVComponentDescriptor *comp = NULL;
        int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
        int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;

        if (desc->flags & (AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_PSEUDOPAL) && i == 1) {
            offsets[i] = 0;
            break;
        }

        /* find any component descriptor for this plane */
        for (j = 0; j < desc->nb_components; j++) {
            if (desc->comp[j].plane == i) {
                comp = &desc->comp[j];
                break;
            }
        }
        if (!comp)
            return AVERROR_BUG;

        offsets[i] = (frame->crop_top  >> shift_y) * frame->linesize[i] +
                     (frame->crop_left >> shift_x) * comp->step;
    }

    return 0;
}

int av_frame_apply_cropping(AVFrame *frame, int flags)
{
    const AVPixFmtDescriptor *desc;
    size_t offsets[4];
    int i;

    if (!(frame->width > 0 && frame->height > 0))
        return AVERROR(EINVAL);

    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
        (frame->crop_left + frame->crop_right) >= frame->width ||
        (frame->crop_top + frame->crop_bottom) >= frame->height)
        return AVERROR(ERANGE);

    desc = av_pix_fmt_desc_get(frame->format);
    if (!desc)
        return AVERROR_BUG;

    /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
     * formats cannot be easily handled here either (and corresponding decoders
     * should not export any cropping anyway), so do the same for those as well.
     * */
    if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
        frame->width      -= frame->crop_right;
        frame->height     -= frame->crop_bottom;
        frame->crop_right  = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    /* calculate the offsets for each plane */
    calc_cropping_offsets(offsets, frame, desc);

    /* adjust the offsets to avoid breaking alignment */
    if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
        int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
        int min_log2_align = INT_MAX;

        for (i = 0; frame->data[i]; i++) {
            int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
            min_log2_align = FFMIN(log2_align, min_log2_align);
        }

        /* we assume, and it should always be true, that the data alignment is
         * related to the cropping alignment by a constant power-of-2 factor */
        if (log2_crop_align < min_log2_align)
            return AVERROR_BUG;

        if (min_log2_align < 5) {
            frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
            calc_cropping_offsets(offsets, frame, desc);
        }
    }

    for (i = 0; frame->data[i]; i++)
        frame->data[i] += offsets[i];

    frame->width      -= (frame->crop_left + frame->crop_right);
    frame->height     -= (frame->crop_top  + frame->crop_bottom);
    frame->crop_left   = 0;
    frame->crop_right  = 0;
    frame->crop_top    = 0;
    frame->crop_bottom = 0;

    return 0;
}
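
/*
 * Cropping sketch: a decoder may export crop_left/right/top/bottom on a frame
 * instead of shrinking the picture itself; av_frame_apply_cropping() then
 * adjusts the data pointers and width/height in place. Without
 * AV_FRAME_CROP_UNALIGNED, crop_left can be rounded down to keep the plane
 * pointers suitably aligned, as implemented above. The crop values below are
 * arbitrary.
 *
 *     frame->crop_left = 16;
 *     frame->crop_top  =  8;
 *     int ret = av_frame_apply_cropping(frame, 0);
 *     if (ret < 0)
 *         return ret;   // crop fields were out of range for this frame
 */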