decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "thread.h"
47 
48 typedef struct FramePool {
49  /**
50  * Pools for each data plane. For audio all the planes have the same size,
51  * so only pools[0] is used.
52  */
53  AVBufferPool *pools[4];
54 
55  /*
56  * Pool parameters
57  */
58  int format;
59  int width, height;
60  int stride_align[AV_NUM_DATA_POINTERS];
61  int linesize[4];
62  int planes;
63  int channels;
64  int samples;
65 } FramePool;
66 
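/* Parse an AV_PKT_DATA_PARAM_CHANGE side-data payload attached to the packet
 * and apply it to the codec context: a little-endian flags word followed by
 * the new channel count, channel layout, sample rate and/or dimensions,
 * depending on which change flags are set. */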
67 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
68 {
69  int size, ret;
70  const uint8_t *data;
71  uint32_t flags;
72  int64_t val;
73 
74  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
75  if (!data)
76  return 0;
77 
78  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
79  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
80  "changes, but PARAM_CHANGE side data was sent to it.\n");
81  ret = AVERROR(EINVAL);
82  goto fail2;
83  }
84 
85  if (size < 4)
86  goto fail;
87 
88  flags = bytestream_get_le32(&data);
89  size -= 4;
90 
91  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
92  if (size < 4)
93  goto fail;
94  val = bytestream_get_le32(&data);
95  if (val <= 0 || val > INT_MAX) {
96  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
97  ret = AVERROR_INVALIDDATA;
98  goto fail2;
99  }
100  avctx->channels = val;
101  size -= 4;
102  }
103  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
104  if (size < 8)
105  goto fail;
106  avctx->channel_layout = bytestream_get_le64(&data);
107  size -= 8;
108  }
109  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
110  if (size < 4)
111  goto fail;
112  val = bytestream_get_le32(&data);
113  if (val <= 0 || val > INT_MAX) {
114  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
115  ret = AVERROR_INVALIDDATA;
116  goto fail2;
117  }
118  avctx->sample_rate = val;
119  size -= 4;
120  }
121  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
122  if (size < 8)
123  goto fail;
124  avctx->width = bytestream_get_le32(&data);
125  avctx->height = bytestream_get_le32(&data);
126  size -= 8;
127  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
128  if (ret < 0)
129  goto fail2;
130  }
131 
132  return 0;
133 fail:
134  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
135  ret = AVERROR_INVALIDDATA;
136 fail2:
137  if (ret < 0) {
138  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
139  if (avctx->err_recognition & AV_EF_EXPLODE)
140  return ret;
141  }
142  return 0;
143 }
144 
145 #define IS_EMPTY(pkt) (!(pkt)->data)
146 
147 static int copy_packet_props(AVPacket *dst, const AVPacket *src)
148 {
149  int ret = av_packet_copy_props(dst, src);
150  if (ret < 0)
151  return ret;
152 
153  dst->size = src->size; // HACK: Needed for ff_decode_frame_props().
154  dst->data = (void*)1; // HACK: Needed for IS_EMPTY().
155 
156  return 0;
157 }
158 
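/* Stash the properties (timestamps and side data) of an incoming packet so
 * that ff_decode_frame_props() can later copy them onto the decoded frame:
 * last_pkt_props holds the packet currently being consumed and the pkt_props
 * FIFO queues the properties of packets that are still waiting. */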
159 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
160 {
161  AVPacket tmp = { 0 };
162  int ret = 0;
163 
164  if (IS_EMPTY(avci->last_pkt_props)) {
165  if (av_fifo_size(avci->pkt_props) >= sizeof(*pkt)) {
166  av_fifo_generic_read(avci->pkt_props, avci->last_pkt_props,
167  sizeof(*avci->last_pkt_props), NULL);
168  } else
169  return copy_packet_props(avci->last_pkt_props, pkt);
170  }
171 
172  if (av_fifo_space(avci->pkt_props) < sizeof(*pkt)) {
173  ret = av_fifo_grow(avci->pkt_props, sizeof(*pkt));
174  if (ret < 0)
175  return ret;
176  }
177 
178  ret = copy_packet_props(&tmp, pkt);
179  if (ret < 0)
180  return ret;
181 
182  av_fifo_generic_write(avci->pkt_props, &tmp, sizeof(tmp), NULL);
183 
184  return 0;
185 }
186 
187 int ff_decode_bsfs_init(AVCodecContext *avctx)
188 {
189  AVCodecInternal *avci = avctx->internal;
190  int ret;
191 
192  if (avci->bsf)
193  return 0;
194 
195  ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
196  if (ret < 0) {
197  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
198  if (ret != AVERROR(ENOMEM))
199  ret = AVERROR_BUG;
200  goto fail;
201  }
202 
203  /* We do not currently have an API for passing the input timebase into decoders,
204  * but no filters used here should actually need it.
205  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
206  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
207  ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
208  if (ret < 0)
209  goto fail;
210 
211  ret = av_bsf_init(avci->bsf);
212  if (ret < 0)
213  goto fail;
214 
215  return 0;
216 fail:
217  av_bsf_free(&avci->bsf);
218  return ret;
219 }
220 
221 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
222 {
223  AVCodecInternal *avci = avctx->internal;
224  int ret;
225 
226  if (avci->draining)
227  return AVERROR_EOF;
228 
229  ret = av_bsf_receive_packet(avci->bsf, pkt);
230  if (ret == AVERROR_EOF)
231  avci->draining = 1;
232  if (ret < 0)
233  return ret;
234 
235  ret = extract_packet_props(avctx->internal, pkt);
236  if (ret < 0)
237  goto finish;
238 
239  ret = apply_param_change(avctx, pkt);
240  if (ret < 0)
241  goto finish;
242 
243 #if FF_API_OLD_ENCDEC
244  if (avctx->codec->receive_frame)
245  avci->compat_decode_consumed += pkt->size;
246 #endif
247 
248  return 0;
249 finish:
250  av_packet_unref(pkt);
251  return ret;
252 }
253 
254 /**
255  * Attempt to guess proper monotonic timestamps for decoded video frames
256  * which might have incorrect times. Input timestamps may wrap around, in
257  * which case the output will as well.
258  *
259  * @param pts the pts field of the decoded AVPacket, as passed through
260  * AVFrame.pts
261  * @param dts the dts field of the decoded AVPacket
262  * @return one of the input values, may be AV_NOPTS_VALUE
263  */
264 static int64_t guess_correct_pts(AVCodecContext *ctx,
265  int64_t reordered_pts, int64_t dts)
266 {
267  int64_t pts = AV_NOPTS_VALUE;
268 
269  if (dts != AV_NOPTS_VALUE) {
270  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
271  ctx->pts_correction_last_dts = dts;
272  } else if (reordered_pts != AV_NOPTS_VALUE)
273  ctx->pts_correction_last_dts = reordered_pts;
274 
275  if (reordered_pts != AV_NOPTS_VALUE) {
276  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
277  ctx->pts_correction_last_pts = reordered_pts;
278  } else if(dts != AV_NOPTS_VALUE)
279  ctx->pts_correction_last_pts = dts;
280 
281  if ((ctx->pts_correction_num_faulty_pts <= ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
282  && reordered_pts != AV_NOPTS_VALUE)
283  pts = reordered_pts;
284  else
285  pts = dts;
286 
287  return pts;
288 }
289 
290 /*
291  * The core of the receive_frame_wrapper for the decoders implementing
292  * the simple API. Certain decoders might consume partial packets without
293  * returning any output, so this function needs to be called in a loop until it
294  * returns EAGAIN.
295  **/
296 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
297 {
298  AVCodecInternal *avci = avctx->internal;
299  DecodeSimpleContext *ds = &avci->ds;
300  AVPacket *pkt = ds->in_pkt;
301  // copy to ensure we do not change pkt
302  int got_frame, actual_got_frame;
303  int ret;
304 
305  if (!pkt->data && !avci->draining) {
306  av_packet_unref(pkt);
307  ret = ff_decode_get_packet(avctx, pkt);
308  if (ret < 0 && ret != AVERROR_EOF)
309  return ret;
310  }
311 
312  // Some codecs (at least wma lossless) will crash when feeding drain packets
313  // after EOF was signaled.
314  if (avci->draining_done)
315  return AVERROR_EOF;
316 
317  if (!pkt->data &&
318  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
319  avctx->active_thread_type & FF_THREAD_FRAME))
320  return AVERROR_EOF;
321 
322  got_frame = 0;
323 
324  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
325  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
326  } else {
327  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
328 
329  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
330  frame->pkt_dts = pkt->dts;
331  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
332  if(!avctx->has_b_frames)
333  frame->pkt_pos = pkt->pos;
334  //FIXME these should be under if(!avctx->has_b_frames)
335  /* get_buffer is supposed to set frame parameters */
336  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
337  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
338  if (!frame->width) frame->width = avctx->width;
339  if (!frame->height) frame->height = avctx->height;
340  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
341  }
342  }
343  }
344  emms_c();
345  actual_got_frame = got_frame;
346 
347  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
348  if (frame->flags & AV_FRAME_FLAG_DISCARD)
349  got_frame = 0;
350  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
351  uint8_t *side;
352  int side_size;
353  uint32_t discard_padding = 0;
354  uint8_t skip_reason = 0;
355  uint8_t discard_reason = 0;
356 
357  if (ret >= 0 && got_frame) {
358  if (frame->format == AV_SAMPLE_FMT_NONE)
359  frame->format = avctx->sample_fmt;
360  if (!frame->channel_layout)
361  frame->channel_layout = avctx->channel_layout;
362  if (!frame->channels)
363  frame->channels = avctx->channels;
364  if (!frame->sample_rate)
365  frame->sample_rate = avctx->sample_rate;
366  }
367 
368  side = av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
369  if(side && side_size>=10) {
370  avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
371  discard_padding = AV_RL32(side + 4);
372  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
373  avci->skip_samples, (int)discard_padding);
374  skip_reason = AV_RL8(side + 8);
375  discard_reason = AV_RL8(side + 9);
376  }
377 
378  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
379  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
380  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
381  got_frame = 0;
382  *discarded_samples += frame->nb_samples;
383  }
384 
385  if (avci->skip_samples > 0 && got_frame &&
386  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
387  if(frame->nb_samples <= avci->skip_samples){
388  got_frame = 0;
389  *discarded_samples += frame->nb_samples;
390  avci->skip_samples -= frame->nb_samples;
391  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
392  avci->skip_samples);
393  } else {
394  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
395  frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
396  if(avctx->pkt_timebase.num && avctx->sample_rate) {
397  int64_t diff_ts = av_rescale_q(avci->skip_samples,
398  (AVRational){1, avctx->sample_rate},
399  avctx->pkt_timebase);
400  if(frame->pts!=AV_NOPTS_VALUE)
401  frame->pts += diff_ts;
402 #if FF_API_PKT_PTS
403 FF_DISABLE_DEPRECATION_WARNINGS
404  if(frame->pkt_pts!=AV_NOPTS_VALUE)
405  frame->pkt_pts += diff_ts;
406 FF_ENABLE_DEPRECATION_WARNINGS
407 #endif
408  if(frame->pkt_dts!=AV_NOPTS_VALUE)
409  frame->pkt_dts += diff_ts;
410  if (frame->pkt_duration >= diff_ts)
411  frame->pkt_duration -= diff_ts;
412  } else {
413  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
414  }
415  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
416  avci->skip_samples, frame->nb_samples);
417  *discarded_samples += avci->skip_samples;
418  frame->nb_samples -= avci->skip_samples;
419  avci->skip_samples = 0;
420  }
421  }
422 
423  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
424  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
425  if (discard_padding == frame->nb_samples) {
426  *discarded_samples += frame->nb_samples;
427  got_frame = 0;
428  } else {
429  if(avctx->pkt_timebase.num && avctx->sample_rate) {
430  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
431  (AVRational){1, avctx->sample_rate},
432  avctx->pkt_timebase);
433  frame->pkt_duration = diff_ts;
434  } else {
435  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
436  }
437  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
438  (int)discard_padding, frame->nb_samples);
439  frame->nb_samples -= discard_padding;
440  }
441  }
442 
443  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
444  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
445  if (fside) {
446  AV_WL32(fside->data, avci->skip_samples);
447  AV_WL32(fside->data + 4, discard_padding);
448  AV_WL8(fside->data + 8, skip_reason);
449  AV_WL8(fside->data + 9, discard_reason);
450  avci->skip_samples = 0;
451  }
452  }
453  }
454 
455  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
456  !avci->showed_multi_packet_warning &&
457  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
458  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
459  avci->showed_multi_packet_warning = 1;
460  }
461 
462  if (!got_frame)
463  av_frame_unref(frame);
464 
465  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
466  ret = pkt->size;
467 
468 #if FF_API_AVCTX_TIMEBASE
469  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
470  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
471 #endif
472 
473  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
474  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
475  if (avci->draining && !actual_got_frame) {
476  if (ret < 0) {
477  /* prevent an infinite loop if a decoder wrongly always returns an error when draining */
478  /* reasonable nb_errors_max = maximum b frames + thread count */
479  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
480  avctx->thread_count : 1);
481 
482  if (avci->nb_draining_errors++ >= nb_errors_max) {
483  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
484  "Stop draining and force EOF.\n");
485  avci->draining_done = 1;
486  ret = AVERROR_BUG;
487  }
488  } else {
489  avci->draining_done = 1;
490  }
491  }
492 
493 #if FF_API_OLD_ENCDEC
494  avci->compat_decode_consumed += ret;
495 #endif
496 
497  if (ret >= pkt->size || ret < 0) {
498  av_packet_unref(pkt);
499  av_packet_unref(avci->last_pkt_props);
500  } else {
501  int consumed = ret;
502 
503  pkt->data += consumed;
504  pkt->size -= consumed;
505  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
506  pkt->pts = AV_NOPTS_VALUE;
507  pkt->dts = AV_NOPTS_VALUE;
508  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
509  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
510  }
511 
512  if (got_frame)
513  av_assert0(frame->buf[0]);
514 
515  return ret < 0 ? ret : 0;
516 }
517 
518 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
519 {
520  int ret;
521  int64_t discarded_samples = 0;
522 
523  while (!frame->buf[0]) {
524  if (discarded_samples > avctx->max_samples)
525  return AVERROR(EAGAIN);
526  ret = decode_simple_internal(avctx, frame, &discarded_samples);
527  if (ret < 0)
528  return ret;
529  }
530 
531  return 0;
532 }
533 
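/* Pull one decoded frame out of the decoder: either via the codec's own
 * receive_frame() callback or, for decoders using the simple API, by looping
 * over decode_simple_internal(). On success the best-effort timestamp is
 * derived from pts/dts and any attached per-frame post-processing callback
 * (used e.g. by some hwaccels) is run before the frame is returned. */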
534 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
535 {
536  AVCodecInternal *avci = avctx->internal;
537  int ret;
538 
539  av_assert0(!frame->buf[0]);
540 
541  if (avctx->codec->receive_frame) {
542  ret = avctx->codec->receive_frame(avctx, frame);
543  if (ret != AVERROR(EAGAIN))
544  av_packet_unref(avci->last_pkt_props);
545  } else
546  ret = decode_simple_receive_frame(avctx, frame);
547 
548  if (ret == AVERROR_EOF)
549  avci->draining_done = 1;
550 
551  if (!ret) {
552  frame->best_effort_timestamp = guess_correct_pts(avctx,
553  frame->pts,
554  frame->pkt_dts);
555 
556  /* the only case where decode data is not set should be decoders
557  * that do not call ff_get_buffer() */
558  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
559  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
560 
561  if (frame->private_ref) {
562  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
563 
564  if (fdd->post_process) {
565  ret = fdd->post_process(avctx, frame);
566  if (ret < 0) {
567  av_frame_unref(frame);
568  return ret;
569  }
570  }
571  }
572  }
573 
574  /* free the per-frame decode data */
575  av_buffer_unref(&frame->private_ref);
576 
577  return ret;
578 }
579 
580 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
581 {
582  AVCodecInternal *avci = avctx->internal;
583  int ret;
584 
585  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
586  return AVERROR(EINVAL);
587 
588  if (avctx->internal->draining)
589  return AVERROR_EOF;
590 
591  if (avpkt && !avpkt->size && avpkt->data)
592  return AVERROR(EINVAL);
593 
594  av_packet_unref(avci->buffer_pkt);
595  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
596  ret = av_packet_ref(avci->buffer_pkt, avpkt);
597  if (ret < 0)
598  return ret;
599  }
600 
601  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
602  if (ret < 0) {
603  av_packet_unref(avci->buffer_pkt);
604  return ret;
605  }
606 
607  if (!avci->buffer_frame->buf[0]) {
608  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
609  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
610  return ret;
611  }
612 
613  return 0;
614 }
615 
616 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
617 {
618  /* make sure we are noisy about decoders returning invalid cropping data */
619  if (frame->crop_left >= INT_MAX - frame->crop_right ||
620  frame->crop_top >= INT_MAX - frame->crop_bottom ||
621  (frame->crop_left + frame->crop_right) >= frame->width ||
622  (frame->crop_top + frame->crop_bottom) >= frame->height) {
623  av_log(avctx, AV_LOG_WARNING,
624  "Invalid cropping information set by a decoder: "
625  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
626  "(frame size %dx%d). This is a bug, please report it\n",
627  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
628  frame->width, frame->height);
629  frame->crop_left = 0;
630  frame->crop_right = 0;
631  frame->crop_top = 0;
632  frame->crop_bottom = 0;
633  return 0;
634  }
635 
636  if (!avctx->apply_cropping)
637  return 0;
638 
639  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
640  AV_FRAME_CROP_UNALIGNED : 0);
641 }
642 
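/* Public half of the send/receive decoding API that hands decoded frames back
 * to the caller. A minimal usage sketch of the calling loop (error handling
 * abbreviated; avctx, pkt and frame are assumed to be set up by the caller):
 *
 *     avcodec_send_packet(avctx, pkt);
 *     while (avcodec_receive_frame(avctx, frame) >= 0) {
 *         // ... use the decoded frame ...
 *         av_frame_unref(frame);
 *     }
 */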
643 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
644 {
645  AVCodecInternal *avci = avctx->internal;
646  int ret, changed;
647 
648  av_frame_unref(frame);
649 
650  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
651  return AVERROR(EINVAL);
652 
653  if (avci->buffer_frame->buf[0]) {
654  av_frame_move_ref(frame, avci->buffer_frame);
655  } else {
656  ret = decode_receive_frame_internal(avctx, frame);
657  if (ret < 0)
658  return ret;
659  }
660 
661  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
662  ret = apply_cropping(avctx, frame);
663  if (ret < 0) {
664  av_frame_unref(frame);
665  return ret;
666  }
667  }
668 
669  avctx->frame_number++;
670 
671  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
672 
673  if (avctx->frame_number == 1) {
674  avci->initial_format = frame->format;
675  switch(avctx->codec_type) {
676  case AVMEDIA_TYPE_VIDEO:
677  avci->initial_width = frame->width;
678  avci->initial_height = frame->height;
679  break;
680  case AVMEDIA_TYPE_AUDIO:
681  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
682  avctx->sample_rate;
683  avci->initial_channels = frame->channels;
684  avci->initial_channel_layout = frame->channel_layout;
685  break;
686  }
687  }
688 
689  if (avctx->frame_number > 1) {
690  changed = avci->initial_format != frame->format;
691 
692  switch(avctx->codec_type) {
693  case AVMEDIA_TYPE_VIDEO:
694  changed |= avci->initial_width != frame->width ||
695  avci->initial_height != frame->height;
696  break;
697  case AVMEDIA_TYPE_AUDIO:
698  changed |= avci->initial_sample_rate != frame->sample_rate ||
699  avci->initial_sample_rate != avctx->sample_rate ||
700  avci->initial_channels != frame->channels ||
701  avci->initial_channel_layout != frame->channel_layout;
702  break;
703  }
704 
705  if (changed) {
706  avci->changed_frames_dropped++;
707  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
708  " drop count: %d \n",
709  avctx->frame_number, frame->pts,
710  avci->changed_frames_dropped);
711  av_frame_unref(frame);
712  return AVERROR_INPUT_CHANGED;
713  }
714  }
715  }
716  return 0;
717 }
718 
719 #if FF_API_OLD_ENCDEC
720 FF_DISABLE_DEPRECATION_WARNINGS
721 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
722 {
723  int ret;
724 
725  /* move the original frame to our backup */
726  av_frame_unref(avci->to_free);
727  av_frame_move_ref(avci->to_free, frame);
728 
729  /* now copy everything except the AVBufferRefs back
730  * note that we make a COPY of the side data, so calling av_frame_free() on
731  * the caller's frame will work properly */
732  ret = av_frame_copy_props(frame, avci->to_free);
733  if (ret < 0)
734  return ret;
735 
736  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
737  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
738  if (avci->to_free->extended_data != avci->to_free->data) {
739  int planes = avci->to_free->channels;
740  int size = planes * sizeof(*frame->extended_data);
741 
742  if (!size) {
743  av_frame_unref(frame);
744  return AVERROR_BUG;
745  }
746 
747  frame->extended_data = av_malloc(size);
748  if (!frame->extended_data) {
749  av_frame_unref(frame);
750  return AVERROR(ENOMEM);
751  }
752  memcpy(frame->extended_data, avci->to_free->extended_data,
753  size);
754  } else
755  frame->extended_data = frame->data;
756 
757  frame->format = avci->to_free->format;
758  frame->width = avci->to_free->width;
759  frame->height = avci->to_free->height;
760  frame->channel_layout = avci->to_free->channel_layout;
761  frame->nb_samples = avci->to_free->nb_samples;
762  frame->channels = avci->to_free->channels;
763 
764  return 0;
765 }
766 
767 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
768  int *got_frame, const AVPacket *pkt)
769 {
770  AVCodecInternal *avci = avctx->internal;
771  int ret = 0;
772 
773  av_assert0(avci->compat_decode_consumed == 0);
774 
775  if (avci->draining_done && pkt && pkt->size != 0) {
776  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
777  avcodec_flush_buffers(avctx);
778  }
779 
780  *got_frame = 0;
781 
782  if (avci->compat_decode_partial_size > 0 &&
783  avci->compat_decode_partial_size != pkt->size) {
784  av_log(avctx, AV_LOG_ERROR,
785  "Got unexpected packet size after a partial decode\n");
786  ret = AVERROR(EINVAL);
787  goto finish;
788  }
789 
790  if (!avci->compat_decode_partial_size) {
791  ret = avcodec_send_packet(avctx, pkt);
792  if (ret == AVERROR_EOF)
793  ret = 0;
794  else if (ret == AVERROR(EAGAIN)) {
795  /* we fully drain all the output in each decode call, so this should not
796  * ever happen */
797  ret = AVERROR_BUG;
798  goto finish;
799  } else if (ret < 0)
800  goto finish;
801  }
802 
803  while (ret >= 0) {
804  ret = avcodec_receive_frame(avctx, frame);
805  if (ret < 0) {
806  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
807  ret = 0;
808  goto finish;
809  }
810 
811  if (frame != avci->compat_decode_frame) {
812  if (!avctx->refcounted_frames) {
813  ret = unrefcount_frame(avci, frame);
814  if (ret < 0)
815  goto finish;
816  }
817 
818  *got_frame = 1;
819  frame = avci->compat_decode_frame;
820  } else {
821  if (!avci->compat_decode_warned) {
822  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
823  "API cannot return all the frames for this decoder. "
824  "Some frames will be dropped. Update your code to the "
825  "new decoding API to fix this.\n");
826  avci->compat_decode_warned = 1;
827  }
828  }
829 
830  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
831  break;
832  }
833 
834 finish:
835  if (ret == 0) {
836  /* if there are any bsfs then assume full packet is always consumed */
837  if (avctx->codec->bsfs)
838  ret = pkt->size;
839  else
840  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
841  }
842  avci->compat_decode_consumed = 0;
843  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
844 
845  return ret;
846 }
847 
848 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
849  int *got_picture_ptr,
850  const AVPacket *avpkt)
851 {
852  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
853 }
854 
855 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
856  AVFrame *frame,
857  int *got_frame_ptr,
858  const AVPacket *avpkt)
859 {
860  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
861 }
862 FF_ENABLE_DEPRECATION_WARNINGS
863 #endif
864 
865 static void get_subtitle_defaults(AVSubtitle *sub)
866 {
867  memset(sub, 0, sizeof(*sub));
868  sub->pts = AV_NOPTS_VALUE;
869 }
870 
871 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
872 static int recode_subtitle(AVCodecContext *avctx,
873  AVPacket *outpkt, const AVPacket *inpkt)
874 {
875 #if CONFIG_ICONV
876  iconv_t cd = (iconv_t)-1;
877  int ret = 0;
878  char *inb, *outb;
879  size_t inl, outl;
880  AVPacket tmp;
881 #endif
882 
883  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
884  return 0;
885 
886 #if CONFIG_ICONV
887  cd = iconv_open("UTF-8", avctx->sub_charenc);
888  av_assert0(cd != (iconv_t)-1);
889 
890  inb = inpkt->data;
891  inl = inpkt->size;
892 
893  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
894  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
895  ret = AVERROR(ENOMEM);
896  goto end;
897  }
898 
899  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
900  if (ret < 0)
901  goto end;
902  outpkt->buf = tmp.buf;
903  outpkt->data = tmp.data;
904  outpkt->size = tmp.size;
905  outb = outpkt->data;
906  outl = outpkt->size;
907 
908  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
909  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
910  outl >= outpkt->size || inl != 0) {
911  ret = FFMIN(AVERROR(errno), -1);
912  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
913  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
914  av_packet_unref(&tmp);
915  goto end;
916  }
917  outpkt->size -= outl;
918  memset(outpkt->data + outpkt->size, 0, outl);
919 
920 end:
921  if (cd != (iconv_t)-1)
922  iconv_close(cd);
923  return ret;
924 #else
925  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
926  return AVERROR(EINVAL);
927 #endif
928 }
929 
930 static int utf8_check(const uint8_t *str)
931 {
932  const uint8_t *byte;
933  uint32_t codepoint, min;
934 
935  while (*str) {
936  byte = str;
937  GET_UTF8(codepoint, *(byte++), return 0;);
938  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
939  1 << (5 * (byte - str) - 4);
940  if (codepoint < min || codepoint >= 0x110000 ||
941  codepoint == 0xFFFE /* BOM */ ||
942  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
943  return 0;
944  str = byte;
945  }
946  return 1;
947 }
948 
949 #if FF_API_ASS_TIMING
950 static void insert_ts(AVBPrint *buf, int ts)
951 {
952  if (ts == -1) {
953  av_bprintf(buf, "9:59:59.99,");
954  } else {
955  int h, m, s;
956 
957  h = ts/360000; ts -= 360000*h;
958  m = ts/ 6000; ts -= 6000*m;
959  s = ts/ 100; ts -= 100*s;
960  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
961  }
962 }
963 
964 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
965 {
966  int i;
967  AVBPrint buf;
968 
969  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
970 
971  for (i = 0; i < sub->num_rects; i++) {
972  char *final_dialog;
973  const char *dialog;
974  AVSubtitleRect *rect = sub->rects[i];
975  int ts_start, ts_duration = -1;
976  long int layer;
977 
978  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
979  continue;
980 
981  av_bprint_clear(&buf);
982 
983  /* skip ReadOrder */
984  dialog = strchr(rect->ass, ',');
985  if (!dialog)
986  continue;
987  dialog++;
988 
989  /* extract Layer or Marked */
990  layer = strtol(dialog, (char**)&dialog, 10);
991  if (*dialog != ',')
992  continue;
993  dialog++;
994 
995  /* rescale timing to ASS time base (ms) */
996  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
997  if (pkt->duration != -1)
998  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
999  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
1000 
1001  /* construct ASS (standalone file form with timestamps) string */
1002  av_bprintf(&buf, "Dialogue: %ld,", layer);
1003  insert_ts(&buf, ts_start);
1004  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
1005  av_bprintf(&buf, "%s\r\n", dialog);
1006 
1007  final_dialog = av_strdup(buf.str);
1008  if (!av_bprint_is_complete(&buf) || !final_dialog) {
1009  av_freep(&final_dialog);
1010  av_bprint_finalize(&buf, NULL);
1011  return AVERROR(ENOMEM);
1012  }
1013  av_freep(&rect->ass);
1014  rect->ass = final_dialog;
1015  }
1016 
1017  av_bprint_finalize(&buf, NULL);
1018  return 0;
1019 }
1020 #endif
1021 
1022 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1023  int *got_sub_ptr,
1024  AVPacket *avpkt)
1025 {
1026  int i, ret = 0;
1027 
1028  if (!avpkt->data && avpkt->size) {
1029  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1030  return AVERROR(EINVAL);
1031  }
1032  if (!avctx->codec)
1033  return AVERROR(EINVAL);
1034  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1035  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1036  return AVERROR(EINVAL);
1037  }
1038 
1039  *got_sub_ptr = 0;
1040  get_subtitle_defaults(sub);
1041 
1042  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1043  AVPacket pkt_recoded = *avpkt;
1044 
1045  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1046  if (ret < 0) {
1047  *got_sub_ptr = 0;
1048  } else {
1049  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1050  if (ret < 0)
1051  return ret;
1052 
1053  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1054  sub->pts = av_rescale_q(avpkt->pts,
1055  avctx->pkt_timebase, AV_TIME_BASE_Q);
1056  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1057  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1058  !!*got_sub_ptr >= !!sub->num_rects);
1059 
1060 #if FF_API_ASS_TIMING
1061  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1062  && *got_sub_ptr && sub->num_rects) {
1063  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1064  : avctx->time_base;
1065  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1066  if (err < 0)
1067  ret = err;
1068  }
1069 #endif
1070 
1071  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1072  avctx->pkt_timebase.num) {
1073  AVRational ms = { 1, 1000 };
1074  sub->end_display_time = av_rescale_q(avpkt->duration,
1075  avctx->pkt_timebase, ms);
1076  }
1077 
1078  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1079  sub->format = 0;
1080  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1081  sub->format = 1;
1082 
1083  for (i = 0; i < sub->num_rects; i++) {
1084  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1085  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1086  av_log(avctx, AV_LOG_ERROR,
1087  "Invalid UTF-8 in decoded subtitles text; "
1088  "maybe missing -sub_charenc option\n");
1089  avsubtitle_free(sub);
1090  ret = AVERROR_INVALIDDATA;
1091  break;
1092  }
1093  }
1094 
1095  if (avpkt->data != pkt_recoded.data) { // did we recode?
1096  /* prevent from destroying side data from original packet */
1097  pkt_recoded.side_data = NULL;
1098  pkt_recoded.side_data_elems = 0;
1099 
1100  av_packet_unref(&pkt_recoded);
1101  }
1102  }
1103 
1104  if (*got_sub_ptr)
1105  avctx->frame_number++;
1106  }
1107 
1108  return ret;
1109 }
1110 
1111 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1112  const enum AVPixelFormat *fmt)
1113 {
1114  const AVPixFmtDescriptor *desc;
1115  const AVCodecHWConfig *config;
1116  int i, n;
1117 
1118  // If a device was supplied when the codec was opened, assume that the
1119  // user wants to use it.
1120  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1121  AVHWDeviceContext *device_ctx =
1122  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1123  for (i = 0;; i++) {
1124  config = &avctx->codec->hw_configs[i]->public;
1125  if (!config)
1126  break;
1127  if (!(config->methods &
1128  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1129  continue;
1130  if (device_ctx->type != config->device_type)
1131  continue;
1132  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1133  if (config->pix_fmt == fmt[n])
1134  return fmt[n];
1135  }
1136  }
1137  }
1138  // No device or other setup, so we have to choose from things which
1139  // don't depend on any other external information.
1140 
1141  // If the last element of the list is a software format, choose it
1142  // (this should be best software format if any exist).
1143  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1144  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1145  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1146  return fmt[n - 1];
1147 
1148  // Finally, traverse the list in order and choose the first entry
1149  // with no external dependencies (if there is no hardware configuration
1150  // information available then this just picks the first entry).
1151  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1152  for (i = 0;; i++) {
1153  config = avcodec_get_hw_config(avctx->codec, i);
1154  if (!config)
1155  break;
1156  if (config->pix_fmt == fmt[n])
1157  break;
1158  }
1159  if (!config) {
1160  // No specific config available, so the decoder must be able
1161  // to handle this format without any additional setup.
1162  return fmt[n];
1163  }
1164  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1165  // Usable with only internal setup.
1166  return fmt[n];
1167  }
1168  }
1169 
1170  // Nothing is usable, give up.
1171  return AV_PIX_FMT_NONE;
1172 }
1173 
1174 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1175  enum AVHWDeviceType dev_type)
1176 {
1177  AVHWDeviceContext *device_ctx;
1178  AVHWFramesContext *frames_ctx;
1179  int ret;
1180 
1181  if (!avctx->hwaccel)
1182  return AVERROR(ENOSYS);
1183 
1184  if (avctx->hw_frames_ctx)
1185  return 0;
1186  if (!avctx->hw_device_ctx) {
1187  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1188  "required for hardware accelerated decoding.\n");
1189  return AVERROR(EINVAL);
1190  }
1191 
1192  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1193  if (device_ctx->type != dev_type) {
1194  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1195  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1196  av_hwdevice_get_type_name(device_ctx->type));
1197  return AVERROR(EINVAL);
1198  }
1199 
1200  ret = avcodec_get_hw_frames_parameters(avctx,
1201  avctx->hw_device_ctx,
1202  avctx->hwaccel->pix_fmt,
1203  &avctx->hw_frames_ctx);
1204  if (ret < 0)
1205  return ret;
1206 
1207  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1208 
1209 
1210  if (frames_ctx->initial_pool_size) {
1211  // We guarantee 4 base work surfaces. The function above guarantees 1
1212  // (the absolute minimum), so add the missing count.
1213  frames_ctx->initial_pool_size += 3;
1214  }
1215 
1216  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1217  if (ret < 0) {
1218  av_buffer_unref(&avctx->hw_frames_ctx);
1219  return ret;
1220  }
1221 
1222  return 0;
1223 }
1224 
1225 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1226  AVBufferRef *device_ref,
1227  enum AVPixelFormat hw_pix_fmt,
1228  AVBufferRef **out_frames_ref)
1229 {
1230  AVBufferRef *frames_ref = NULL;
1231  const AVCodecHWConfigInternal *hw_config;
1232  const AVHWAccel *hwa;
1233  int i, ret;
1234 
1235  for (i = 0;; i++) {
1236  hw_config = avctx->codec->hw_configs[i];
1237  if (!hw_config)
1238  return AVERROR(ENOENT);
1239  if (hw_config->public.pix_fmt == hw_pix_fmt)
1240  break;
1241  }
1242 
1243  hwa = hw_config->hwaccel;
1244  if (!hwa || !hwa->frame_params)
1245  return AVERROR(ENOENT);
1246 
1247  frames_ref = av_hwframe_ctx_alloc(device_ref);
1248  if (!frames_ref)
1249  return AVERROR(ENOMEM);
1250 
1251  ret = hwa->frame_params(avctx, frames_ref);
1252  if (ret >= 0) {
1253  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1254 
1255  if (frames_ctx->initial_pool_size) {
1256  // If the user has requested that extra output surfaces be
1257  // available then add them here.
1258  if (avctx->extra_hw_frames > 0)
1259  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1260 
1261  // If frame threading is enabled then an extra surface per thread
1262  // is also required.
1263  if (avctx->active_thread_type & FF_THREAD_FRAME)
1264  frames_ctx->initial_pool_size += avctx->thread_count;
1265  }
1266 
1267  *out_frames_ref = frames_ref;
1268  } else {
1269  av_buffer_unref(&frames_ref);
1270  }
1271  return ret;
1272 }
1273 
1274 static int hwaccel_init(AVCodecContext *avctx,
1275  const AVCodecHWConfigInternal *hw_config)
1276 {
1277  const AVHWAccel *hwaccel;
1278  int err;
1279 
1280  hwaccel = hw_config->hwaccel;
1281  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1282  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1283  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1284  hwaccel->name);
1285  return AVERROR_PATCHWELCOME;
1286  }
1287 
1288  if (hwaccel->priv_data_size) {
1289  avctx->internal->hwaccel_priv_data =
1290  av_mallocz(hwaccel->priv_data_size);
1291  if (!avctx->internal->hwaccel_priv_data)
1292  return AVERROR(ENOMEM);
1293  }
1294 
1295  avctx->hwaccel = hwaccel;
1296  if (hwaccel->init) {
1297  err = hwaccel->init(avctx);
1298  if (err < 0) {
1299  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1300  "hwaccel initialisation returned error.\n",
1301  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1302  av_freep(&avctx->internal->hwaccel_priv_data);
1303  avctx->hwaccel = NULL;
1304  return err;
1305  }
1306  }
1307 
1308  return 0;
1309 }
1310 
1311 static void hwaccel_uninit(AVCodecContext *avctx)
1312 {
1313  if (avctx->hwaccel && avctx->hwaccel->uninit)
1314  avctx->hwaccel->uninit(avctx);
1315 
1316  av_freep(&avctx->internal->hwaccel_priv_data);
1317 
1318  avctx->hwaccel = NULL;
1319 
1320  av_buffer_unref(&avctx->hw_frames_ctx);
1321 }
1322 
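/* Negotiate the output pixel format with the caller's get_format() callback:
 * offer the remaining candidate formats, validate the choice against the
 * codec's hardware configurations and initialise the matching hwaccel; a
 * format whose setup fails is dropped from the list and the callback is
 * invoked again with the shortened list. */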
1323 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1324 {
1325  const AVPixFmtDescriptor *desc;
1326  enum AVPixelFormat *choices;
1327  enum AVPixelFormat ret, user_choice;
1328  const AVCodecHWConfigInternal *hw_config;
1329  const AVCodecHWConfig *config;
1330  int i, n, err;
1331 
1332  // Find end of list.
1333  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1334  // Must contain at least one entry.
1335  av_assert0(n >= 1);
1336  // If a software format is available, it must be the last entry.
1337  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1338  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1339  // No software format is available.
1340  } else {
1341  avctx->sw_pix_fmt = fmt[n - 1];
1342  }
1343 
1344  choices = av_malloc_array(n + 1, sizeof(*choices));
1345  if (!choices)
1346  return AV_PIX_FMT_NONE;
1347 
1348  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1349 
1350  for (;;) {
1351  // Remove the previous hwaccel, if there was one.
1352  hwaccel_uninit(avctx);
1353 
1354  user_choice = avctx->get_format(avctx, choices);
1355  if (user_choice == AV_PIX_FMT_NONE) {
1356  // Explicitly chose nothing, give up.
1357  ret = AV_PIX_FMT_NONE;
1358  break;
1359  }
1360 
1361  desc = av_pix_fmt_desc_get(user_choice);
1362  if (!desc) {
1363  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1364  "get_format() callback.\n");
1365  ret = AV_PIX_FMT_NONE;
1366  break;
1367  }
1368  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1369  desc->name);
1370 
1371  for (i = 0; i < n; i++) {
1372  if (choices[i] == user_choice)
1373  break;
1374  }
1375  if (i == n) {
1376  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1377  "%s not in possible list.\n", desc->name);
1378  ret = AV_PIX_FMT_NONE;
1379  break;
1380  }
1381 
1382  if (avctx->codec->hw_configs) {
1383  for (i = 0;; i++) {
1384  hw_config = avctx->codec->hw_configs[i];
1385  if (!hw_config)
1386  break;
1387  if (hw_config->public.pix_fmt == user_choice)
1388  break;
1389  }
1390  } else {
1391  hw_config = NULL;
1392  }
1393 
1394  if (!hw_config) {
1395  // No config available, so no extra setup required.
1396  ret = user_choice;
1397  break;
1398  }
1399  config = &hw_config->public;
1400 
1401  if (config->methods &
1402  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1403  avctx->hw_frames_ctx) {
1404  const AVHWFramesContext *frames_ctx =
1405  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1406  if (frames_ctx->format != user_choice) {
1407  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1408  "does not match the format of the provided frames "
1409  "context.\n", desc->name);
1410  goto try_again;
1411  }
1412  } else if (config->methods &
1413  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1414  avctx->hw_device_ctx) {
1415  const AVHWDeviceContext *device_ctx =
1416  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1417  if (device_ctx->type != config->device_type) {
1418  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1419  "does not match the type of the provided device "
1420  "context.\n", desc->name);
1421  goto try_again;
1422  }
1423  } else if (config->methods &
1424  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1425  // Internal-only setup, no additional configuration.
1426  } else if (config->methods &
1427  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1428  // Some ad-hoc configuration we can't see and can't check.
1429  } else {
1430  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1431  "missing configuration.\n", desc->name);
1432  goto try_again;
1433  }
1434  if (hw_config->hwaccel) {
1435  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1436  "initialisation.\n", desc->name);
1437  err = hwaccel_init(avctx, hw_config);
1438  if (err < 0)
1439  goto try_again;
1440  }
1441  ret = user_choice;
1442  break;
1443 
1444  try_again:
1445  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1446  "get_format() without it.\n", desc->name);
1447  for (i = 0; i < n; i++) {
1448  if (choices[i] == user_choice)
1449  break;
1450  }
1451  for (; i + 1 < n; i++)
1452  choices[i] = choices[i + 1];
1453  --n;
1454  }
1455 
1456  av_freep(&choices);
1457  return ret;
1458 }
1459 
1460 static void frame_pool_free(void *opaque, uint8_t *data)
1461 {
1462  FramePool *pool = (FramePool*)data;
1463  int i;
1464 
1465  for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1466  av_buffer_pool_uninit(&pool->pools[i]);
1467 
1468  av_freep(&data);
1469 }
1470 
1471 static AVBufferRef *frame_pool_alloc(void)
1472 {
1473  FramePool *pool = av_mallocz(sizeof(*pool));
1474  AVBufferRef *buf;
1475 
1476  if (!pool)
1477  return NULL;
1478 
1479  buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1480  frame_pool_free, NULL, 0);
1481  if (!buf) {
1482  av_freep(&pool);
1483  return NULL;
1484  }
1485 
1486  return buf;
1487 }
1488 
1489 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1490 {
1491  FramePool *pool = avctx->internal->pool ?
1492  (FramePool*)avctx->internal->pool->data : NULL;
1493  AVBufferRef *pool_buf;
1494  int i, ret, ch, planes;
1495 
1496  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1497  int planar = av_sample_fmt_is_planar(frame->format);
1498  ch = frame->channels;
1499  planes = planar ? ch : 1;
1500  }
1501 
1502  if (pool && pool->format == frame->format) {
1503  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1504  pool->width == frame->width && pool->height == frame->height)
1505  return 0;
1506  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1507  pool->channels == ch && frame->nb_samples == pool->samples)
1508  return 0;
1509  }
1510 
1511  pool_buf = frame_pool_alloc();
1512  if (!pool_buf)
1513  return AVERROR(ENOMEM);
1514  pool = (FramePool*)pool_buf->data;
1515 
1516  switch (avctx->codec_type) {
1517  case AVMEDIA_TYPE_VIDEO: {
1518  int linesize[4];
1519  int w = frame->width;
1520  int h = frame->height;
1521  int unaligned;
1522  ptrdiff_t linesize1[4];
1523  size_t size[4];
1524 
1525  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1526 
1527  do {
1528  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1529  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1530  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1531  if (ret < 0)
1532  goto fail;
1533  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1534  w += w & ~(w - 1);
1535 
1536  unaligned = 0;
1537  for (i = 0; i < 4; i++)
1538  unaligned |= linesize[i] % pool->stride_align[i];
1539  } while (unaligned);
1540 
1541  for (i = 0; i < 4; i++)
1542  linesize1[i] = linesize[i];
1543  ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
1544  if (ret < 0)
1545  goto fail;
1546 
1547  for (i = 0; i < 4; i++) {
1548  pool->linesize[i] = linesize[i];
1549  if (size[i]) {
1550  if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
1551  ret = AVERROR(EINVAL);
1552  goto fail;
1553  }
1554  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1555  CONFIG_MEMORY_POISONING ?
1556  NULL :
1557  av_buffer_allocz);
1558  if (!pool->pools[i]) {
1559  ret = AVERROR(ENOMEM);
1560  goto fail;
1561  }
1562  }
1563  }
1564  pool->format = frame->format;
1565  pool->width = frame->width;
1566  pool->height = frame->height;
1567 
1568  break;
1569  }
1570  case AVMEDIA_TYPE_AUDIO: {
1571  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1572  frame->nb_samples, frame->format, 0);
1573  if (ret < 0)
1574  goto fail;
1575 
1576  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1577  if (!pool->pools[0]) {
1578  ret = AVERROR(ENOMEM);
1579  goto fail;
1580  }
1581 
1582  pool->format = frame->format;
1583  pool->planes = planes;
1584  pool->channels = ch;
1585  pool->samples = frame->nb_samples;
1586  break;
1587  }
1588  default: av_assert0(0);
1589  }
1590 
1591  av_buffer_unref(&avctx->internal->pool);
1592  avctx->internal->pool = pool_buf;
1593 
1594  return 0;
1595 fail:
1596  av_buffer_unref(&pool_buf);
1597  return ret;
1598 }
1599 
1600 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1601 {
1602  FramePool *pool = (FramePool*)avctx->internal->pool->data;
1603  int planes = pool->planes;
1604  int i;
1605 
1606  frame->linesize[0] = pool->linesize[0];
1607 
1608  if (planes > AV_NUM_DATA_POINTERS) {
1609  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1610  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1611  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1612  sizeof(*frame->extended_buf));
1613  if (!frame->extended_data || !frame->extended_buf) {
1614  av_freep(&frame->extended_data);
1615  av_freep(&frame->extended_buf);
1616  return AVERROR(ENOMEM);
1617  }
1618  } else {
1619  frame->extended_data = frame->data;
1620  av_assert0(frame->nb_extended_buf == 0);
1621  }
1622 
1623  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1624  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1625  if (!frame->buf[i])
1626  goto fail;
1627  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1628  }
1629  for (i = 0; i < frame->nb_extended_buf; i++) {
1630  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1631  if (!frame->extended_buf[i])
1632  goto fail;
1633  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1634  }
1635 
1636  if (avctx->debug & FF_DEBUG_BUFFERS)
1637  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1638 
1639  return 0;
1640 fail:
1641  av_frame_unref(frame);
1642  return AVERROR(ENOMEM);
1643 }
1644 
1645 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1646 {
1647  FramePool *pool = (FramePool*)s->internal->pool->data;
1648  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1649  int i;
1650 
1651  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1652  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1653  return -1;
1654  }
1655 
1656  if (!desc) {
1657  av_log(s, AV_LOG_ERROR,
1658  "Unable to get pixel format descriptor for format %s\n",
1659  av_get_pix_fmt_name(pic->format));
1660  return AVERROR(EINVAL);
1661  }
1662 
1663  memset(pic->data, 0, sizeof(pic->data));
1664  pic->extended_data = pic->data;
1665 
1666  for (i = 0; i < 4 && pool->pools[i]; i++) {
1667  pic->linesize[i] = pool->linesize[i];
1668 
1669  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1670  if (!pic->buf[i])
1671  goto fail;
1672 
1673  pic->data[i] = pic->buf[i]->data;
1674  }
1675  for (; i < AV_NUM_DATA_POINTERS; i++) {
1676  pic->data[i] = NULL;
1677  pic->linesize[i] = 0;
1678  }
1679  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1680  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1681  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1682 
1683  if (s->debug & FF_DEBUG_BUFFERS)
1684  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1685 
1686  return 0;
1687 fail:
1688  av_frame_unref(pic);
1689  return AVERROR(ENOMEM);
1690 }
1691 
1692 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1693 {
1694  int ret;
1695 
1696  if (avctx->hw_frames_ctx) {
1697  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1698  frame->width = avctx->coded_width;
1699  frame->height = avctx->coded_height;
1700  return ret;
1701  }
1702 
1703  if ((ret = update_frame_pool(avctx, frame)) < 0)
1704  return ret;
1705 
1706  switch (avctx->codec_type) {
1707  case AVMEDIA_TYPE_VIDEO:
1708  return video_get_buffer(avctx, frame);
1709  case AVMEDIA_TYPE_AUDIO:
1710  return audio_get_buffer(avctx, frame);
1711  default:
1712  return -1;
1713  }
1714 }
1715 
1716 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1717 {
1718  int size;
1719  const uint8_t *side_metadata;
1720 
1721  AVDictionary **frame_md = &frame->metadata;
1722 
1723  side_metadata = av_packet_get_side_data(avpkt,
1724  AV_PKT_DATA_STRINGS_METADATA, &size);
1725  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1726 }
1727 
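/* Fill the metadata of a soon-to-be-decoded frame from the matching input
 * packet (timestamps, duration, mapped side data, discard flag) and from the
 * codec context defaults (colorimetry, sample aspect ratio, audio layout). */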
1728 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1729 {
1730  AVPacket *pkt = avctx->internal->last_pkt_props;
1731  int i;
1732  static const struct {
1733  enum AVPacketSideDataType packet;
1734  enum AVFrameSideDataType frame;
1735  } sd[] = {
1736  { AV_PKT_DATA_REPLAYGAIN, AV_FRAME_DATA_REPLAYGAIN },
1737  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1738  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1739  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1740  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1741  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1742  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1743  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1744  { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
1745  { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
1746  };
1747 
1748  if (IS_EMPTY(pkt) && av_fifo_size(avctx->internal->pkt_props) >= sizeof(*pkt))
1749  av_fifo_generic_read(avctx->internal->pkt_props,
1750  pkt, sizeof(*pkt), NULL);
1751 
1752  if (pkt) {
1753  frame->pts = pkt->pts;
1754 #if FF_API_PKT_PTS
1755 FF_DISABLE_DEPRECATION_WARNINGS
1756  frame->pkt_pts = pkt->pts;
1757 FF_ENABLE_DEPRECATION_WARNINGS
1758 #endif
1759  frame->pkt_pos = pkt->pos;
1760  frame->pkt_duration = pkt->duration;
1761  frame->pkt_size = pkt->size;
1762 
1763  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1764  int size;
1765  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1766  if (packet_sd) {
1767  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1768  sd[i].frame,
1769  size);
1770  if (!frame_sd)
1771  return AVERROR(ENOMEM);
1772 
1773  memcpy(frame_sd->data, packet_sd, size);
1774  }
1775  }
1776  add_metadata_from_side_data(pkt, frame);
1777 
1778  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1779  frame->flags |= AV_FRAME_FLAG_DISCARD;
1780  } else {
1781  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1782  }
1783  }
1784  frame->reordered_opaque = avctx->reordered_opaque;
1785 
1786  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1787  frame->color_primaries = avctx->color_primaries;
1788  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1789  frame->color_trc = avctx->color_trc;
1790  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1791  frame->colorspace = avctx->colorspace;
1792  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1793  frame->color_range = avctx->color_range;
1794  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1795  frame->chroma_location = avctx->chroma_sample_location;
1796 
1797  switch (avctx->codec->type) {
1798  case AVMEDIA_TYPE_VIDEO:
1799  frame->format = avctx->pix_fmt;
1800  if (!frame->sample_aspect_ratio.num)
1801  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1802 
1803  if (frame->width && frame->height &&
1804  av_image_check_sar(frame->width, frame->height,
1805  frame->sample_aspect_ratio) < 0) {
1806  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1807  frame->sample_aspect_ratio.num,
1808  frame->sample_aspect_ratio.den);
1809  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1810  }
1811 
1812  break;
1813  case AVMEDIA_TYPE_AUDIO:
1814  if (!frame->sample_rate)
1815  frame->sample_rate = avctx->sample_rate;
1816  if (frame->format < 0)
1817  frame->format = avctx->sample_fmt;
1818  if (!frame->channel_layout) {
1819  if (avctx->channel_layout) {
1820  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1821  avctx->channels) {
1822  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1823  "configuration.\n");
1824  return AVERROR(EINVAL);
1825  }
1826 
1827  frame->channel_layout = avctx->channel_layout;
1828  } else {
1829  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1830  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1831  avctx->channels);
1832  return AVERROR(ENOSYS);
1833  }
1834  }
1835  }
1836  frame->channels = avctx->channels;
1837  break;
1838  }
1839  return 0;
1840 }
1841 
1842 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1843 {
1844  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1845  int i;
1846  int num_planes = av_pix_fmt_count_planes(frame->format);
1847  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1848  int flags = desc ? desc->flags : 0;
1849  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1850  num_planes = 2;
1851  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1852  num_planes = 2;
1853  for (i = 0; i < num_planes; i++) {
1854  av_assert0(frame->data[i]);
1855  }
1856  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1857  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1858  if (frame->data[i])
1859  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1860  frame->data[i] = NULL;
1861  }
1862  }
1863 }
1864 
1865 static void decode_data_free(void *opaque, uint8_t *data)
1866 {
1867  FrameDecodeData *fdd = (FrameDecodeData*)data;
1868 
1869  if (fdd->post_process_opaque_free)
1870  fdd->post_process_opaque_free(fdd->post_process_opaque);
1871 
1872  if (fdd->hwaccel_priv_free)
1873  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1874 
1875  av_freep(&fdd);
1876 }
1877 
1878 int ff_attach_decode_data(AVFrame *frame)
1879 {
1880  AVBufferRef *fdd_buf;
1881  FrameDecodeData *fdd;
1882 
1883  av_assert1(!frame->private_ref);
1884  av_buffer_unref(&frame->private_ref);
1885 
1886  fdd = av_mallocz(sizeof(*fdd));
1887  if (!fdd)
1888  return AVERROR(ENOMEM);
1889 
1890  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1891  NULL, AV_BUFFER_FLAG_READONLY);
1892  if (!fdd_buf) {
1893  av_freep(&fdd);
1894  return AVERROR(ENOMEM);
1895  }
1896 
1897  frame->private_ref = fdd_buf;
1898 
1899  return 0;
1900 }
1901 
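/* Allocate the data buffers for a frame on behalf of a decoder: validate the
 * requested dimensions or sample count, fill the frame properties via
 * ff_decode_frame_props(), then hand allocation to either the hwaccel's
 * alloc_frame() or the get_buffer2() callback, and finally attach the
 * FrameDecodeData used for per-frame post-processing. */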
1902 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1903 {
1904  const AVHWAccel *hwaccel = avctx->hwaccel;
1905  int override_dimensions = 1;
1906  int ret;
1907 
1908  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1909  if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
1910  (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1911  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1912  ret = AVERROR(EINVAL);
1913  goto fail;
1914  }
1915 
1916  if (frame->width <= 0 || frame->height <= 0) {
1917  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1918  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1919  override_dimensions = 0;
1920  }
1921 
1922  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1923  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1924  ret = AVERROR(EINVAL);
1925  goto fail;
1926  }
1927  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1928  if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1929  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1930  ret = AVERROR(EINVAL);
1931  goto fail;
1932  }
1933  }
1934  ret = ff_decode_frame_props(avctx, frame);
1935  if (ret < 0)
1936  goto fail;
1937 
1938  if (hwaccel) {
1939  if (hwaccel->alloc_frame) {
1940  ret = hwaccel->alloc_frame(avctx, frame);
1941  goto end;
1942  }
1943  } else
1944  avctx->sw_pix_fmt = avctx->pix_fmt;
1945 
1946  ret = avctx->get_buffer2(avctx, frame, flags);
1947  if (ret < 0)
1948  goto fail;
1949 
1950  validate_avframe_allocation(avctx, frame);
1951 
1952  ret = ff_attach_decode_data(frame);
1953  if (ret < 0)
1954  goto fail;
1955 
1956 end:
1957  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1958  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1959  frame->width = avctx->width;
1960  frame->height = avctx->height;
1961  }
1962 
1963 fail:
1964  if (ret < 0) {
1965  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1966  av_frame_unref(frame);
1967  }
1968 
1969  return ret;
1970 }
1971 
1972 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1973 {
1974  AVFrame *tmp;
1975  int ret;
1976 
1977  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1978 
1979  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1980  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1981  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1982  av_frame_unref(frame);
1983  }
1984 
1985  if (!frame->data[0])
1986  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1987 
1988  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1989  return ff_decode_frame_props(avctx, frame);
1990 
1991  tmp = av_frame_alloc();
1992  if (!tmp)
1993  return AVERROR(ENOMEM);
1994 
1995  av_frame_move_ref(tmp, frame);
1996 
1997  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1998  if (ret < 0) {
1999  av_frame_free(&tmp);
2000  return ret;
2001  }
2002 
2003  av_frame_copy(frame, tmp);
2004  av_frame_free(&tmp);
2005 
2006  return 0;
2007 }
2008 
2009 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
2010 {
2011  int ret = reget_buffer_internal(avctx, frame, flags);
2012  if (ret < 0)
2013  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
2014  return ret;
2015 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:98
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:61
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1601
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:783
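The get_format callback receives an AV_PIX_FMT_NONE-terminated list of candidate pixel formats and returns the one the application wants the decoder to use. A hedged sketch modelled on the pattern in FFmpeg's hw_decode example (the static variable and function names are assumptions):

#include <libavcodec/avcodec.h>
#include <libavutil/pixfmt.h>

/* Hypothetical: the hardware pixel format selected during setup,
 * e.g. from avcodec_get_hw_config(). */
static enum AVPixelFormat wanted_hw_pix_fmt = AV_PIX_FMT_VAAPI;

static enum AVPixelFormat my_get_format(AVCodecContext *avctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;
    (void)avctx;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == wanted_hw_pix_fmt)
            return *p;          /* the hardware format is on offer */

    /* Fall back to the first (software) format the decoder proposed. */
    return pix_fmts[0];
}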
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:34
int nb_draining_errors
Definition: internal.h:202
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:2111
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1323
const struct AVCodec * codec
Definition: avcodec.h:540
AVRational framerate
Definition: avcodec.h:2062
int av_fifo_grow(AVFifoBuffer *f, unsigned int size)
Enlarge an AVFifoBuffer.
Definition: fifo.c:107
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:2083
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AV_NUM_DATA_POINTERS
Definition: frame.h:315
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: packet.h:40
int64_t pts_correction_num_faulty_dts
Number of incorrect DTS values so far.
Definition: avcodec.h:2091
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:499
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:314
int stride_align[AV_NUM_DATA_POINTERS]
Definition: decode.c:60
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:2297
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:534
The codec supports this format by some internal method.
Definition: codec.h:411
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:384
AVFifoBuffer * pkt_props
Definition: internal.h:151
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:719
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:585
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:2424
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:150
const char * desc
Definition: libsvtav1.c:79
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:964
int changed_frames_dropped
Definition: internal.h:205
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
AVFrame * to_free
Definition: internal.h:136
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:383
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:105
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:518
int width
Definition: decode.c:59
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: codec.h:298
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: packet.h:114
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1166
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:523
static FF_DISABLE_DEPRECATION_WARNINGS int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:721
int ff_decode_bsfs_init(AVCodecContext *avctx)
Called during avcodec_open2() to initialize avctx->internal->bsf.
Definition: decode.c:187
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int size
Definition: packet.h:364
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:100
int initial_channels
Definition: internal.h:209
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:2418
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:910
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
static void frame_pool_free(void *opaque, uint8_t *data)
Definition: decode.c:1460
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
int samples
Definition: decode.c:64
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:855
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:2679
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
enum AVMediaType type
Definition: codec.h:203
#define FF_ARRAY_ELEMS(a)
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:67
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:872
AVBufferPool * pools[4]
Pools for each data plane.
Definition: decode.c:53
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1728
size_t crop_bottom
Definition: frame.h:675
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1022
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1675
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:2437
static int utf8_check(const uint8_t *str)
Definition: decode.c:930
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:616
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
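decode.c queues packet properties through the (pre-5.1) AVFifoBuffer API. A small, hedged standalone sketch of that API's basic write/read cycle; passing NULL as the callback makes both functions behave like a plain memcpy against the supplied buffer:

#include <libavutil/fifo.h>
#include <stdio.h>

int main(void)
{
    AVFifoBuffer *fifo = av_fifo_alloc(4 * sizeof(int));
    int in[2] = { 7, 42 }, out[2] = { 0, 0 };

    if (!fifo)
        return 1;

    av_fifo_generic_write(fifo, in, sizeof(in), NULL);   /* enqueue 8 bytes */
    av_fifo_generic_read(fifo, out, sizeof(out), NULL);  /* dequeue them    */

    printf("read back %d %d (fifo now holds %d bytes)\n",
           out[0], out[1], av_fifo_size(fifo));

    av_fifo_freep(&fifo);
    return 0;
}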
Mastering display metadata (based on SMPTE-2086:2014).
Definition: packet.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:654
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:2680
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:2529
static AVBufferRef * frame_pool_alloc(void)
Definition: decode.c:1471
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static FF_ENABLE_DEPRECATION_WARNINGS void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:865
int height
Definition: decode.c:59
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1111
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1199
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1878
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:264
size_t crop_left
Definition: frame.h:676
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:271
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:693
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:221
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:407
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:432
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int planes
Definition: decode.c:62
Structure to hold side data for an AVFrame.
Definition: frame.h:220
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
size_t compat_decode_consumed
Definition: internal.h:189
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
static int copy_packet_props(AVPacket *dst, const AVPacket *src)
Definition: decode.c:147
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: internal.h:303
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:600
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1749
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:2521
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:207
int initial_format
Definition: internal.h:206
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1173
#define FFALIGN(x, a)
Definition: macros.h:48
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:391
#define av_log(a,...)
The buffer pool.
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:615
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:155
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
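av_rescale_q() is how timestamps are converted between timebases in this file (e.g. from the packet timebase to AV_TIME_BASE_Q). A hedged worked example: 40 ticks in a 1/25 timebase is 1.6 s, which is 144000 ticks in the 1/90000 MPEG timebase.

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational src_tb = { 1, 25 };     /* e.g. a 25 fps stream timebase     */
    AVRational dst_tb = { 1, 90000 };  /* e.g. the MPEG-TS 90 kHz timebase  */
    int64_t pts = 40;                  /* 40 ticks at 1/25 s = 1.6 seconds  */

    printf("%" PRId64 " -> %" PRId64 "\n",
           pts, av_rescale_q(pts, src_tb, dst_tb));   /* prints 40 -> 144000 */
    return 0;
}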
#define AV_RL8(x)
Definition: intreadwrite.h:398
#define src
Definition: vp8dsp.c:255
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:88
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2076
AVBSFContext * bsf
Definition: internal.h:144
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:821
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: codec.h:323
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1274
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1842
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:72
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:891
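apply_cropping() in this file forwards to av_frame_apply_cropping() once the decoder has filled in crop_left/right/top/bottom. A hedged caller-side sketch, assuming the application disabled automatic cropping (avctx->apply_cropping = 0) and applies it itself after receiving a frame; the helper name is hypothetical:

#include <libavutil/frame.h>

/* Crop a decoded frame in place, keeping data[] pointers aligned.
 * Pass AV_FRAME_CROP_UNALIGNED instead of 0 to crop exactly, even when
 * that leaves unaligned data pointers. Returns 0 on success, <0 on error. */
static int crop_decoded_frame(AVFrame *frame)
{
    return av_frame_apply_cropping(frame, 0);
}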
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ICC profile data consisting of an opaque octet buffer following the format described by ISO 15076-1...
Definition: packet.h:274
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:2092
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:2009
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1787
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:437
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1999
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:643
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:159
AVFrame * buffer_frame
Definition: internal.h:182
int capabilities
Codec capabilities.
Definition: codec.h:209
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:558
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:569
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: packet.h:375
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
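av_buffer_create() wraps memory the caller already owns into a reference-counted AVBuffer. A hedged sketch (helper name hypothetical) that wraps an av_malloc'ed array and uses the stock av_buffer_default_free() callback, so the memory is released with av_free() when the last reference goes away:

#include <stdint.h>
#include <libavutil/buffer.h>
#include <libavutil/mem.h>

static AVBufferRef *wrap_owned_array(int size)
{
    uint8_t *data = av_malloc(size);
    AVBufferRef *buf;

    if (!data)
        return NULL;

    /* The buffer takes ownership of 'data'; av_buffer_default_free()
     * av_free()s it once the last reference is unreferenced. */
    buf = av_buffer_create(data, size, av_buffer_default_free, NULL, 0);
    if (!buf)
        av_free(data);
    return buf;
}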
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2239
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:333
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
#define FFMAX(a, b)
Definition: common.h:103
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:133
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:799
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:67
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:2535
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:147
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1242
uint32_t end_display_time
Definition: avcodec.h:2678
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2681
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:491
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
size_t crop_top
Definition: frame.h:674
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:522
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:572
int channels
number of audio channels, only used for audio.
Definition: frame.h:620
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:551
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1640
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1779
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2397
#define FFMIN(a, b)
Definition: common.h:105
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:519
int channels
Definition: decode.c:63
AVFrame * compat_decode_frame
Definition: internal.h:193
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:39
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2209
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1716
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:767
AVPacket * in_pkt
Definition: internal.h:113
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: packet.h:228
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1145
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
uint16_t format
Definition: avcodec.h:2676
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:1630
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1668
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1651
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: codec.h:314
DecodeSimpleContext ds
Definition: internal.h:143
char * sub_charenc
Character encoding of the input subtitles.
Definition: avcodec.h:2100
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1600
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:176
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1768
int linesize[4]
Definition: decode.c:61
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:2108
if(ret)
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1094
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
Content light level (based on CTA-861.3).
Definition: packet.h:235
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:580
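avcodec_send_packet() and avcodec_receive_frame() (both defined in this file) form the decoupled decode loop. A hedged, error-handling-light sketch of the canonical pattern, assuming avctx is an opened decoder context and pkt/frame were allocated by the caller; handle_frame is a hypothetical consumer:

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Feed one packet (or NULL to start draining) and collect every frame the
 * decoder is ready to return. */
static int decode_one(AVCodecContext *avctx, const AVPacket *pkt,
                      AVFrame *frame, void (*handle_frame)(const AVFrame *))
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;       /* needs more input / fully drained */
        if (ret < 0)
            return ret;     /* real decoding error */

        handle_frame(frame);
        av_frame_unref(frame);
    }
}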
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
Definition: decode.c:296
int(* decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt)
Definition: codec.h:284
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:200
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1692
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:539
int compat_decode_warned
Definition: internal.h:186
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:593
A list of zero terminated key/value strings.
Definition: packet.h:172
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:848
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
int sample_rate
samples per second
Definition: avcodec.h:1191
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int initial_sample_rate
Definition: internal.h:208
int debug
debug
Definition: avcodec.h:1618
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1885
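ff_get_format() consults the codec's hardware configurations; from the application side the same list is reachable through avcodec_get_hw_config(). A hedged sketch (function name hypothetical, mirroring FFmpeg's hw_decode example) that scans for a configuration usable via hw_device_ctx and returns its pixel format:

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixfmt.h>

/* Return the hw pixel format for 'type' (e.g. AV_HWDEVICE_TYPE_VAAPI),
 * or AV_PIX_FMT_NONE if the decoder has no hw_device_ctx config for it. */
static enum AVPixelFormat find_hw_pix_fmt(const AVCodec *decoder,
                                          enum AVHWDeviceType type)
{
    for (int i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
        if (!config)
            return AV_PIX_FMT_NONE;
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
            config->device_type == type)
            return config->pix_fmt;
    }
}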
main external API structure.
Definition: avcodec.h:531
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
int skip_samples_multiplier
Definition: internal.h:199
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1972
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1154
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1902
uint8_t * data
Definition: frame.h:222
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
size_t crop_right
Definition: frame.h:677
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:2327
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:719
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:481
int sample_rate
Sample rate of the audio data.
Definition: frame.h:486
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1346
int showed_multi_packet_warning
Definition: internal.h:197
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:726
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: codec.h:93
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:308
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
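audio_get_buffer() in this file sizes its pools with av_samples_get_buffer_size(). A hedged standalone computation: 1024 samples of packed 16-bit stereo occupy one plane of 2 channels x 2 bytes x 1024 samples = 4096 bytes.

#include <stdio.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    int linesize;
    int size = av_samples_get_buffer_size(&linesize, 2 /*channels*/,
                                          1024 /*samples*/,
                                          AV_SAMPLE_FMT_S16, 0 /*default align*/);

    /* Packed stereo s16: expect 4096 bytes here. */
    printf("buffer %d bytes, linesize %d\n", size, linesize);
    return size < 0;
}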
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1152
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2141
Recommends skipping the specified number of samples.
Definition: packet.h:156
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:2216
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:166
#define STRIDE_ALIGN
Definition: internal.h:109
enum AVChromaLocation chroma_location
Definition: frame.h:571
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:2550
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:578
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:1363
AVBufferRef * pool
Definition: internal.h:139
#define AV_CODEC_FLAG_DROPCHANGED
Don't output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:292
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:191
#define flags(name, subs,...)
Definition: cbs_av1.c:561
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: packet.h:99
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:404
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:56
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1174
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1865
#define UTF8_MAX_BYTES
Definition: decode.c:871
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:415
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:181
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:423
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:2311
The codec supports this format by some ad-hoc method.
Definition: codec.h:420
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:374
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1225
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:408
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:266
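update_frame_pool() in this file builds one AVBufferPool per data plane. A hedged standalone sketch of the pool API itself: create a pool of fixed-size buffers (a NULL alloc callback means av_buffer_alloc() is used), grab one, release it back, and tear the pool down.

#include <libavutil/buffer.h>

int main(void)
{
    AVBufferPool *pool = av_buffer_pool_init(4096, NULL);
    AVBufferRef  *buf;

    if (!pool)
        return 1;

    buf = av_buffer_pool_get(pool);     /* new or recycled 4096-byte buffer */
    if (buf) {
        buf->data[0] = 0;               /* use the buffer ...               */
        av_buffer_unref(&buf);          /* ... and return it to the pool    */
    }

    av_buffer_pool_uninit(&pool);       /* freed once all buffers return    */
    return 0;
}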
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:2557
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:247
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:950
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:171
int caps_internal
Internal codec capabilities.
Definition: codec.h:308
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:288
uint64_t initial_channel_layout
Definition: internal.h:210
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:959
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1645
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:2635
#define FF_PSEUDOPAL
Definition: internal.h:297
AVHWDeviceType
Definition: hwcontext.h:27
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int channels
number of audio channels
Definition: avcodec.h:1192
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2670
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:543
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:618
enum AVColorPrimaries color_primaries
Definition: frame.h:560
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:2093
size_t compat_decode_partial_size
Definition: internal.h:192
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
#define IS_EMPTY(pkt)
Definition: decode.c:145
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1489
int height
Definition: frame.h:372
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Number of incorrect PTS values so far.
Definition: avcodec.h:2090
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:562
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:373
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:444
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:2219
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:2112
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:2661
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:361
int format
Definition: decode.c:58
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:2261
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:629
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:380
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1596
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:2069
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1311
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: packet.h:120
static uint8_t tmp[11]
Definition: aes_ctr.c:27