decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "packet_internal.h"
47 #include "thread.h"
48 
49 typedef struct FramePool {
50  /**
51  * Pools for each data plane. For audio all the planes have the same size,
52  * so only pools[0] is used.
53  */
54  AVBufferPool *pools[4];
55 
56  /*
57  * Pool parameters
58  */
59  int format;
60  int width, height;
61  int stride_align[AV_NUM_DATA_POINTERS];
62  int linesize[4];
63  int planes;
64  int channels;
65  int samples;
66 } FramePool;
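/* The pool above is (re)built by update_frame_pool() whenever the frame
 * format, the video dimensions or the audio parameters change; the default
 * get_buffer2() implementation then hands out buffers from it. */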
67 
68 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
69 {
70  int size, ret;
71  const uint8_t *data;
72  uint32_t flags;
73  int64_t val;
74 
75  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
76  if (!data)
77  return 0;
78 
79  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
80  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
81  "changes, but PARAM_CHANGE side data was sent to it.\n");
82  ret = AVERROR(EINVAL);
83  goto fail2;
84  }
85 
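 /* Layout of AV_PKT_DATA_PARAM_CHANGE side data as parsed below: a
  * little-endian 32-bit flags word, followed (when the matching flag is set,
  * in this order) by channel count (le32), channel layout (le64),
  * sample rate (le32) and width/height (le32 each). */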
86  if (size < 4)
87  goto fail;
88 
89  flags = bytestream_get_le32(&data);
90  size -= 4;
91 
91 
92  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
93  if (size < 4)
94  goto fail;
95  val = bytestream_get_le32(&data);
96  if (val <= 0 || val > INT_MAX) {
97  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
98  ret = AVERROR_INVALIDDATA;
99  goto fail2;
100  }
101  avctx->channels = val;
102  size -= 4;
103  }
104  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
105  if (size < 8)
106  goto fail;
107  avctx->channel_layout = bytestream_get_le64(&data);
108  size -= 8;
109  }
110  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
111  if (size < 4)
112  goto fail;
113  val = bytestream_get_le32(&data);
114  if (val <= 0 || val > INT_MAX) {
115  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
116  ret = AVERROR_INVALIDDATA;
117  goto fail2;
118  }
119  avctx->sample_rate = val;
120  size -= 4;
121  }
122  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
123  if (size < 8)
124  goto fail;
125  avctx->width = bytestream_get_le32(&data);
126  avctx->height = bytestream_get_le32(&data);
127  size -= 8;
128  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
129  if (ret < 0)
130  goto fail2;
131  }
132 
133  return 0;
134 fail:
135  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
136  ret = AVERROR_INVALIDDATA;
137 fail2:
138  if (ret < 0) {
139  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
140  if (avctx->err_recognition & AV_EF_EXPLODE)
141  return ret;
142  }
143  return 0;
144 }
145 
146 #define IS_EMPTY(pkt) (!(pkt)->data)
147 
148 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
149 {
150  int ret = 0;
151 
152  ret = avpriv_packet_list_put(&avci->pkt_props, &avci->pkt_props_tail, pkt,
153  av_packet_copy_props, 0);
154  if (ret < 0)
155  return ret;
156  avci->pkt_props_tail->pkt.size = pkt->size; // HACK: Needed for ff_decode_frame_props().
157  avci->pkt_props_tail->pkt.data = (void*)1; // HACK: Needed for IS_EMPTY().
158 
159  if (IS_EMPTY(avci->last_pkt_props)) {
160  ret = avpriv_packet_list_get(&avci->pkt_props,
161  &avci->pkt_props_tail,
162  avci->last_pkt_props);
163  av_assert0(ret != AVERROR(EAGAIN));
164  }
165  return ret;
166 }
167 
168 int ff_decode_bsfs_init(AVCodecContext *avctx)
169 {
170  AVCodecInternal *avci = avctx->internal;
171  int ret;
172 
173  if (avci->bsf)
174  return 0;
175 
176  ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
177  if (ret < 0) {
178  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
179  if (ret != AVERROR(ENOMEM))
180  ret = AVERROR_BUG;
181  goto fail;
182  }
183 
184  /* We do not currently have an API for passing the input timebase into decoders,
185  * but no filters used here should actually need it.
186  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
187  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
188  ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
189  if (ret < 0)
190  goto fail;
191 
192  ret = av_bsf_init(avci->bsf);
193  if (ret < 0)
194  goto fail;
195 
196  return 0;
197 fail:
198  av_bsf_free(&avci->bsf);
199  return ret;
200 }
201 
202 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
203 {
204  AVCodecInternal *avci = avctx->internal;
205  int ret;
206 
207  if (avci->draining)
208  return AVERROR_EOF;
209 
210  ret = av_bsf_receive_packet(avci->bsf, pkt);
211  if (ret == AVERROR_EOF)
212  avci->draining = 1;
213  if (ret < 0)
214  return ret;
215 
216  ret = extract_packet_props(avctx->internal, pkt);
217  if (ret < 0)
218  goto finish;
219 
220  ret = apply_param_change(avctx, pkt);
221  if (ret < 0)
222  goto finish;
223 
224 #if FF_API_OLD_ENCDEC
225  if (avctx->codec->receive_frame)
226  avci->compat_decode_consumed += pkt->size;
227 #endif
228 
229  return 0;
230 finish:
231  av_packet_unref(pkt);
232  return ret;
233 }
234 
235 /**
236  * Attempt to guess proper monotonic timestamps for decoded video frames
237  * which might have incorrect times. Input timestamps may wrap around, in
238  * which case the output will as well.
239  *
240  * @param pts the pts field of the decoded AVPacket, as passed through
241  * AVFrame.pts
242  * @param dts the dts field of the decoded AVPacket
243  * @return one of the input values, may be AV_NOPTS_VALUE
244  */
245 static int64_t guess_correct_pts(AVCodecContext *ctx,
246  int64_t reordered_pts, int64_t dts)
247 {
248  int64_t pts = AV_NOPTS_VALUE;
249 
250  if (dts != AV_NOPTS_VALUE) {
251  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
252  ctx->pts_correction_last_dts = dts;
253  } else if (reordered_pts != AV_NOPTS_VALUE)
254  ctx->pts_correction_last_dts = reordered_pts;
255 
256  if (reordered_pts != AV_NOPTS_VALUE) {
257  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
258  ctx->pts_correction_last_pts = reordered_pts;
259  } else if(dts != AV_NOPTS_VALUE)
260  ctx->pts_correction_last_pts = dts;
261 
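 /* The counters above record how often pts respectively dts went
  * non-monotonic; the selection below prefers reordered_pts unless dts has
  * been the more reliable of the two so far. */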
262  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
263  && reordered_pts != AV_NOPTS_VALUE)
264  pts = reordered_pts;
265  else
266  pts = dts;
267 
268  return pts;
269 }
270 
271 /*
272  * The core of the receive_frame_wrapper for the decoders implementing
273  * the simple API. Certain decoders might consume partial packets without
274  * returning any output, so this function needs to be called in a loop until it
275  * returns EAGAIN.
276  **/
277 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
278 {
279  AVCodecInternal *avci = avctx->internal;
280  DecodeSimpleContext *ds = &avci->ds;
281  AVPacket *pkt = ds->in_pkt;
282  // copy to ensure we do not change pkt
283  int got_frame, actual_got_frame;
284  int ret;
285 
286  if (!pkt->data && !avci->draining) {
287  av_packet_unref(pkt);
288  ret = ff_decode_get_packet(avctx, pkt);
289  if (ret < 0 && ret != AVERROR_EOF)
290  return ret;
291  }
292 
293  // Some codecs (at least wma lossless) will crash when feeding drain packets
294  // after EOF was signaled.
295  if (avci->draining_done)
296  return AVERROR_EOF;
297 
298  if (!pkt->data &&
299  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
300  avctx->active_thread_type & FF_THREAD_FRAME))
301  return AVERROR_EOF;
302 
303  got_frame = 0;
304 
305  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
306  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
307  } else {
308  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
309 
310  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
311  frame->pkt_dts = pkt->dts;
312  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
313  if(!avctx->has_b_frames)
314  frame->pkt_pos = pkt->pos;
315  //FIXME these should be under if(!avctx->has_b_frames)
316  /* get_buffer is supposed to set frame parameters */
317  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
318  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
319  if (!frame->width) frame->width = avctx->width;
320  if (!frame->height) frame->height = avctx->height;
321  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
322  }
323  }
324  }
325  emms_c();
326  actual_got_frame = got_frame;
327 
328  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
329  if (frame->flags & AV_FRAME_FLAG_DISCARD)
330  got_frame = 0;
331  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
332  uint8_t *side;
333  int side_size;
334  uint32_t discard_padding = 0;
335  uint8_t skip_reason = 0;
336  uint8_t discard_reason = 0;
337 
338  if (ret >= 0 && got_frame) {
339  if (frame->format == AV_SAMPLE_FMT_NONE)
340  frame->format = avctx->sample_fmt;
341  if (!frame->channel_layout)
342  frame->channel_layout = avctx->channel_layout;
343  if (!frame->channels)
344  frame->channels = avctx->channels;
345  if (!frame->sample_rate)
346  frame->sample_rate = avctx->sample_rate;
347  }
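 /* AV_PKT_DATA_SKIP_SAMPLES layout as read below: le32 samples to skip from
  * the start of the frame, le32 samples to discard from the end, one byte
  * skip reason, one byte discard reason. */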
348 
349  side = av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
350  if(side && side_size>=10) {
351  avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
352  discard_padding = AV_RL32(side + 4);
353  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
354  avci->skip_samples, (int)discard_padding);
355  skip_reason = AV_RL8(side + 8);
356  discard_reason = AV_RL8(side + 9);
357  }
358 
359  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
360  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
361  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
362  got_frame = 0;
363  *discarded_samples += frame->nb_samples;
364  }
365 
366  if (avci->skip_samples > 0 && got_frame &&
367  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
368  if(frame->nb_samples <= avci->skip_samples){
369  got_frame = 0;
370  *discarded_samples += frame->nb_samples;
371  avci->skip_samples -= frame->nb_samples;
372  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
373  avci->skip_samples);
374  } else {
375  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
376  frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
377  if(avctx->pkt_timebase.num && avctx->sample_rate) {
378  int64_t diff_ts = av_rescale_q(avci->skip_samples,
379  (AVRational){1, avctx->sample_rate},
380  avctx->pkt_timebase);
381  if(frame->pts!=AV_NOPTS_VALUE)
382  frame->pts += diff_ts;
383 #if FF_API_PKT_PTS
384 FF_DISABLE_DEPRECATION_WARNINGS
385  if(frame->pkt_pts!=AV_NOPTS_VALUE)
386  frame->pkt_pts += diff_ts;
387 FF_ENABLE_DEPRECATION_WARNINGS
388 #endif
389  if(frame->pkt_dts!=AV_NOPTS_VALUE)
390  frame->pkt_dts += diff_ts;
391  if (frame->pkt_duration >= diff_ts)
392  frame->pkt_duration -= diff_ts;
393  } else {
394  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
395  }
396  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
397  avci->skip_samples, frame->nb_samples);
398  *discarded_samples += avci->skip_samples;
399  frame->nb_samples -= avci->skip_samples;
400  avci->skip_samples = 0;
401  }
402  }
403 
404  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
405  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
406  if (discard_padding == frame->nb_samples) {
407  *discarded_samples += frame->nb_samples;
408  got_frame = 0;
409  } else {
410  if(avctx->pkt_timebase.num && avctx->sample_rate) {
411  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
412  (AVRational){1, avctx->sample_rate},
413  avctx->pkt_timebase);
414  frame->pkt_duration = diff_ts;
415  } else {
416  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
417  }
418  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
419  (int)discard_padding, frame->nb_samples);
420  frame->nb_samples -= discard_padding;
421  }
422  }
423 
424  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
425  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
426  if (fside) {
427  AV_WL32(fside->data, avci->skip_samples);
428  AV_WL32(fside->data + 4, discard_padding);
429  AV_WL8(fside->data + 8, skip_reason);
430  AV_WL8(fside->data + 9, discard_reason);
431  avci->skip_samples = 0;
432  }
433  }
434  }
435 
436  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
437  !avci->showed_multi_packet_warning &&
438  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
439  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
440  avci->showed_multi_packet_warning = 1;
441  }
442 
443  if (!got_frame)
444  av_frame_unref(frame);
445 
446  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
447  ret = pkt->size;
448 
449 #if FF_API_AVCTX_TIMEBASE
450  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
451  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
452 #endif
453 
454  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
455  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
456  if (avci->draining && !actual_got_frame) {
457  if (ret < 0) {
458  /* prevent infinite loop if a decoder wrongly always return error on draining */
459  /* reasonable nb_errors_max = maximum b frames + thread count */
460  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
461  avctx->thread_count : 1);
462 
463  if (avci->nb_draining_errors++ >= nb_errors_max) {
464  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
465  "Stop draining and force EOF.\n");
466  avci->draining_done = 1;
467  ret = AVERROR_BUG;
468  }
469  } else {
470  avci->draining_done = 1;
471  }
472  }
473 
474 #if FF_API_OLD_ENCDEC
475  avci->compat_decode_consumed += ret;
476 #endif
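 /* Decoders using the simple API return the number of consumed bytes. If the
  * packet was only partially consumed, the remainder stays in ds->in_pkt and
  * is fed to the decoder again on the next call. */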
477 
478  if (ret >= pkt->size || ret < 0) {
479  av_packet_unref(pkt);
480  av_packet_unref(avci->last_pkt_props);
481  } else {
482  int consumed = ret;
483 
484  pkt->data += consumed;
485  pkt->size -= consumed;
486  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
487  pkt->pts = AV_NOPTS_VALUE;
488  pkt->dts = AV_NOPTS_VALUE;
489  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
490  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
491  }
492 
493  if (got_frame)
494  av_assert0(frame->buf[0]);
495 
496  return ret < 0 ? ret : 0;
497 }
498 
499 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
500 {
501  int ret;
502  int64_t discarded_samples = 0;
503 
504  while (!frame->buf[0]) {
505  if (discarded_samples > avctx->max_samples)
506  return AVERROR(EAGAIN);
507  ret = decode_simple_internal(avctx, frame, &discarded_samples);
508  if (ret < 0)
509  return ret;
510  }
511 
512  return 0;
513 }
514 
515 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
516 {
517  AVCodecInternal *avci = avctx->internal;
518  int ret;
519 
520  av_assert0(!frame->buf[0]);
521 
522  if (avctx->codec->receive_frame) {
523  ret = avctx->codec->receive_frame(avctx, frame);
524  if (ret != AVERROR(EAGAIN))
525  av_packet_unref(avci->last_pkt_props);
526  } else
527  ret = decode_simple_receive_frame(avctx, frame);
528 
529  if (ret == AVERROR_EOF)
530  avci->draining_done = 1;
531 
532  if (!ret) {
533  frame->best_effort_timestamp = guess_correct_pts(avctx,
534  frame->pts,
535  frame->pkt_dts);
536 
537  /* the only case where decode data is not set should be decoders
538  * that do not call ff_get_buffer() */
539  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
540  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
541 
542  if (frame->private_ref) {
543  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
544 
545  if (fdd->post_process) {
546  ret = fdd->post_process(avctx, frame);
547  if (ret < 0) {
548  av_frame_unref(frame);
549  return ret;
550  }
551  }
552  }
553  }
554 
555  /* free the per-frame decode data */
556  av_buffer_unref(&frame->private_ref);
557 
558  return ret;
559 }
560 
561 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
562 {
563  AVCodecInternal *avci = avctx->internal;
564  int ret;
565 
566  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
567  return AVERROR(EINVAL);
568 
569  if (avctx->internal->draining)
570  return AVERROR_EOF;
571 
572  if (avpkt && !avpkt->size && avpkt->data)
573  return AVERROR(EINVAL);
574 
575  av_packet_unref(avci->buffer_pkt);
576  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
577  ret = av_packet_ref(avci->buffer_pkt, avpkt);
578  if (ret < 0)
579  return ret;
580  }
581 
582  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
583  if (ret < 0) {
584  av_packet_unref(avci->buffer_pkt);
585  return ret;
586  }
587 
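 /* Eagerly decode one frame into avci->buffer_frame so the bitstream filter
  * output is consumed right away; avcodec_receive_frame() will return this
  * cached frame before decoding any further ones. */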
588  if (!avci->buffer_frame->buf[0]) {
589  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
590  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
591  return ret;
592  }
593 
594  return 0;
595 }
596 
597 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
598 {
599  /* make sure we are noisy about decoders returning invalid cropping data */
600  if (frame->crop_left >= INT_MAX - frame->crop_right ||
601  frame->crop_top >= INT_MAX - frame->crop_bottom ||
602  (frame->crop_left + frame->crop_right) >= frame->width ||
603  (frame->crop_top + frame->crop_bottom) >= frame->height) {
604  av_log(avctx, AV_LOG_WARNING,
605  "Invalid cropping information set by a decoder: "
606  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
607  "(frame size %dx%d). This is a bug, please report it\n",
608  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
609  frame->width, frame->height);
610  frame->crop_left = 0;
611  frame->crop_right = 0;
612  frame->crop_top = 0;
613  frame->crop_bottom = 0;
614  return 0;
615  }
616 
617  if (!avctx->apply_cropping)
618  return 0;
619 
620  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
621  AV_FRAME_CROP_UNALIGNED : 0);
622 }
623 
624 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
625 {
626  AVCodecInternal *avci = avctx->internal;
627  int ret, changed;
628 
629  av_frame_unref(frame);
630 
631  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
632  return AVERROR(EINVAL);
633 
634  if (avci->buffer_frame->buf[0]) {
635  av_frame_move_ref(frame, avci->buffer_frame);
636  } else {
637  ret = decode_receive_frame_internal(avctx, frame);
638  if (ret < 0)
639  return ret;
640  }
641 
642  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
643  ret = apply_cropping(avctx, frame);
644  if (ret < 0) {
645  av_frame_unref(frame);
646  return ret;
647  }
648  }
649 
650  avctx->frame_number++;
651 
652  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
653 
654  if (avctx->frame_number == 1) {
655  avci->initial_format = frame->format;
656  switch(avctx->codec_type) {
657  case AVMEDIA_TYPE_VIDEO:
658  avci->initial_width = frame->width;
659  avci->initial_height = frame->height;
660  break;
661  case AVMEDIA_TYPE_AUDIO:
662  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
663  avctx->sample_rate;
664  avci->initial_channels = frame->channels;
665  avci->initial_channel_layout = frame->channel_layout;
666  break;
667  }
668  }
669 
670  if (avctx->frame_number > 1) {
671  changed = avci->initial_format != frame->format;
672 
673  switch(avctx->codec_type) {
674  case AVMEDIA_TYPE_VIDEO:
675  changed |= avci->initial_width != frame->width ||
676  avci->initial_height != frame->height;
677  break;
678  case AVMEDIA_TYPE_AUDIO:
679  changed |= avci->initial_sample_rate != frame->sample_rate ||
680  avci->initial_sample_rate != avctx->sample_rate ||
681  avci->initial_channels != frame->channels ||
682  avci->initial_channel_layout != frame->channel_layout;
683  break;
684  }
685 
686  if (changed) {
687  avci->changed_frames_dropped++;
688  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
689  " drop count: %d \n",
690  avctx->frame_number, frame->pts,
691  avci->changed_frames_dropped);
692  av_frame_unref(frame);
693  return AVERROR_INPUT_CHANGED;
694  }
695  }
696  }
697  return 0;
698 }
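/*
 * Typical calling pattern for the send/receive API implemented above
 * (illustrative sketch only; fmt_ctx, dec_ctx, pkt and frame are assumed to
 * have been set up by the caller, error handling is omitted):
 *
 *     while (av_read_frame(fmt_ctx, pkt) >= 0) {
 *         if (avcodec_send_packet(dec_ctx, pkt) >= 0) {
 *             while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
 *                 // ... use the decoded frame ...
 *                 av_frame_unref(frame);
 *             }
 *         }
 *         av_packet_unref(pkt);
 *     }
 *     avcodec_send_packet(dec_ctx, NULL);    // enter draining mode
 *     while (avcodec_receive_frame(dec_ctx, frame) >= 0)
 *         av_frame_unref(frame);
 */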
699 
700 #if FF_API_OLD_ENCDEC
701 FF_DISABLE_DEPRECATION_WARNINGS
702 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
703 {
704  int ret;
705 
706  /* move the original frame to our backup */
707  av_frame_unref(avci->to_free);
708  av_frame_move_ref(avci->to_free, frame);
709 
710  /* now copy everything except the AVBufferRefs back
711  * note that we make a COPY of the side data, so calling av_frame_free() on
712  * the caller's frame will work properly */
713  ret = av_frame_copy_props(frame, avci->to_free);
714  if (ret < 0)
715  return ret;
716 
717  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
718  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
719  if (avci->to_free->extended_data != avci->to_free->data) {
720  int planes = avci->to_free->channels;
721  int size = planes * sizeof(*frame->extended_data);
722 
723  if (!size) {
724  av_frame_unref(frame);
725  return AVERROR_BUG;
726  }
727 
728  frame->extended_data = av_malloc(size);
729  if (!frame->extended_data) {
730  av_frame_unref(frame);
731  return AVERROR(ENOMEM);
732  }
733  memcpy(frame->extended_data, avci->to_free->extended_data,
734  size);
735  } else
736  frame->extended_data = frame->data;
737 
738  frame->format = avci->to_free->format;
739  frame->width = avci->to_free->width;
740  frame->height = avci->to_free->height;
741  frame->channel_layout = avci->to_free->channel_layout;
742  frame->nb_samples = avci->to_free->nb_samples;
743  frame->channels = avci->to_free->channels;
744 
745  return 0;
746 }
747 
748 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
749  int *got_frame, const AVPacket *pkt)
750 {
751  AVCodecInternal *avci = avctx->internal;
752  int ret = 0;
753 
753 
754  av_assert0(avci->compat_decode_consumed == 0);
755 
756  if (avci->draining_done && pkt && pkt->size != 0) {
757  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
758  avcodec_flush_buffers(avctx);
759  }
760 
761  *got_frame = 0;
762 
763  if (avci->compat_decode_partial_size > 0 &&
764  avci->compat_decode_partial_size != pkt->size) {
765  av_log(avctx, AV_LOG_ERROR,
766  "Got unexpected packet size after a partial decode\n");
767  ret = AVERROR(EINVAL);
768  goto finish;
769  }
770 
771  if (!avci->compat_decode_partial_size) {
772  ret = avcodec_send_packet(avctx, pkt);
773  if (ret == AVERROR_EOF)
774  ret = 0;
775  else if (ret == AVERROR(EAGAIN)) {
776  /* we fully drain all the output in each decode call, so this should not
777  * ever happen */
778  ret = AVERROR_BUG;
779  goto finish;
780  } else if (ret < 0)
781  goto finish;
782  }
783 
784  while (ret >= 0) {
785  ret = avcodec_receive_frame(avctx, frame);
786  if (ret < 0) {
787  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
788  ret = 0;
789  goto finish;
790  }
791 
792  if (frame != avci->compat_decode_frame) {
793  if (!avctx->refcounted_frames) {
794  ret = unrefcount_frame(avci, frame);
795  if (ret < 0)
796  goto finish;
797  }
798 
799  *got_frame = 1;
800  frame = avci->compat_decode_frame;
801  } else {
802  if (!avci->compat_decode_warned) {
803  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
804  "API cannot return all the frames for this decoder. "
805  "Some frames will be dropped. Update your code to the "
806  "new decoding API to fix this.\n");
807  avci->compat_decode_warned = 1;
808  }
809  }
810 
811  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
812  break;
813  }
814 
815 finish:
816  if (ret == 0) {
817  /* if there are any bsfs then assume full packet is always consumed */
818  if (avctx->codec->bsfs)
819  ret = pkt->size;
820  else
821  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
822  }
823  avci->compat_decode_consumed = 0;
824  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
825 
826  return ret;
827 }
828 
829 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
830  int *got_picture_ptr,
831  const AVPacket *avpkt)
832 {
833  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
834 }
835 
836 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
837  AVFrame *frame,
838  int *got_frame_ptr,
839  const AVPacket *avpkt)
840 {
841  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
842 }
843 FF_ENABLE_DEPRECATION_WARNINGS
844 #endif
845 
846 static void get_subtitle_defaults(AVSubtitle *sub)
847 {
848  memset(sub, 0, sizeof(*sub));
849  sub->pts = AV_NOPTS_VALUE;
850 }
851 
852 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
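/* recode_subtitle() below sizes its output buffer as inl * UTF8_MAX_BYTES:
 * one input byte can expand to at most four bytes of UTF-8, so this bound is
 * safe for any source encoding iconv may be given. */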
853 static int recode_subtitle(AVCodecContext *avctx,
854  AVPacket *outpkt, const AVPacket *inpkt)
855 {
856 #if CONFIG_ICONV
857  iconv_t cd = (iconv_t)-1;
858  int ret = 0;
859  char *inb, *outb;
860  size_t inl, outl;
861  AVPacket tmp;
862 #endif
863 
864  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
865  return 0;
866 
867 #if CONFIG_ICONV
868  cd = iconv_open("UTF-8", avctx->sub_charenc);
869  av_assert0(cd != (iconv_t)-1);
870 
871  inb = inpkt->data;
872  inl = inpkt->size;
873 
874  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
875  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
876  ret = AVERROR(ENOMEM);
877  goto end;
878  }
879 
880  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
881  if (ret < 0)
882  goto end;
883  outpkt->buf = tmp.buf;
884  outpkt->data = tmp.data;
885  outpkt->size = tmp.size;
886  outb = outpkt->data;
887  outl = outpkt->size;
888 
889  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
890  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
891  outl >= outpkt->size || inl != 0) {
892  ret = FFMIN(AVERROR(errno), -1);
893  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
894  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
895  av_packet_unref(&tmp);
896  goto end;
897  }
898  outpkt->size -= outl;
899  memset(outpkt->data + outpkt->size, 0, outl);
900 
901 end:
902  if (cd != (iconv_t)-1)
903  iconv_close(cd);
904  return ret;
905 #else
906  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
907  return AVERROR(EINVAL);
908 #endif
909 }
910 
911 static int utf8_check(const uint8_t *str)
912 {
913  const uint8_t *byte;
914  uint32_t codepoint, min;
915 
916  while (*str) {
917  byte = str;
918  GET_UTF8(codepoint, *(byte++), return 0;);
919  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
920  1 << (5 * (byte - str) - 4);
921  if (codepoint < min || codepoint >= 0x110000 ||
922  codepoint == 0xFFFE /* BOM */ ||
923  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
924  return 0;
925  str = byte;
926  }
927  return 1;
928 }
929 
930 #if FF_API_ASS_TIMING
931 static void insert_ts(AVBPrint *buf, int ts)
932 {
933  if (ts == -1) {
934  av_bprintf(buf, "9:59:59.99,");
935  } else {
936  int h, m, s;
937 
938  h = ts/360000; ts -= 360000*h;
939  m = ts/ 6000; ts -= 6000*m;
940  s = ts/ 100; ts -= 100*s;
941  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
942  }
943 }
944 
945 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
946 {
947  int i;
948  AVBPrint buf;
949 
950  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
951 
952  for (i = 0; i < sub->num_rects; i++) {
953  char *final_dialog;
954  const char *dialog;
955  AVSubtitleRect *rect = sub->rects[i];
956  int ts_start, ts_duration = -1;
957  long int layer;
958 
959  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
960  continue;
961 
962  av_bprint_clear(&buf);
963 
964  /* skip ReadOrder */
965  dialog = strchr(rect->ass, ',');
966  if (!dialog)
967  continue;
968  dialog++;
969 
970  /* extract Layer or Marked */
971  layer = strtol(dialog, (char**)&dialog, 10);
972  if (*dialog != ',')
973  continue;
974  dialog++;
975 
976  /* rescale timing to ASS time base (ms) */
977  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
978  if (pkt->duration != -1)
979  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
980  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
981 
982  /* construct ASS (standalone file form with timestamps) string */
983  av_bprintf(&buf, "Dialogue: %ld,", layer);
984  insert_ts(&buf, ts_start);
985  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
986  av_bprintf(&buf, "%s\r\n", dialog);
987 
988  final_dialog = av_strdup(buf.str);
989  if (!av_bprint_is_complete(&buf) || !final_dialog) {
990  av_freep(&final_dialog);
991  av_bprint_finalize(&buf, NULL);
992  return AVERROR(ENOMEM);
993  }
994  av_freep(&rect->ass);
995  rect->ass = final_dialog;
996  }
997 
998  av_bprint_finalize(&buf, NULL);
999  return 0;
1000 }
1001 #endif
1002 
1003 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1004  int *got_sub_ptr,
1005  AVPacket *avpkt)
1006 {
1007  int i, ret = 0;
1008 
1009  if (!avpkt->data && avpkt->size) {
1010  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1011  return AVERROR(EINVAL);
1012  }
1013  if (!avctx->codec)
1014  return AVERROR(EINVAL);
1015  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1016  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1017  return AVERROR(EINVAL);
1018  }
1019 
1020  *got_sub_ptr = 0;
1021  get_subtitle_defaults(sub);
1022 
1023  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1024  AVPacket pkt_recoded = *avpkt;
1025 
1026  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1027  if (ret < 0) {
1028  *got_sub_ptr = 0;
1029  } else {
1030  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1031  if (ret < 0)
1032  return ret;
1033 
1034  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1035  sub->pts = av_rescale_q(avpkt->pts,
1036  avctx->pkt_timebase, AV_TIME_BASE_Q);
1037  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1038  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1039  !!*got_sub_ptr >= !!sub->num_rects);
1040 
1041 #if FF_API_ASS_TIMING
1042  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1043  && *got_sub_ptr && sub->num_rects) {
1044  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1045  : avctx->time_base;
1046  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1047  if (err < 0)
1048  ret = err;
1049  }
1050 #endif
1051 
1052  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1053  avctx->pkt_timebase.num) {
1054  AVRational ms = { 1, 1000 };
1055  sub->end_display_time = av_rescale_q(avpkt->duration,
1056  avctx->pkt_timebase, ms);
1057  }
1058 
1059  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1060  sub->format = 0;
1061  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1062  sub->format = 1;
1063 
1064  for (i = 0; i < sub->num_rects; i++) {
1065  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1066  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1067  av_log(avctx, AV_LOG_ERROR,
1068  "Invalid UTF-8 in decoded subtitles text; "
1069  "maybe missing -sub_charenc option\n");
1070  avsubtitle_free(sub);
1071  ret = AVERROR_INVALIDDATA;
1072  break;
1073  }
1074  }
1075 
1076  if (avpkt->data != pkt_recoded.data) { // did we recode?
1077  /* prevent from destroying side data from original packet */
1078  pkt_recoded.side_data = NULL;
1079  pkt_recoded.side_data_elems = 0;
1080 
1081  av_packet_unref(&pkt_recoded);
1082  }
1083  }
1084 
1085  if (*got_sub_ptr)
1086  avctx->frame_number++;
1087  }
1088 
1089  return ret;
1090 }
1091 
1092 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1093  const enum AVPixelFormat *fmt)
1094 {
1095  const AVPixFmtDescriptor *desc;
1096  const AVCodecHWConfig *config;
1097  int i, n;
1098 
1099  // If a device was supplied when the codec was opened, assume that the
1100  // user wants to use it.
1101  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1102  AVHWDeviceContext *device_ctx =
1103  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1104  for (i = 0;; i++) {
1105  config = &avctx->codec->hw_configs[i]->public;
1106  if (!config)
1107  break;
1108  if (!(config->methods &
1109  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1110  continue;
1111  if (device_ctx->type != config->device_type)
1112  continue;
1113  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1114  if (config->pix_fmt == fmt[n])
1115  return fmt[n];
1116  }
1117  }
1118  }
1119  // No device or other setup, so we have to choose from things which
1120  // don't need any other external information.
1121 
1122  // If the last element of the list is a software format, choose it
1123  // (this should be best software format if any exist).
1124  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1125  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1126  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1127  return fmt[n - 1];
1128 
1129  // Finally, traverse the list in order and choose the first entry
1130  // with no external dependencies (if there is no hardware configuration
1131  // information available then this just picks the first entry).
1132  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1133  for (i = 0;; i++) {
1134  config = avcodec_get_hw_config(avctx->codec, i);
1135  if (!config)
1136  break;
1137  if (config->pix_fmt == fmt[n])
1138  break;
1139  }
1140  if (!config) {
1141  // No specific config available, so the decoder must be able
1142  // to handle this format without any additional setup.
1143  return fmt[n];
1144  }
1144  }
1145  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1146  // Usable with only internal setup.
1147  return fmt[n];
1148  }
1149  }
1150 
1151  // Nothing is usable, give up.
1152  return AV_PIX_FMT_NONE;
1153 }
1154 
1155 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1156  enum AVHWDeviceType dev_type)
1157 {
1158  AVHWDeviceContext *device_ctx;
1159  AVHWFramesContext *frames_ctx;
1160  int ret;
1161 
1162  if (!avctx->hwaccel)
1163  return AVERROR(ENOSYS);
1164 
1165  if (avctx->hw_frames_ctx)
1166  return 0;
1167  if (!avctx->hw_device_ctx) {
1168  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1169  "required for hardware accelerated decoding.\n");
1170  return AVERROR(EINVAL);
1171  }
1172 
1173  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1174  if (device_ctx->type != dev_type) {
1175  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1176  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1177  av_hwdevice_get_type_name(device_ctx->type));
1178  return AVERROR(EINVAL);
1179  }
1180 
1181  ret = avcodec_get_hw_frames_parameters(avctx,
1182  avctx->hw_device_ctx,
1183  avctx->hwaccel->pix_fmt,
1184  &avctx->hw_frames_ctx);
1185  if (ret < 0)
1186  return ret;
1187 
1188  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1189 
1190 
1191  if (frames_ctx->initial_pool_size) {
1192  // We guarantee 4 base work surfaces. The function above guarantees 1
1193  // (the absolute minimum), so add the missing count.
1194  frames_ctx->initial_pool_size += 3;
1195  }
1196 
1197  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1198  if (ret < 0) {
1199  av_buffer_unref(&avctx->hw_frames_ctx);
1200  return ret;
1201  }
1202 
1203  return 0;
1204 }
1205 
1206 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1207  AVBufferRef *device_ref,
1208  enum AVPixelFormat hw_pix_fmt,
1209  AVBufferRef **out_frames_ref)
1210 {
1211  AVBufferRef *frames_ref = NULL;
1212  const AVCodecHWConfigInternal *hw_config;
1213  const AVHWAccel *hwa;
1214  int i, ret;
1215 
1216  for (i = 0;; i++) {
1217  hw_config = avctx->codec->hw_configs[i];
1218  if (!hw_config)
1219  return AVERROR(ENOENT);
1220  if (hw_config->public.pix_fmt == hw_pix_fmt)
1221  break;
1222  }
1223 
1224  hwa = hw_config->hwaccel;
1225  if (!hwa || !hwa->frame_params)
1226  return AVERROR(ENOENT);
1227 
1228  frames_ref = av_hwframe_ctx_alloc(device_ref);
1229  if (!frames_ref)
1230  return AVERROR(ENOMEM);
1231 
1232  ret = hwa->frame_params(avctx, frames_ref);
1233  if (ret >= 0) {
1234  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1235 
1236  if (frames_ctx->initial_pool_size) {
1237  // If the user has requested that extra output surfaces be
1238  // available then add them here.
1239  if (avctx->extra_hw_frames > 0)
1240  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1241 
1242  // If frame threading is enabled then an extra surface per thread
1243  // is also required.
1244  if (avctx->active_thread_type & FF_THREAD_FRAME)
1245  frames_ctx->initial_pool_size += avctx->thread_count;
1246  }
1247 
1248  *out_frames_ref = frames_ref;
1249  } else {
1250  av_buffer_unref(&frames_ref);
1251  }
1252  return ret;
1253 }
1254 
1255 static int hwaccel_init(AVCodecContext *avctx,
1256  const AVCodecHWConfigInternal *hw_config)
1257 {
1258  const AVHWAccel *hwaccel;
1259  int err;
1260 
1261  hwaccel = hw_config->hwaccel;
1262  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1263  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1264  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1265  hwaccel->name);
1266  return AVERROR_PATCHWELCOME;
1267  }
1268 
1269  if (hwaccel->priv_data_size) {
1270  avctx->internal->hwaccel_priv_data =
1271  av_mallocz(hwaccel->priv_data_size);
1272  if (!avctx->internal->hwaccel_priv_data)
1273  return AVERROR(ENOMEM);
1274  }
1275 
1276  avctx->hwaccel = hwaccel;
1277  if (hwaccel->init) {
1278  err = hwaccel->init(avctx);
1279  if (err < 0) {
1280  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1281  "hwaccel initialisation returned error.\n",
1282  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1283  av_freep(&avctx->internal->hwaccel_priv_data);
1284  avctx->hwaccel = NULL;
1285  return err;
1286  }
1287  }
1288 
1289  return 0;
1290 }
1291 
1292 static void hwaccel_uninit(AVCodecContext *avctx)
1293 {
1294  if (avctx->hwaccel && avctx->hwaccel->uninit)
1295  avctx->hwaccel->uninit(avctx);
1296 
1297  av_freep(&avctx->internal->hwaccel_priv_data);
1298 
1299  avctx->hwaccel = NULL;
1300 
1301  av_buffer_unref(&avctx->hw_frames_ctx);
1302 }
1303 
1304 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1305 {
1306  const AVPixFmtDescriptor *desc;
1307  enum AVPixelFormat *choices;
1308  enum AVPixelFormat ret, user_choice;
1309  const AVCodecHWConfigInternal *hw_config;
1310  const AVCodecHWConfig *config;
1311  int i, n, err;
1312 
1313  // Find end of list.
1314  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1315  // Must contain at least one entry.
1316  av_assert0(n >= 1);
1317  // If a software format is available, it must be the last entry.
1318  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1319  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1320  // No software format is available.
1321  } else {
1322  avctx->sw_pix_fmt = fmt[n - 1];
1323  }
1324 
1325  choices = av_malloc_array(n + 1, sizeof(*choices));
1326  if (!choices)
1327  return AV_PIX_FMT_NONE;
1328 
1329  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1330 
1331  for (;;) {
1332  // Remove the previous hwaccel, if there was one.
1333  hwaccel_uninit(avctx);
1334 
1335  user_choice = avctx->get_format(avctx, choices);
1336  if (user_choice == AV_PIX_FMT_NONE) {
1337  // Explicitly chose nothing, give up.
1338  ret = AV_PIX_FMT_NONE;
1339  break;
1340  }
1341 
1342  desc = av_pix_fmt_desc_get(user_choice);
1343  if (!desc) {
1344  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1345  "get_format() callback.\n");
1346  ret = AV_PIX_FMT_NONE;
1347  break;
1348  }
1349  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1350  desc->name);
1351 
1352  for (i = 0; i < n; i++) {
1353  if (choices[i] == user_choice)
1354  break;
1355  }
1356  if (i == n) {
1357  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1358  "%s not in possible list.\n", desc->name);
1359  ret = AV_PIX_FMT_NONE;
1360  break;
1361  }
1362 
1363  if (avctx->codec->hw_configs) {
1364  for (i = 0;; i++) {
1365  hw_config = avctx->codec->hw_configs[i];
1366  if (!hw_config)
1367  break;
1368  if (hw_config->public.pix_fmt == user_choice)
1369  break;
1370  }
1371  } else {
1372  hw_config = NULL;
1373  }
1374 
1375  if (!hw_config) {
1376  // No config available, so no extra setup required.
1377  ret = user_choice;
1378  break;
1379  }
1380  config = &hw_config->public;
1381 
1382  if (config->methods &
1383  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1384  avctx->hw_frames_ctx) {
1385  const AVHWFramesContext *frames_ctx =
1386  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1387  if (frames_ctx->format != user_choice) {
1388  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1389  "does not match the format of the provided frames "
1390  "context.\n", desc->name);
1391  goto try_again;
1392  }
1393  } else if (config->methods &
1394  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1395  avctx->hw_device_ctx) {
1396  const AVHWDeviceContext *device_ctx =
1397  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1398  if (device_ctx->type != config->device_type) {
1399  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1400  "does not match the type of the provided device "
1401  "context.\n", desc->name);
1402  goto try_again;
1403  }
1404  } else if (config->methods &
1405  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1406  // Internal-only setup, no additional configuration.
1407  } else if (config->methods &
1408  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1409  // Some ad-hoc configuration we can't see and can't check.
1410  } else {
1411  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1412  "missing configuration.\n", desc->name);
1413  goto try_again;
1414  }
1415  if (hw_config->hwaccel) {
1416  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1417  "initialisation.\n", desc->name);
1418  err = hwaccel_init(avctx, hw_config);
1419  if (err < 0)
1420  goto try_again;
1421  }
1422  ret = user_choice;
1423  break;
1424 
1425  try_again:
1426  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1427  "get_format() without it.\n", desc->name);
1428  for (i = 0; i < n; i++) {
1429  if (choices[i] == user_choice)
1430  break;
1431  }
1432  for (; i + 1 < n; i++)
1433  choices[i] = choices[i + 1];
1434  --n;
1435  }
1436 
1437  av_freep(&choices);
1438  return ret;
1439 }
1440 
1441 static void frame_pool_free(void *opaque, uint8_t *data)
1442 {
1443  FramePool *pool = (FramePool*)data;
1444  int i;
1445 
1446  for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1447  av_buffer_pool_uninit(&pool->pools[i]);
1448 
1449  av_freep(&data);
1450 }
1451 
1452 static AVBufferRef *frame_pool_alloc(void)
1453 {
1454  FramePool *pool = av_mallocz(sizeof(*pool));
1455  AVBufferRef *buf;
1456 
1457  if (!pool)
1458  return NULL;
1459 
1460  buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1461  frame_pool_free, NULL, 0);
1462  if (!buf) {
1463  av_freep(&pool);
1464  return NULL;
1465  }
1466 
1467  return buf;
1468 }
1469 
1470 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1471 {
1472  FramePool *pool = avctx->internal->pool ?
1473  (FramePool*)avctx->internal->pool->data : NULL;
1474  AVBufferRef *pool_buf;
1475  int i, ret, ch, planes;
1476 
1477  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1478  int planar = av_sample_fmt_is_planar(frame->format);
1479  ch = frame->channels;
1480  planes = planar ? ch : 1;
1481  }
1482 
1483  if (pool && pool->format == frame->format) {
1484  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1485  pool->width == frame->width && pool->height == frame->height)
1486  return 0;
1487  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1488  pool->channels == ch && frame->nb_samples == pool->samples)
1489  return 0;
1490  }
1491 
1492  pool_buf = frame_pool_alloc();
1493  if (!pool_buf)
1494  return AVERROR(ENOMEM);
1495  pool = (FramePool*)pool_buf->data;
1496 
1497  switch (avctx->codec_type) {
1498  case AVMEDIA_TYPE_VIDEO: {
1499  int linesize[4];
1500  int w = frame->width;
1501  int h = frame->height;
1502  int unaligned;
1503  ptrdiff_t linesize1[4];
1504  size_t size[4];
1505 
1506  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1507 
1508  do {
1509  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1510  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1511  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1512  if (ret < 0)
1513  goto fail;
1514  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1515  w += w & ~(w - 1);
1516 
1517  unaligned = 0;
1518  for (i = 0; i < 4; i++)
1519  unaligned |= linesize[i] % pool->stride_align[i];
1520  } while (unaligned);
1521 
1522  for (i = 0; i < 4; i++)
1523  linesize1[i] = linesize[i];
1524  ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
1525  if (ret < 0)
1526  goto fail;
1527 
1528  for (i = 0; i < 4; i++) {
1529  pool->linesize[i] = linesize[i];
1530  if (size[i]) {
1531  if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
1532  ret = AVERROR(EINVAL);
1533  goto fail;
1534  }
1535  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1536  CONFIG_MEMORY_POISONING ?
1537  NULL :
1538  av_buffer_allocz);
1539  if (!pool->pools[i]) {
1540  ret = AVERROR(ENOMEM);
1541  goto fail;
1542  }
1543  }
1544  }
1545  pool->format = frame->format;
1546  pool->width = frame->width;
1547  pool->height = frame->height;
1548 
1549  break;
1550  }
1551  case AVMEDIA_TYPE_AUDIO: {
1552  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1553  frame->nb_samples, frame->format, 0);
1554  if (ret < 0)
1555  goto fail;
1556 
1557  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1558  if (!pool->pools[0]) {
1559  ret = AVERROR(ENOMEM);
1560  goto fail;
1561  }
1562 
1563  pool->format = frame->format;
1564  pool->planes = planes;
1565  pool->channels = ch;
1566  pool->samples = frame->nb_samples;
1567  break;
1568  }
1569  default: av_assert0(0);
1570  }
1571 
1572  av_buffer_unref(&avctx->internal->pool);
1573  avctx->internal->pool = pool_buf;
1574 
1575  return 0;
1576 fail:
1577  av_buffer_unref(&pool_buf);
1578  return ret;
1579 }
1580 
1581 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1582 {
1583  FramePool *pool = (FramePool*)avctx->internal->pool->data;
1584  int planes = pool->planes;
1585  int i;
1586 
1587  frame->linesize[0] = pool->linesize[0];
1588 
1589  if (planes > AV_NUM_DATA_POINTERS) {
1590  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1591  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1592  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1593  sizeof(*frame->extended_buf));
1594  if (!frame->extended_data || !frame->extended_buf) {
1595  av_freep(&frame->extended_data);
1596  av_freep(&frame->extended_buf);
1597  return AVERROR(ENOMEM);
1598  }
1599  } else {
1600  frame->extended_data = frame->data;
1601  av_assert0(frame->nb_extended_buf == 0);
1602  }
1603 
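 /* All audio planes have the same size (see FramePool), so every plane,
  * including those beyond AV_NUM_DATA_POINTERS, is taken from pools[0]. */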
1604  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1605  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1606  if (!frame->buf[i])
1607  goto fail;
1608  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1609  }
1610  for (i = 0; i < frame->nb_extended_buf; i++) {
1611  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1612  if (!frame->extended_buf[i])
1613  goto fail;
1614  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1615  }
1616 
1617  if (avctx->debug & FF_DEBUG_BUFFERS)
1618  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1619 
1620  return 0;
1621 fail:
1622  av_frame_unref(frame);
1623  return AVERROR(ENOMEM);
1624 }
1625 
1626 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1627 {
1628  FramePool *pool = (FramePool*)s->internal->pool->data;
1629  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1630  int i;
1631 
1632  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1633  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1634  return -1;
1635  }
1636 
1637  if (!desc) {
1638  av_log(s, AV_LOG_ERROR,
1639  "Unable to get pixel format descriptor for format %s\n",
1640  av_get_pix_fmt_name(pic->format));
1641  return AVERROR(EINVAL);
1642  }
1643 
1644  memset(pic->data, 0, sizeof(pic->data));
1645  pic->extended_data = pic->data;
1646 
1647  for (i = 0; i < 4 && pool->pools[i]; i++) {
1648  pic->linesize[i] = pool->linesize[i];
1649 
1650  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1651  if (!pic->buf[i])
1652  goto fail;
1653 
1654  pic->data[i] = pic->buf[i]->data;
1655  }
1656  for (; i < AV_NUM_DATA_POINTERS; i++) {
1657  pic->data[i] = NULL;
1658  pic->linesize[i] = 0;
1659  }
1660  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1661  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1662  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1663 
1664  if (s->debug & FF_DEBUG_BUFFERS)
1665  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1666 
1667  return 0;
1668 fail:
1669  av_frame_unref(pic);
1670  return AVERROR(ENOMEM);
1671 }
1672 
1673 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1674 {
1675  int ret;
1676 
1677  if (avctx->hw_frames_ctx) {
1678  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1679  frame->width = avctx->coded_width;
1680  frame->height = avctx->coded_height;
1681  return ret;
1682  }
1683 
1684  if ((ret = update_frame_pool(avctx, frame)) < 0)
1685  return ret;
1686 
1687  switch (avctx->codec_type) {
1688  case AVMEDIA_TYPE_VIDEO:
1689  return video_get_buffer(avctx, frame);
1690  case AVMEDIA_TYPE_AUDIO:
1691  return audio_get_buffer(avctx, frame);
1692  default:
1693  return -1;
1694  }
1695 }
1696 
1697 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1698 {
1699  int size;
1700  const uint8_t *side_metadata;
1701 
1702  AVDictionary **frame_md = &frame->metadata;
1703 
1704  side_metadata = av_packet_get_side_data(avpkt,
1705  AV_PKT_DATA_STRINGS_METADATA, &size);
1706  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1707 }
1708 
1709 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1710 {
1711  AVPacket *pkt = avctx->internal->last_pkt_props;
1712  int i;
1713  static const struct {
1714  enum AVPacketSideDataType packet;
1715  enum AVFrameSideDataType frame;
1716  } sd[] = {
1717  { AV_PKT_DATA_REPLAYGAIN,                 AV_FRAME_DATA_REPLAYGAIN },
1718  { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
1719  { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
1720  { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
1721  { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1722  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1723  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1724  { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
1725  { AV_PKT_DATA_ICC_PROFILE,                AV_FRAME_DATA_ICC_PROFILE },
1726  { AV_PKT_DATA_S12M_TIMECODE,              AV_FRAME_DATA_S12M_TIMECODE },
1727  };
1728 
1729  if (IS_EMPTY(pkt))
1730  avpriv_packet_list_get(&avctx->internal->pkt_props,
1731  &avctx->internal->pkt_props_tail,
1732  pkt);
1733 
1734  if (pkt) {
1735  frame->pts = pkt->pts;
1736 #if FF_API_PKT_PTS
1737 FF_DISABLE_DEPRECATION_WARNINGS
1738  frame->pkt_pts = pkt->pts;
1739 FF_ENABLE_DEPRECATION_WARNINGS
1740 #endif
1741  frame->pkt_pos = pkt->pos;
1742  frame->pkt_duration = pkt->duration;
1743  frame->pkt_size = pkt->size;
1744 
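 /* Copy every piece of packet side data that has a direct frame-side
  * counterpart listed in sd[], then merge any string metadata. */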
1745  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1746  int size;
1747  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1748  if (packet_sd) {
1749  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1750  sd[i].frame,
1751  size);
1752  if (!frame_sd)
1753  return AVERROR(ENOMEM);
1754 
1755  memcpy(frame_sd->data, packet_sd, size);
1756  }
1757  }
1758  add_metadata_from_side_data(pkt, frame);
1759 
1760  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1761  frame->flags |= AV_FRAME_FLAG_DISCARD;
1762  } else {
1763  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1764  }
1765  }
1766  frame->reordered_opaque = avctx->reordered_opaque;
1767 
1768  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1769  frame->color_primaries = avctx->color_primaries;
1770  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1771  frame->color_trc = avctx->color_trc;
1772  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1773  frame->colorspace = avctx->colorspace;
1774  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1775  frame->color_range = avctx->color_range;
1776  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1777  frame->chroma_location = avctx->chroma_sample_location;
1778 
1779  switch (avctx->codec->type) {
1780  case AVMEDIA_TYPE_VIDEO:
1781  frame->format = avctx->pix_fmt;
1782  if (!frame->sample_aspect_ratio.num)
1783  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1784 
1785  if (frame->width && frame->height &&
1786  av_image_check_sar(frame->width, frame->height,
1787  frame->sample_aspect_ratio) < 0) {
1788  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1789  frame->sample_aspect_ratio.num,
1790  frame->sample_aspect_ratio.den);
1791  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1792  }
1793 
1794  break;
1795  case AVMEDIA_TYPE_AUDIO:
1796  if (!frame->sample_rate)
1797  frame->sample_rate = avctx->sample_rate;
1798  if (frame->format < 0)
1799  frame->format = avctx->sample_fmt;
1800  if (!frame->channel_layout) {
1801  if (avctx->channel_layout) {
1802  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1803  avctx->channels) {
1804  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1805  "configuration.\n");
1806  return AVERROR(EINVAL);
1807  }
1808 
1809  frame->channel_layout = avctx->channel_layout;
1810  } else {
1811  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1812  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1813  avctx->channels);
1814  return AVERROR(ENOSYS);
1815  }
1816  }
1817  }
1818  frame->channels = avctx->channels;
1819  break;
1820  }
1821  return 0;
1822 }
1823 
1824 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1825 {
1826  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1827  int i;
1828  int num_planes = av_pix_fmt_count_planes(frame->format);
1829  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1830  int flags = desc ? desc->flags : 0;
1831  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1832  num_planes = 2;
1833  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1834  num_planes = 2;
1835  for (i = 0; i < num_planes; i++) {
1836  av_assert0(frame->data[i]);
1837  }
1838  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1839  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1840  if (frame->data[i])
1841  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1842  frame->data[i] = NULL;
1843  }
1844  }
1845 }
1846 
1847 static void decode_data_free(void *opaque, uint8_t *data)
1848 {
1849  FrameDecodeData *fdd = (FrameDecodeData*)data;
1850 
1851  if (fdd->post_process_opaque_free)
1852  fdd->post_process_opaque_free(fdd->post_process_opaque);
1853 
1854  if (fdd->hwaccel_priv_free)
1855  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1856 
1857  av_freep(&fdd);
1858 }
1859 
1860 int ff_attach_decode_data(AVFrame *frame)
1861 {
1862  AVBufferRef *fdd_buf;
1863  FrameDecodeData *fdd;
1864 
1865  av_assert1(!frame->private_ref);
1866  av_buffer_unref(&frame->private_ref);
1867 
1868  fdd = av_mallocz(sizeof(*fdd));
1869  if (!fdd)
1870  return AVERROR(ENOMEM);
1871 
1872  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1873  NULL, AV_BUFFER_FLAG_READONLY);
1874  if (!fdd_buf) {
1875  av_freep(&fdd);
1876  return AVERROR(ENOMEM);
1877  }
1878 
1879  frame->private_ref = fdd_buf;
1880 
1881  return 0;
1882 }
1883 
1884 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1885 {
1886  const AVHWAccel *hwaccel = avctx->hwaccel;
1887  int override_dimensions = 1;
1888  int ret;
1889 
1890  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1891  if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
1892  (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1893  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1894  ret = AVERROR(EINVAL);
1895  goto fail;
1896  }
1897 
1898  if (frame->width <= 0 || frame->height <= 0) {
1899  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1900  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1901  override_dimensions = 0;
1902  }
1903 
1904  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1905  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1906  ret = AVERROR(EINVAL);
1907  goto fail;
1908  }
1909  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1910  if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1911  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1912  ret = AVERROR(EINVAL);
1913  goto fail;
1914  }
1915  }
1916  ret = ff_decode_frame_props(avctx, frame);
1917  if (ret < 0)
1918  goto fail;
1919 
1920  if (hwaccel) {
1921  if (hwaccel->alloc_frame) {
1922  ret = hwaccel->alloc_frame(avctx, frame);
1923  goto end;
1924  }
1925  } else
1926  avctx->sw_pix_fmt = avctx->pix_fmt;
1927 
1928  ret = avctx->get_buffer2(avctx, frame, flags);
1929  if (ret < 0)
1930  goto fail;
1931 
1932  validate_avframe_allocation(avctx, frame);
1933 
1934  ret = ff_attach_decode_data(frame);
1935  if (ret < 0)
1936  goto fail;
1937 
1938 end:
1939  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1940  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1941  frame->width = avctx->width;
1942  frame->height = avctx->height;
1943  }
1944 
1945 fail:
1946  if (ret < 0) {
1947  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1948  av_frame_unref(frame);
1949  }
1950 
1951  return ret;
1952 }
1953 
1954 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1955 {
1956  AVFrame *tmp;
1957  int ret;
1958 
1959  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1960 
1961  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1962  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1963  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1964  av_frame_unref(frame);
1965  }
1966 
1967  if (!frame->data[0])
1968  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1969 
1970  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1971  return ff_decode_frame_props(avctx, frame);
1972 
1973  tmp = av_frame_alloc();
1974  if (!tmp)
1975  return AVERROR(ENOMEM);
1976 
1977  av_frame_move_ref(tmp, frame);
1978 
1979  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1980  if (ret < 0) {
1981  av_frame_free(&tmp);
1982  return ret;
1983  }
1984 
1985  av_frame_copy(frame, tmp);
1986  av_frame_free(&tmp);
1987 
1988  return 0;
1989 }
1990 
1991 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1992 {
1993  int ret = reget_buffer_internal(avctx, frame, flags);
1994  if (ret < 0)
1995  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1996  return ret;
1997 }
AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};static void cpy1(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, len);}static void cpy2(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 2 *len);}static void cpy4(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 4 *len);}static void cpy8(uint8_t **dst, const uint8_t **src, int len){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;}void swri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len){int ch;int off=0;const int os=(out->planar?1:out->ch_count)*out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){int planes=in->planar?in->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){int planes=out->planar?out->ch_count:1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){int planes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out->ch+ch,(const uint8_t **) in->ch+ch, off *(out-> planar
Definition: audioconvert.c:56
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:505
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:499
int width
Definition: decode.c:60
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: codec.h:298
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: packet.h:114
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1166
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:523
static FF_DISABLE_DEPRECATION_WARNINGS int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:702
int ff_decode_bsfs_init(AVCodecContext *avctx)
Called during avcodec_open2() to initialize avctx->internal->bsf.
Definition: decode.c:168
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int size
Definition: packet.h:364
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:101
int initial_channels
Definition: internal.h:209
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:2418
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:910
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
static void frame_pool_free(void *opaque, uint8_t *data)
Definition: decode.c:1441
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:741
int samples
Definition: decode.c:65
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:836
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:2679
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
enum AVMediaType type
Definition: codec.h:203
#define FF_ARRAY_ELEMS(a)
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:68
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:853
AVBufferPool * pools[4]
Pools for each data plane.
Definition: decode.c:54
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1709
size_t crop_bottom
Definition: frame.h:675
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1003
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1675
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:2437
static int utf8_check(const uint8_t *str)
Definition: decode.c:911
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:597
Mastering display metadata (based on SMPTE-2086:2014).
Definition: packet.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:654
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:2680
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:2529
static AVBufferRef * frame_pool_alloc(void)
Definition: decode.c:1452
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static FF_ENABLE_DEPRECATION_WARNINGS void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:846
int height
Definition: decode.c:60
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1092
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1199
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1860
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:245
size_t crop_left
Definition: frame.h:676
AVPacket pkt
Definition: packet.h:397
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:271
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:693
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:202
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:407
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:432
int planes
Definition: decode.c:63
Structure to hold side data for an AVFrame.
Definition: frame.h:220
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
size_t compat_decode_consumed
Definition: internal.h:189
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: internal.h:303
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:600
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1749
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:2521
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:207
int initial_format
Definition: internal.h:206
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1173
#define FFALIGN(x, a)
Definition: macros.h:48
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:391
#define av_log(a,...)
The buffer pool.
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:615
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:156
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define AV_RL8(x)
Definition: intreadwrite.h:398
AVPacketList * pkt_props_tail
Definition: internal.h:151
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:88
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2076
AVBSFContext * bsf
Definition: internal.h:143
int width
Definition: frame.h:372
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:821
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
const struct AVCodecHWConfigInternal *const * hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: codec.h:323
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1255
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1824
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:72
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:891
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ICC profile data consisting of an opaque octet buffer following the format described by ISO 15076-1...
Definition: packet.h:274
int64_t pts_correction_last_pts
Number of incorrect DTS values so far.
Definition: avcodec.h:2092
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:1991
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1787
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:437
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1984
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:624
AVFrame * buffer_frame
Definition: internal.h:182
int capabilities
Codec capabilities.
Definition: codec.h:209
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:558
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:611
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:569
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: packet.h:375
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2239
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:333
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
#define FFMAX(a, b)
Definition: common.h:103
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:133
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:799
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:2535
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:147
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1242
uint32_t end_display_time
Definition: avcodec.h:2678
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2681
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:491
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
size_t crop_top
Definition: frame.h:674
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:522
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:572
int channels
number of audio channels, only used for audio.
Definition: frame.h:620
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:551
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1640
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1779
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2397
#define FFMIN(a, b)
Definition: common.h:105
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:519
int channels
Definition: decode.c:64
AVFrame * compat_decode_frame
Definition: internal.h:193
int width
picture width / height.
Definition: avcodec.h:704
uint8_t w
Definition: llviddspenc.c:39
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2209
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1697
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:748
AVPacket * in_pkt
Definition: internal.h:112
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: packet.h:228
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1145
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
uint16_t format
Definition: avcodec.h:2676
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:1630
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1668
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1651
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: codec.h:314
DecodeSimpleContext ds
Definition: internal.h:142
int avpriv_packet_list_put(AVPacketList **packet_buffer, AVPacketList **plast_pktl, AVPacket *pkt, int(*copy)(AVPacket *dst, const AVPacket *src), int flags)
Append an AVPacket to the list.
Definition: avpacket.c:729
char * sub_charenc
DTS of the last frame.
Definition: avcodec.h:2100
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1581
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:176
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1768
int linesize[4]
Definition: decode.c:62
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:2108
if(ret)
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1093
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:387
Content light level (based on CTA-861.3).
Definition: packet.h:235
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:561
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame, int64_t *discarded_samples)
Definition: decode.c:277
int(* decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt)
Definition: codec.h:284
AVPacketList * pkt_props
Definition: internal.h:150
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:200
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1673
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:539
int compat_decode_warned
Definition: internal.h:186
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:593
A list of zero terminated key/value strings.
Definition: packet.h:172
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:829
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_WB16 unsigned int_TMPL byte
Definition: bytestream.h:91
int sample_rate
samples per second
Definition: avcodec.h:1191
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:345
int initial_sample_rate
Definition: internal.h:208
int debug
debug
Definition: avcodec.h:1618
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1870
main external API structure.
Definition: avcodec.h:531
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
int skip_samples_multiplier
Definition: internal.h:199
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1954
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1148
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:402
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1884
uint8_t * data
Definition: frame.h:222
int avpriv_packet_list_get(AVPacketList **pkt_buffer, AVPacketList **pkt_buffer_end, AVPacket *pkt)
Remove the oldest AVPacket in the list and return it.
Definition: avpacket.c:766
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
size_t crop_right
Definition: frame.h:677
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:2327
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:719
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:481
int sample_rate
Sample rate of the audio data.
Definition: frame.h:486
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1346
int showed_multi_packet_warning
Definition: internal.h:197
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:726
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: codec.h:93
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:308
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1159
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1152
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2126
Recommmends skipping the specified number of samples.
Definition: packet.h:156
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:2216
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:166
#define STRIDE_ALIGN
Definition: internal.h:108
enum AVChromaLocation chroma_location
Definition: frame.h:571
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:2550
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:578
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:1363
AVBufferRef * pool
Definition: internal.h:138
#define AV_CODEC_FLAG_DROPCHANGED
Don&#39;t output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:292
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:191
#define flags(name, subs,...)
Definition: cbs_av1.c:561
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: packet.h:99
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:404
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1155
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:328
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1847
#define UTF8_MAX_BYTES
Definition: decode.c:852
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:415
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:181
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:423
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:2311
The codec supports this format by some ad-hoc method.
Definition: codec.h:420
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:374
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1206
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:408
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:266
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:2557
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:247
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:931
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:171
int caps_internal
Internal codec capabilities.
Definition: codec.h:308
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:288
uint64_t initial_channel_layout
Definition: internal.h:210
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:959
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1626
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:2635
#define FF_PSEUDOPAL
Definition: internal.h:297
AVHWDeviceType
Definition: hwcontext.h:27
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int channels
number of audio channels
Definition: avcodec.h:1192
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:566
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2670
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn&#39;t be output.
Definition: frame.h:543
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:618
enum AVColorPrimaries color_primaries
Definition: frame.h:560
static int extract_packet_props(AVCodecInternal *avci, AVPacket *pkt)
Definition: decode.c:148
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
PTS of the last frame.
Definition: avcodec.h:2093
size_t compat_decode_partial_size
Definition: internal.h:192
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1222
#define IS_EMPTY(pkt)
Definition: decode.c:146
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1470
int height
Definition: frame.h:372
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:2090
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:562
static float sub(float src0, float src1)
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:373
Recommmends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:444
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:2219
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:2112
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
enum AVSubtitleType type
Definition: avcodec.h:2661
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:361
int format
Definition: decode.c:59
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:2261
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:629
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:91
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:380
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1596
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:2069
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1292
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: packet.h:120
static uint8_t tmp[11]
Definition: aes_ctr.c:27