FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwaccel.h"
45 #include "internal.h"
46 #include "thread.h"
47 
48 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
49 {
50  int size = 0, ret;
51  const uint8_t *data;
52  uint32_t flags;
53  int64_t val;
54 
55  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
56  if (!data)
57  return 0;
58 
59  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
60  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
61  "changes, but PARAM_CHANGE side data was sent to it.\n");
62  ret = AVERROR(EINVAL);
63  goto fail2;
64  }
65 
66  if (size < 4)
67  goto fail;
68 
69  flags = bytestream_get_le32(&data);
70  size -= 4;
71  size -= 4;
72  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
73  if (size < 4)
74  goto fail;
75  val = bytestream_get_le32(&data);
76  if (val <= 0 || val > INT_MAX) {
77  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
78  ret = AVERROR_INVALIDDATA;
79  goto fail2;
80  }
81  avctx->channels = val;
82  size -= 4;
83  }
84  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
85  if (size < 8)
86  goto fail;
87  avctx->channel_layout = bytestream_get_le64(&data);
88  size -= 8;
89  }
90  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
91  if (size < 4)
92  goto fail;
93  val = bytestream_get_le32(&data);
94  if (val <= 0 || val > INT_MAX) {
95  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
96  ret = AVERROR_INVALIDDATA;
97  goto fail2;
98  }
99  avctx->sample_rate = val;
100  size -= 4;
101  }
102  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
103  if (size < 8)
104  goto fail;
105  avctx->width = bytestream_get_le32(&data);
106  avctx->height = bytestream_get_le32(&data);
107  size -= 8;
108  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
109  if (ret < 0)
110  goto fail2;
111  }
112 
113  return 0;
114 fail:
115  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
116  ret = AVERROR_INVALIDDATA;
117 fail2:
118  if (ret < 0) {
119  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
120  if (avctx->err_recognition & AV_EF_EXPLODE)
121  return ret;
122  }
123  return 0;
124 }
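/* Illustrative sketch (not part of this file): how a caller could attach the
 * AV_PKT_DATA_PARAM_CHANGE side data that apply_param_change() above consumes.
 * The layout mirrors the parsing logic: a little-endian flags word followed by
 * the values selected by those flags. The packet variable `pkt` is assumed to
 * be a caller-owned AVPacket; only the sample-rate flag is shown here.
 *
 *     uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 8);
 *     if (sd) {
 *         uint8_t *p = sd;
 *         bytestream_put_le32(&p, AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE);
 *         bytestream_put_le32(&p, 48000); // new sample rate, in Hz
 *     }
 */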
125 
126 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
127 {
128  int ret = 0;
129 
131  if (pkt) {
132  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
133  if (!ret)
134  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
135  }
136  return ret;
137 }
138 
139 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
140 {
141  int ret;
142 
143  /* move the original frame to our backup */
144  av_frame_unref(avci->to_free);
145  av_frame_move_ref(avci->to_free, frame);
146 
147  /* now copy everything except the AVBufferRefs back
148  * note that we make a COPY of the side data, so calling av_frame_free() on
149  * the caller's frame will work properly */
150  ret = av_frame_copy_props(frame, avci->to_free);
151  if (ret < 0)
152  return ret;
153 
154  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
155  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
156  if (avci->to_free->extended_data != avci->to_free->data) {
157  int planes = avci->to_free->channels;
158  int size = planes * sizeof(*frame->extended_data);
159 
160  if (!size) {
161  av_frame_unref(frame);
162  return AVERROR_BUG;
163  }
164 
165  frame->extended_data = av_malloc(size);
166  if (!frame->extended_data) {
167  av_frame_unref(frame);
168  return AVERROR(ENOMEM);
169  }
170  memcpy(frame->extended_data, avci->to_free->extended_data,
171  size);
172  } else
173  frame->extended_data = frame->data;
174 
175  frame->format = avci->to_free->format;
176  frame->width = avci->to_free->width;
177  frame->height = avci->to_free->height;
178  frame->channel_layout = avci->to_free->channel_layout;
179  frame->nb_samples = avci->to_free->nb_samples;
180  frame->channels = avci->to_free->channels;
181 
182  return 0;
183 }
184 
185 int ff_decode_bsfs_init(AVCodecContext *avctx)
186 {
187  AVCodecInternal *avci = avctx->internal;
188  DecodeFilterContext *s = &avci->filter;
189  const char *bsfs_str;
190  int ret;
191 
192  if (s->nb_bsfs)
193  return 0;
194 
195  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
196  while (bsfs_str && *bsfs_str) {
197  AVBSFContext **tmp;
198  const AVBitStreamFilter *filter;
199  char *bsf, *bsf_options_str, *bsf_name;
200 
201  bsf = av_get_token(&bsfs_str, ",");
202  if (!bsf) {
203  ret = AVERROR(ENOMEM);
204  goto fail;
205  }
206  bsf_name = av_strtok(bsf, "=", &bsf_options_str);
207  if (!bsf_name) {
208  av_freep(&bsf);
209  ret = AVERROR(ENOMEM);
210  goto fail;
211  }
212 
213  filter = av_bsf_get_by_name(bsf_name);
214  if (!filter) {
215  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
216  "requested by a decoder. This is a bug, please report it.\n",
217  bsf_name);
218  av_freep(&bsf);
219  ret = AVERROR_BUG;
220  goto fail;
221  }
222 
223  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
224  if (!tmp) {
225  av_freep(&bsf);
226  ret = AVERROR(ENOMEM);
227  goto fail;
228  }
229  s->bsfs = tmp;
230  s->nb_bsfs++;
231 
232  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
233  if (ret < 0) {
234  av_freep(&bsf);
235  goto fail;
236  }
237 
238  if (s->nb_bsfs == 1) {
239  /* We do not currently have an API for passing the input timebase into decoders,
240  * but no filters used here should actually need it.
241  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
242  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
243  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
244  avctx);
245  } else {
246  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
247  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
248  s->bsfs[s->nb_bsfs - 2]->par_out);
249  }
250  if (ret < 0) {
251  av_freep(&bsf);
252  goto fail;
253  }
254 
255  if (bsf_options_str && filter->priv_class) {
256  const AVOption *opt = av_opt_next(s->bsfs[s->nb_bsfs - 1]->priv_data, NULL);
257  const char * shorthand[2] = {NULL};
258 
259  if (opt)
260  shorthand[0] = opt->name;
261 
262  ret = av_opt_set_from_string(s->bsfs[s->nb_bsfs - 1]->priv_data, bsf_options_str, shorthand, "=", ":");
263  if (ret < 0) {
264  if (ret != AVERROR(ENOMEM)) {
265  av_log(avctx, AV_LOG_ERROR, "Invalid options for bitstream filter %s "
266  "requested by the decoder. This is a bug, please report it.\n",
267  bsf_name);
268  ret = AVERROR_BUG;
269  }
270  av_freep(&bsf);
271  goto fail;
272  }
273  }
274  av_freep(&bsf);
275 
276  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
277  if (ret < 0)
278  goto fail;
279 
280  if (*bsfs_str)
281  bsfs_str++;
282  }
283 
284  return 0;
285 fail:
286  ff_decode_bsfs_uninit(avctx);
287  return ret;
288 }
289 
290 /* try to get one output packet from the filter chain */
291 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
292 {
293  DecodeFilterContext *s = &avctx->internal->filter;
294  int idx, ret;
295 
296  /* start with the last filter in the chain */
297  idx = s->nb_bsfs - 1;
298  while (idx >= 0) {
299  /* request a packet from the currently selected filter */
300  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
301  if (ret == AVERROR(EAGAIN)) {
302  /* no packets available, try the next filter up the chain */
303  ret = 0;
304  idx--;
305  continue;
306  } else if (ret < 0 && ret != AVERROR_EOF) {
307  return ret;
308  }
309 
310  /* got a packet or EOF -- pass it to the caller or to the next filter
311  * down the chain */
312  if (idx == s->nb_bsfs - 1) {
313  return ret;
314  } else {
315  idx++;
316  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
317  if (ret < 0) {
318  av_log(avctx, AV_LOG_ERROR,
319  "Error pre-processing a packet before decoding\n");
320  av_packet_unref(pkt);
321  return ret;
322  }
323  }
324  }
325 
326  return AVERROR(EAGAIN);
327 }
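/* Illustrative sketch (not part of this file): the same send/receive pattern
 * used by bsfs_poll() above, applied to a single standalone bitstream filter.
 * Error handling is reduced to the essentials; "h264_mp4toannexb" is only an
 * example filter name, and stream_par / in_pkt / out_pkt are hypothetical
 * caller-owned objects (codec parameters from a demuxer, input and output
 * packets).
 *
 *     const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
 *     AVBSFContext *bsf = NULL;
 *     av_bsf_alloc(f, &bsf);
 *     avcodec_parameters_copy(bsf->par_in, stream_par);
 *     av_bsf_init(bsf);
 *
 *     av_bsf_send_packet(bsf, in_pkt);               // feed one input packet
 *     while (av_bsf_receive_packet(bsf, out_pkt) == 0) {
 *         // ... consume out_pkt ...
 *         av_packet_unref(out_pkt);
 *     }
 *     av_bsf_free(&bsf);
 */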
328 
329 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
330 {
331  AVCodecInternal *avci = avctx->internal;
332  int ret;
333 
334  if (avci->draining)
335  return AVERROR_EOF;
336 
337  ret = bsfs_poll(avctx, pkt);
338  if (ret == AVERROR_EOF)
339  avci->draining = 1;
340  if (ret < 0)
341  return ret;
342 
343  ret = extract_packet_props(avctx->internal, pkt);
344  if (ret < 0)
345  goto finish;
346 
347  ret = apply_param_change(avctx, pkt);
348  if (ret < 0)
349  goto finish;
350 
351  if (avctx->codec->receive_frame)
352  avci->compat_decode_consumed += pkt->size;
353 
354  return 0;
355 finish:
356  av_packet_unref(pkt);
357  return ret;
358 }
359 
360 /**
361  * Attempt to guess proper monotonic timestamps for decoded video frames
362  * which might have incorrect times. Input timestamps may wrap around, in
363  * which case the output will as well.
364  *
365  * @param pts the pts field of the decoded AVPacket, as passed through
366  * AVFrame.pts
367  * @param dts the dts field of the decoded AVPacket
368  * @return one of the input values, may be AV_NOPTS_VALUE
369  */
370 static int64_t guess_correct_pts(AVCodecContext *ctx,
371  int64_t reordered_pts, int64_t dts)
372 {
373  int64_t pts = AV_NOPTS_VALUE;
374 
375  if (dts != AV_NOPTS_VALUE) {
376  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
377  ctx->pts_correction_last_dts = dts;
378  } else if (reordered_pts != AV_NOPTS_VALUE)
379  ctx->pts_correction_last_dts = reordered_pts;
380 
381  if (reordered_pts != AV_NOPTS_VALUE) {
382  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
383  ctx->pts_correction_last_pts = reordered_pts;
384  } else if(dts != AV_NOPTS_VALUE)
385  ctx->pts_correction_last_pts = dts;
386 
387  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
388  && reordered_pts != AV_NOPTS_VALUE)
389  pts = reordered_pts;
390  else
391  pts = dts;
392 
393  return pts;
394 }
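/* Worked example for guess_correct_pts() (illustrative, not part of this file):
 * if a stream carries pts 0, 80, 40, 120 (non-monotonic, e.g. because a muxer
 * mishandled B-frame reordering) while dts is a clean 0, 40, 80, 120, then the
 * third call increments pts_correction_num_faulty_pts (40 <= 80) while
 * pts_correction_num_faulty_dts stays 0, so the function falls back to dts and
 * the returned timestamps become monotonic again. */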
395 
396 /*
397  * The core of the receive_frame_wrapper for the decoders implementing
398  * the simple API. Certain decoders might consume partial packets without
399  * returning any output, so this function needs to be called in a loop until it
400  * returns EAGAIN.
401  **/
402 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
403 {
404  AVCodecInternal *avci = avctx->internal;
405  DecodeSimpleContext *ds = &avci->ds;
406  AVPacket *pkt = ds->in_pkt;
407  // copy to ensure we do not change pkt
408  int got_frame, actual_got_frame;
409  int ret;
410 
411  if (!pkt->data && !avci->draining) {
412  av_packet_unref(pkt);
413  ret = ff_decode_get_packet(avctx, pkt);
414  if (ret < 0 && ret != AVERROR_EOF)
415  return ret;
416  }
417 
418  // Some codecs (at least wma lossless) will crash when feeding drain packets
419  // after EOF was signaled.
420  if (avci->draining_done)
421  return AVERROR_EOF;
422 
423  if (!pkt->data &&
424  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
425  avctx->active_thread_type & FF_THREAD_FRAME))
426  return AVERROR_EOF;
427 
428  got_frame = 0;
429 
430  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
431  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
432  } else {
433  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
434 
435  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
436  frame->pkt_dts = pkt->dts;
437  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
438  if(!avctx->has_b_frames)
439  frame->pkt_pos = pkt->pos;
440  //FIXME these should be under if(!avctx->has_b_frames)
441  /* get_buffer is supposed to set frame parameters */
442  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
443  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
444  if (!frame->width) frame->width = avctx->width;
445  if (!frame->height) frame->height = avctx->height;
446  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
447  }
448  }
449  }
450  emms_c();
451  actual_got_frame = got_frame;
452 
453  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
454  if (frame->flags & AV_FRAME_FLAG_DISCARD)
455  got_frame = 0;
456  if (got_frame)
457  frame->best_effort_timestamp = guess_correct_pts(avctx,
458  frame->pts,
459  frame->pkt_dts);
460  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
461  uint8_t *side;
462  int side_size;
463  uint32_t discard_padding = 0;
464  uint8_t skip_reason = 0;
465  uint8_t discard_reason = 0;
466 
467  if (ret >= 0 && got_frame) {
468  frame->best_effort_timestamp = guess_correct_pts(avctx,
469  frame->pts,
470  frame->pkt_dts);
471  if (frame->format == AV_SAMPLE_FMT_NONE)
472  frame->format = avctx->sample_fmt;
473  if (!frame->channel_layout)
474  frame->channel_layout = avctx->channel_layout;
475  if (!frame->channels)
476  frame->channels = avctx->channels;
477  if (!frame->sample_rate)
478  frame->sample_rate = avctx->sample_rate;
479  }
480 
481  side = av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
482  if(side && side_size>=10) {
484  discard_padding = AV_RL32(side + 4);
485  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
486  avctx->internal->skip_samples, (int)discard_padding);
487  skip_reason = AV_RL8(side + 8);
488  discard_reason = AV_RL8(side + 9);
489  }
490 
491  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
492  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
493  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
494  got_frame = 0;
495  }
496 
497  if (avctx->internal->skip_samples > 0 && got_frame &&
498  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
499  if(frame->nb_samples <= avctx->internal->skip_samples){
500  got_frame = 0;
501  avctx->internal->skip_samples -= frame->nb_samples;
502  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
503  avctx->internal->skip_samples);
504  } else {
505  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
506  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
507  if(avctx->pkt_timebase.num && avctx->sample_rate) {
508  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
509  (AVRational){1, avctx->sample_rate},
510  avctx->pkt_timebase);
511  if(frame->pts!=AV_NOPTS_VALUE)
512  frame->pts += diff_ts;
513 #if FF_API_PKT_PTS
514 FF_DISABLE_DEPRECATION_WARNINGS
515  if(frame->pkt_pts!=AV_NOPTS_VALUE)
516  frame->pkt_pts += diff_ts;
517 FF_ENABLE_DEPRECATION_WARNINGS
518 #endif
519  if(frame->pkt_dts!=AV_NOPTS_VALUE)
520  frame->pkt_dts += diff_ts;
521  if (frame->pkt_duration >= diff_ts)
522  frame->pkt_duration -= diff_ts;
523  } else {
524  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
525  }
526  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
527  avctx->internal->skip_samples, frame->nb_samples);
528  frame->nb_samples -= avctx->internal->skip_samples;
529  avctx->internal->skip_samples = 0;
530  }
531  }
532 
533  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
534  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
535  if (discard_padding == frame->nb_samples) {
536  got_frame = 0;
537  } else {
538  if(avctx->pkt_timebase.num && avctx->sample_rate) {
539  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
540  (AVRational){1, avctx->sample_rate},
541  avctx->pkt_timebase);
542  frame->pkt_duration = diff_ts;
543  } else {
544  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
545  }
546  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
547  (int)discard_padding, frame->nb_samples);
548  frame->nb_samples -= discard_padding;
549  }
550  }
551 
552  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
553  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
554  if (fside) {
555  AV_WL32(fside->data, avctx->internal->skip_samples);
556  AV_WL32(fside->data + 4, discard_padding);
557  AV_WL8(fside->data + 8, skip_reason);
558  AV_WL8(fside->data + 9, discard_reason);
559  avctx->internal->skip_samples = 0;
560  }
561  }
562  }
563 
564  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
565  !avci->showed_multi_packet_warning &&
566  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
567  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
568  avci->showed_multi_packet_warning = 1;
569  }
570 
571  if (!got_frame)
572  av_frame_unref(frame);
573 
574  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
575  ret = pkt->size;
576 
577 #if FF_API_AVCTX_TIMEBASE
578  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
579  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
580 #endif
581 
582  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
583  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
584  if (avctx->internal->draining && !actual_got_frame) {
585  if (ret < 0) {
586  /* prevent an infinite loop if a decoder wrongly always returns an error when draining */
587  /* reasonable nb_errors_max = maximum b frames + thread count */
588  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
589  avctx->thread_count : 1);
590 
591  if (avci->nb_draining_errors++ >= nb_errors_max) {
592  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
593  "Stop draining and force EOF.\n");
594  avci->draining_done = 1;
595  ret = AVERROR_BUG;
596  }
597  } else {
598  avci->draining_done = 1;
599  }
600  }
601 
602  avci->compat_decode_consumed += ret;
603 
604  if (ret >= pkt->size || ret < 0) {
605  av_packet_unref(pkt);
606  } else {
607  int consumed = ret;
608 
609  pkt->data += consumed;
610  pkt->size -= consumed;
611  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
612  pkt->pts = AV_NOPTS_VALUE;
613  pkt->dts = AV_NOPTS_VALUE;
614  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
615  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
616  }
617 
618  if (got_frame)
619  av_assert0(frame->buf[0]);
620 
621  return ret < 0 ? ret : 0;
622 }
623 
624 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
625 {
626  int ret;
627 
628  while (!frame->buf[0]) {
629  ret = decode_simple_internal(avctx, frame);
630  if (ret < 0)
631  return ret;
632  }
633 
634  return 0;
635 }
636 
637 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
638 {
639  AVCodecInternal *avci = avctx->internal;
640  int ret;
641 
642  av_assert0(!frame->buf[0]);
643 
644  if (avctx->codec->receive_frame)
645  ret = avctx->codec->receive_frame(avctx, frame);
646  else
647  ret = decode_simple_receive_frame(avctx, frame);
648 
649  if (ret == AVERROR_EOF)
650  avci->draining_done = 1;
651 
652  if (!ret) {
653  /* the only case where decode data is not set should be decoders
654  * that do not call ff_get_buffer() */
655  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
656  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
657 
658  if (frame->private_ref) {
659  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
660 
661  if (fdd->post_process) {
662  ret = fdd->post_process(avctx, frame);
663  if (ret < 0) {
664  av_frame_unref(frame);
665  return ret;
666  }
667  }
668  }
669  }
670 
671  /* free the per-frame decode data */
672  av_buffer_unref(&frame->private_ref);
673 
674  return ret;
675 }
676 
677 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
678 {
679  AVCodecInternal *avci = avctx->internal;
680  int ret;
681 
682  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
683  return AVERROR(EINVAL);
684 
685  if (avctx->internal->draining)
686  return AVERROR_EOF;
687 
688  if (avpkt && !avpkt->size && avpkt->data)
689  return AVERROR(EINVAL);
690 
691  av_packet_unref(avci->buffer_pkt);
692  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
693  ret = av_packet_ref(avci->buffer_pkt, avpkt);
694  if (ret < 0)
695  return ret;
696  }
697 
698  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
699  if (ret < 0) {
700  av_packet_unref(avci->buffer_pkt);
701  return ret;
702  }
703 
704  if (!avci->buffer_frame->buf[0]) {
705  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
706  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
707  return ret;
708  }
709 
710  return 0;
711 }
712 
713 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
714 {
715  /* make sure we are noisy about decoders returning invalid cropping data */
716  if (frame->crop_left >= INT_MAX - frame->crop_right ||
717  frame->crop_top >= INT_MAX - frame->crop_bottom ||
718  (frame->crop_left + frame->crop_right) >= frame->width ||
719  (frame->crop_top + frame->crop_bottom) >= frame->height) {
720  av_log(avctx, AV_LOG_WARNING,
721  "Invalid cropping information set by a decoder: "
722  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
723  "(frame size %dx%d). This is a bug, please report it\n",
724  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
725  frame->width, frame->height);
726  frame->crop_left = 0;
727  frame->crop_right = 0;
728  frame->crop_top = 0;
729  frame->crop_bottom = 0;
730  return 0;
731  }
732 
733  if (!avctx->apply_cropping)
734  return 0;
735 
736  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
737  AV_FRAME_CROP_UNALIGNED : 0);
738 }
739 
740 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
741 {
742  AVCodecInternal *avci = avctx->internal;
743  int ret;
744 
745  av_frame_unref(frame);
746 
747  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
748  return AVERROR(EINVAL);
749 
750  if (avci->buffer_frame->buf[0]) {
751  av_frame_move_ref(frame, avci->buffer_frame);
752  } else {
753  ret = decode_receive_frame_internal(avctx, frame);
754  if (ret < 0)
755  return ret;
756  }
757 
758  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
759  ret = apply_cropping(avctx, frame);
760  if (ret < 0) {
761  av_frame_unref(frame);
762  return ret;
763  }
764  }
765 
766  avctx->frame_number++;
767 
768  return 0;
769 }
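/* Illustrative usage sketch (not part of this file): the send/receive decoding
 * loop that avcodec_send_packet()/avcodec_receive_frame() above are designed
 * for. decode_all() is a hypothetical helper; demuxing and flushing at EOF are
 * reduced to the essentials.
 *
 *     static int decode_all(AVCodecContext *dec, AVPacket *pkt, AVFrame *frame)
 *     {
 *         int ret = avcodec_send_packet(dec, pkt); // pkt == NULL starts draining
 *         if (ret < 0)
 *             return ret;
 *         while (ret >= 0) {
 *             ret = avcodec_receive_frame(dec, frame);
 *             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *                 return 0;          // need more input, or fully drained
 *             if (ret < 0)
 *                 return ret;        // a real decoding error
 *             // ... use frame ...
 *             av_frame_unref(frame);
 *         }
 *         return 0;
 *     }
 */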
770 
771 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
772  int *got_frame, const AVPacket *pkt)
773 {
774  AVCodecInternal *avci = avctx->internal;
775  int ret = 0;
776 
777  av_assert0(avci->compat_decode_consumed == 0);
778 
779  if (avci->draining_done && pkt && pkt->size != 0) {
780  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
781  avcodec_flush_buffers(avctx);
782  }
783 
784  *got_frame = 0;
785  avci->compat_decode = 1;
786 
787  if (avci->compat_decode_partial_size > 0 &&
788  avci->compat_decode_partial_size != pkt->size) {
789  av_log(avctx, AV_LOG_ERROR,
790  "Got unexpected packet size after a partial decode\n");
791  ret = AVERROR(EINVAL);
792  goto finish;
793  }
794 
795  if (!avci->compat_decode_partial_size) {
796  ret = avcodec_send_packet(avctx, pkt);
797  if (ret == AVERROR_EOF)
798  ret = 0;
799  else if (ret == AVERROR(EAGAIN)) {
800  /* we fully drain all the output in each decode call, so this should not
801  * ever happen */
802  ret = AVERROR_BUG;
803  goto finish;
804  } else if (ret < 0)
805  goto finish;
806  }
807 
808  while (ret >= 0) {
809  ret = avcodec_receive_frame(avctx, frame);
810  if (ret < 0) {
811  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
812  ret = 0;
813  goto finish;
814  }
815 
816  if (frame != avci->compat_decode_frame) {
817  if (!avctx->refcounted_frames) {
818  ret = unrefcount_frame(avci, frame);
819  if (ret < 0)
820  goto finish;
821  }
822 
823  *got_frame = 1;
824  frame = avci->compat_decode_frame;
825  } else {
826  if (!avci->compat_decode_warned) {
827  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
828  "API cannot return all the frames for this decoder. "
829  "Some frames will be dropped. Update your code to the "
830  "new decoding API to fix this.\n");
831  avci->compat_decode_warned = 1;
832  }
833  }
834 
835  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
836  break;
837  }
838 
839 finish:
840  if (ret == 0) {
841  /* if there are any bsfs then assume full packet is always consumed */
842  if (avctx->codec->bsfs)
843  ret = pkt->size;
844  else
845  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
846  }
847  avci->compat_decode_consumed = 0;
848  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
849 
850  return ret;
851 }
852 
853 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
854  int *got_picture_ptr,
855  const AVPacket *avpkt)
856 {
857  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
858 }
859 
860 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
861  AVFrame *frame,
862  int *got_frame_ptr,
863  const AVPacket *avpkt)
864 {
865  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
866 }
867 
868 static void get_subtitle_defaults(AVSubtitle *sub)
869 {
870  memset(sub, 0, sizeof(*sub));
871  sub->pts = AV_NOPTS_VALUE;
872 }
873 
874 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
875 static int recode_subtitle(AVCodecContext *avctx,
876  AVPacket *outpkt, const AVPacket *inpkt)
877 {
878 #if CONFIG_ICONV
879  iconv_t cd = (iconv_t)-1;
880  int ret = 0;
881  char *inb, *outb;
882  size_t inl, outl;
883  AVPacket tmp;
884 #endif
885 
886  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
887  return 0;
888 
889 #if CONFIG_ICONV
890  cd = iconv_open("UTF-8", avctx->sub_charenc);
891  av_assert0(cd != (iconv_t)-1);
892 
893  inb = inpkt->data;
894  inl = inpkt->size;
895 
896  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
897  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
898  ret = AVERROR(ENOMEM);
899  goto end;
900  }
901 
902  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
903  if (ret < 0)
904  goto end;
905  outpkt->buf = tmp.buf;
906  outpkt->data = tmp.data;
907  outpkt->size = tmp.size;
908  outb = outpkt->data;
909  outl = outpkt->size;
910 
911  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
912  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
913  outl >= outpkt->size || inl != 0) {
914  ret = FFMIN(AVERROR(errno), -1);
915  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
916  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
917  av_packet_unref(&tmp);
918  goto end;
919  }
920  outpkt->size -= outl;
921  memset(outpkt->data + outpkt->size, 0, outl);
922 
923 end:
924  if (cd != (iconv_t)-1)
925  iconv_close(cd);
926  return ret;
927 #else
928  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
929  return AVERROR(EINVAL);
930 #endif
931 }
932 
933 static int utf8_check(const uint8_t *str)
934 {
935  const uint8_t *byte;
936  uint32_t codepoint, min;
937 
938  while (*str) {
939  byte = str;
940  GET_UTF8(codepoint, *(byte++), return 0;);
941  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
942  1 << (5 * (byte - str) - 4);
943  if (codepoint < min || codepoint >= 0x110000 ||
944  codepoint == 0xFFFE /* BOM */ ||
945  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
946  return 0;
947  str = byte;
948  }
949  return 1;
950 }
951 
952 #if FF_API_ASS_TIMING
953 static void insert_ts(AVBPrint *buf, int ts)
954 {
955  if (ts == -1) {
956  av_bprintf(buf, "9:59:59.99,");
957  } else {
958  int h, m, s;
959 
960  h = ts/360000; ts -= 360000*h;
961  m = ts/ 6000; ts -= 6000*m;
962  s = ts/ 100; ts -= 100*s;
963  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
964  }
965 }
966 
967 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
968 {
969  int i;
970  AVBPrint buf;
971 
972  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
973 
974  for (i = 0; i < sub->num_rects; i++) {
975  char *final_dialog;
976  const char *dialog;
977  AVSubtitleRect *rect = sub->rects[i];
978  int ts_start, ts_duration = -1;
979  long int layer;
980 
981  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
982  continue;
983 
984  av_bprint_clear(&buf);
985 
986  /* skip ReadOrder */
987  dialog = strchr(rect->ass, ',');
988  if (!dialog)
989  continue;
990  dialog++;
991 
992  /* extract Layer or Marked */
993  layer = strtol(dialog, (char**)&dialog, 10);
994  if (*dialog != ',')
995  continue;
996  dialog++;
997 
998  /* rescale timing to ASS time base (ms) */
999  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
1000  if (pkt->duration != -1)
1001  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
1002  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
1003 
1004  /* construct ASS (standalone file form with timestamps) string */
1005  av_bprintf(&buf, "Dialogue: %ld,", layer);
1006  insert_ts(&buf, ts_start);
1007  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
1008  av_bprintf(&buf, "%s\r\n", dialog);
1009 
1010  final_dialog = av_strdup(buf.str);
1011  if (!av_bprint_is_complete(&buf) || !final_dialog) {
1012  av_freep(&final_dialog);
1013  av_bprint_finalize(&buf, NULL);
1014  return AVERROR(ENOMEM);
1015  }
1016  av_freep(&rect->ass);
1017  rect->ass = final_dialog;
1018  }
1019 
1020  av_bprint_finalize(&buf, NULL);
1021  return 0;
1022 }
1023 #endif
1024 
1025 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1026  int *got_sub_ptr,
1027  AVPacket *avpkt)
1028 {
1029  int i, ret = 0;
1030 
1031  if (!avpkt->data && avpkt->size) {
1032  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1033  return AVERROR(EINVAL);
1034  }
1035  if (!avctx->codec)
1036  return AVERROR(EINVAL);
1037  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1038  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1039  return AVERROR(EINVAL);
1040  }
1041 
1042  *got_sub_ptr = 0;
1043  get_subtitle_defaults(sub);
1044 
1045  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1046  AVPacket pkt_recoded = *avpkt;
1047 
1048  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1049  if (ret < 0) {
1050  *got_sub_ptr = 0;
1051  } else {
1052  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1053  if (ret < 0)
1054  return ret;
1055 
1056  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1057  sub->pts = av_rescale_q(avpkt->pts,
1058  avctx->pkt_timebase, AV_TIME_BASE_Q);
1059  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1060  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1061  !!*got_sub_ptr >= !!sub->num_rects);
1062 
1063 #if FF_API_ASS_TIMING
1064  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1065  && *got_sub_ptr && sub->num_rects) {
1066  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1067  : avctx->time_base;
1068  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1069  if (err < 0)
1070  ret = err;
1071  }
1072 #endif
1073 
1074  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1075  avctx->pkt_timebase.num) {
1076  AVRational ms = { 1, 1000 };
1077  sub->end_display_time = av_rescale_q(avpkt->duration,
1078  avctx->pkt_timebase, ms);
1079  }
1080 
1081  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1082  sub->format = 0;
1083  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1084  sub->format = 1;
1085 
1086  for (i = 0; i < sub->num_rects; i++) {
1087  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1088  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1089  av_log(avctx, AV_LOG_ERROR,
1090  "Invalid UTF-8 in decoded subtitles text; "
1091  "maybe missing -sub_charenc option\n");
1092  avsubtitle_free(sub);
1093  ret = AVERROR_INVALIDDATA;
1094  break;
1095  }
1096  }
1097 
1098  if (avpkt->data != pkt_recoded.data) { // did we recode?
1099  /* prevent from destroying side data from original packet */
1100  pkt_recoded.side_data = NULL;
1101  pkt_recoded.side_data_elems = 0;
1102 
1103  av_packet_unref(&pkt_recoded);
1104  }
1105  }
1106 
1107  if (*got_sub_ptr)
1108  avctx->frame_number++;
1109  }
1110 
1111  return ret;
1112 }
1113 
1114 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1115  const enum AVPixelFormat *fmt)
1116 {
1117  const AVPixFmtDescriptor *desc;
1118  const AVCodecHWConfig *config;
1119  int i, n;
1120 
1121  // If a device was supplied when the codec was opened, assume that the
1122  // user wants to use it.
1123  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1124  AVHWDeviceContext *device_ctx =
1125  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1126  for (i = 0;; i++) {
1127  config = &avctx->codec->hw_configs[i]->public;
1128  if (!config)
1129  break;
1130  if (!(config->methods &
1131  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1132  continue;
1133  if (device_ctx->type != config->device_type)
1134  continue;
1135  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1136  if (config->pix_fmt == fmt[n])
1137  return fmt[n];
1138  }
1139  }
1140  }
1141  // No device or other setup, so we have to choose from things which
1142  // don't require any other external information.
1143 
1144  // If the last element of the list is a software format, choose it
1145  // (this should be best software format if any exist).
1146  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1147  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1148  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1149  return fmt[n - 1];
1150 
1151  // Finally, traverse the list in order and choose the first entry
1152  // with no external dependencies (if there is no hardware configuration
1153  // information available then this just picks the first entry).
1154  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1155  for (i = 0;; i++) {
1156  config = avcodec_get_hw_config(avctx->codec, i);
1157  if (!config)
1158  break;
1159  if (config->pix_fmt == fmt[n])
1160  break;
1161  }
1162  if (!config) {
1163  // No specific config available, so the decoder must be able
1164  // to handle this format without any additional setup.
1165  return fmt[n];
1166  }
1168  // Usable with only internal setup.
1169  return fmt[n];
1170  }
1171  }
1172 
1173  // Nothing is usable, give up.
1174  return AV_PIX_FMT_NONE;
1175 }
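/* Illustrative sketch (not part of this file): a user-supplied get_format
 * callback, for which avcodec_default_get_format() above is the fallback when
 * none is set. It walks the offered list and picks a hardware pixel format if
 * present, otherwise takes the first entry. AV_PIX_FMT_VAAPI is just an
 * example; any format the caller has prepared a device/frames context for
 * would do, and pick_vaapi is a hypothetical name.
 *
 *     static enum AVPixelFormat pick_vaapi(AVCodecContext *avctx,
 *                                          const enum AVPixelFormat *fmt)
 *     {
 *         for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
 *             if (fmt[i] == AV_PIX_FMT_VAAPI)
 *                 return fmt[i];          // hardware format offered, take it
 *         }
 *         return fmt[0];                  // otherwise fall back to the first choice
 *     }
 *     // ... set avctx->get_format = pick_vaapi; before avcodec_open2()
 */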
1176 
1177 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1178  enum AVHWDeviceType dev_type)
1179 {
1180  AVHWDeviceContext *device_ctx;
1181  AVHWFramesContext *frames_ctx;
1182  int ret;
1183 
1184  if (!avctx->hwaccel)
1185  return AVERROR(ENOSYS);
1186 
1187  if (avctx->hw_frames_ctx)
1188  return 0;
1189  if (!avctx->hw_device_ctx) {
1190  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1191  "required for hardware accelerated decoding.\n");
1192  return AVERROR(EINVAL);
1193  }
1194 
1195  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1196  if (device_ctx->type != dev_type) {
1197  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1198  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1199  av_hwdevice_get_type_name(device_ctx->type));
1200  return AVERROR(EINVAL);
1201  }
1202 
1203  ret = avcodec_get_hw_frames_parameters(avctx,
1204  avctx->hw_device_ctx,
1205  avctx->hwaccel->pix_fmt,
1206  &avctx->hw_frames_ctx);
1207  if (ret < 0)
1208  return ret;
1209 
1210  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1211 
1212 
1213  if (frames_ctx->initial_pool_size) {
1214  // We guarantee 4 base work surfaces. The function above guarantees 1
1215  // (the absolute minimum), so add the missing count.
1216  frames_ctx->initial_pool_size += 3;
1217  }
1218 
1219  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1220  if (ret < 0) {
1221  av_buffer_unref(&avctx->hw_frames_ctx);
1222  return ret;
1223  }
1224 
1225  return 0;
1226 }
1227 
1228 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1229  AVBufferRef *device_ref,
1230  enum AVPixelFormat hw_pix_fmt,
1231  AVBufferRef **out_frames_ref)
1232 {
1233  AVBufferRef *frames_ref = NULL;
1234  const AVCodecHWConfigInternal *hw_config;
1235  const AVHWAccel *hwa;
1236  int i, ret;
1237 
1238  for (i = 0;; i++) {
1239  hw_config = avctx->codec->hw_configs[i];
1240  if (!hw_config)
1241  return AVERROR(ENOENT);
1242  if (hw_config->public.pix_fmt == hw_pix_fmt)
1243  break;
1244  }
1245 
1246  hwa = hw_config->hwaccel;
1247  if (!hwa || !hwa->frame_params)
1248  return AVERROR(ENOENT);
1249 
1250  frames_ref = av_hwframe_ctx_alloc(device_ref);
1251  if (!frames_ref)
1252  return AVERROR(ENOMEM);
1253 
1254  ret = hwa->frame_params(avctx, frames_ref);
1255  if (ret >= 0) {
1256  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1257 
1258  if (frames_ctx->initial_pool_size) {
1259  // If the user has requested that extra output surfaces be
1260  // available then add them here.
1261  if (avctx->extra_hw_frames > 0)
1262  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1263 
1264  // If frame threading is enabled then an extra surface per thread
1265  // is also required.
1266  if (avctx->active_thread_type & FF_THREAD_FRAME)
1267  frames_ctx->initial_pool_size += avctx->thread_count;
1268  }
1269 
1270  *out_frames_ref = frames_ref;
1271  } else {
1272  av_buffer_unref(&frames_ref);
1273  }
1274  return ret;
1275 }
1276 
1277 static int hwaccel_init(AVCodecContext *avctx,
1278  const AVCodecHWConfigInternal *hw_config)
1279 {
1280  const AVHWAccel *hwaccel;
1281  int err;
1282 
1283  hwaccel = hw_config->hwaccel;
1284  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1285  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1286  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1287  hwaccel->name);
1288  return AVERROR_PATCHWELCOME;
1289  }
1290 
1291  if (hwaccel->priv_data_size) {
1292  avctx->internal->hwaccel_priv_data =
1293  av_mallocz(hwaccel->priv_data_size);
1294  if (!avctx->internal->hwaccel_priv_data)
1295  return AVERROR(ENOMEM);
1296  }
1297 
1298  avctx->hwaccel = hwaccel;
1299  if (hwaccel->init) {
1300  err = hwaccel->init(avctx);
1301  if (err < 0) {
1302  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1303  "hwaccel initialisation returned error.\n",
1304  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1305  av_freep(&avctx->internal->hwaccel_priv_data);
1306  avctx->hwaccel = NULL;
1307  return err;
1308  }
1309  }
1310 
1311  return 0;
1312 }
1313 
1314 static void hwaccel_uninit(AVCodecContext *avctx)
1315 {
1316  if (avctx->hwaccel && avctx->hwaccel->uninit)
1317  avctx->hwaccel->uninit(avctx);
1318 
1319  av_freep(&avctx->internal->hwaccel_priv_data);
1320 
1321  avctx->hwaccel = NULL;
1322 
1323  av_buffer_unref(&avctx->hw_frames_ctx);
1324 }
1325 
1326 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1327 {
1328  const AVPixFmtDescriptor *desc;
1329  enum AVPixelFormat *choices;
1330  enum AVPixelFormat ret, user_choice;
1331  const AVCodecHWConfigInternal *hw_config;
1332  const AVCodecHWConfig *config;
1333  int i, n, err;
1334 
1335  // Find end of list.
1336  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1337  // Must contain at least one entry.
1338  av_assert0(n >= 1);
1339  // If a software format is available, it must be the last entry.
1340  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1341  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1342  // No software format is available.
1343  } else {
1344  avctx->sw_pix_fmt = fmt[n - 1];
1345  }
1346 
1347  choices = av_malloc_array(n + 1, sizeof(*choices));
1348  if (!choices)
1349  return AV_PIX_FMT_NONE;
1350 
1351  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1352 
1353  for (;;) {
1354  // Remove the previous hwaccel, if there was one.
1355  hwaccel_uninit(avctx);
1356 
1357  user_choice = avctx->get_format(avctx, choices);
1358  if (user_choice == AV_PIX_FMT_NONE) {
1359  // Explicitly chose nothing, give up.
1360  ret = AV_PIX_FMT_NONE;
1361  break;
1362  }
1363 
1364  desc = av_pix_fmt_desc_get(user_choice);
1365  if (!desc) {
1366  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1367  "get_format() callback.\n");
1368  ret = AV_PIX_FMT_NONE;
1369  break;
1370  }
1371  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1372  desc->name);
1373 
1374  for (i = 0; i < n; i++) {
1375  if (choices[i] == user_choice)
1376  break;
1377  }
1378  if (i == n) {
1379  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1380  "%s not in possible list.\n", desc->name);
1381  break;
1382  }
1383 
1384  if (avctx->codec->hw_configs) {
1385  for (i = 0;; i++) {
1386  hw_config = avctx->codec->hw_configs[i];
1387  if (!hw_config)
1388  break;
1389  if (hw_config->public.pix_fmt == user_choice)
1390  break;
1391  }
1392  } else {
1393  hw_config = NULL;
1394  }
1395 
1396  if (!hw_config) {
1397  // No config available, so no extra setup required.
1398  ret = user_choice;
1399  break;
1400  }
1401  config = &hw_config->public;
1402 
1403  if (config->methods &
1404  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1405  avctx->hw_frames_ctx) {
1406  const AVHWFramesContext *frames_ctx =
1407  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1408  if (frames_ctx->format != user_choice) {
1409  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1410  "does not match the format of the provided frames "
1411  "context.\n", desc->name);
1412  goto try_again;
1413  }
1414  } else if (config->methods &
1415  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1416  avctx->hw_device_ctx) {
1417  const AVHWDeviceContext *device_ctx =
1418  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1419  if (device_ctx->type != config->device_type) {
1420  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1421  "does not match the type of the provided device "
1422  "context.\n", desc->name);
1423  goto try_again;
1424  }
1425  } else if (config->methods &
1426  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1427  // Internal-only setup, no additional configuration.
1428  } else if (config->methods &
1429  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1430  // Some ad-hoc configuration we can't see and can't check.
1431  } else {
1432  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1433  "missing configuration.\n", desc->name);
1434  goto try_again;
1435  }
1436  if (hw_config->hwaccel) {
1437  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1438  "initialisation.\n", desc->name);
1439  err = hwaccel_init(avctx, hw_config);
1440  if (err < 0)
1441  goto try_again;
1442  }
1443  ret = user_choice;
1444  break;
1445 
1446  try_again:
1447  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1448  "get_format() without it.\n", desc->name);
1449  for (i = 0; i < n; i++) {
1450  if (choices[i] == user_choice)
1451  break;
1452  }
1453  for (; i + 1 < n; i++)
1454  choices[i] = choices[i + 1];
1455  --n;
1456  }
1457 
1458  av_freep(&choices);
1459  return ret;
1460 }
1461 
1462 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1463 {
1464  FramePool *pool = avctx->internal->pool;
1465  int i, ret;
1466 
1467  switch (avctx->codec_type) {
1468  case AVMEDIA_TYPE_VIDEO: {
1469  uint8_t *data[4];
1470  int linesize[4];
1471  int size[4] = { 0 };
1472  int w = frame->width;
1473  int h = frame->height;
1474  int tmpsize, unaligned;
1475 
1476  if (pool->format == frame->format &&
1477  pool->width == frame->width && pool->height == frame->height)
1478  return 0;
1479 
1480  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1481 
1482  do {
1483  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1484  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1485  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1486  if (ret < 0)
1487  return ret;
1488  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1489  w += w & ~(w - 1);
1490 
1491  unaligned = 0;
1492  for (i = 0; i < 4; i++)
1493  unaligned |= linesize[i] % pool->stride_align[i];
1494  } while (unaligned);
1495 
1496  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1497  NULL, linesize);
1498  if (tmpsize < 0)
1499  return tmpsize;
1500 
1501  for (i = 0; i < 3 && data[i + 1]; i++)
1502  size[i] = data[i + 1] - data[i];
1503  size[i] = tmpsize - (data[i] - data[0]);
1504 
1505  for (i = 0; i < 4; i++) {
1506  av_buffer_pool_uninit(&pool->pools[i]);
1507  pool->linesize[i] = linesize[i];
1508  if (size[i]) {
1509  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1510  CONFIG_MEMORY_POISONING ?
1511  NULL :
1512  av_buffer_allocz);
1513  if (!pool->pools[i]) {
1514  ret = AVERROR(ENOMEM);
1515  goto fail;
1516  }
1517  }
1518  }
1519  pool->format = frame->format;
1520  pool->width = frame->width;
1521  pool->height = frame->height;
1522 
1523  break;
1524  }
1525  case AVMEDIA_TYPE_AUDIO: {
1526  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1527  int planar = av_sample_fmt_is_planar(frame->format);
1528  int planes = planar ? ch : 1;
1529 
1530  if (pool->format == frame->format && pool->planes == planes &&
1531  pool->channels == ch && frame->nb_samples == pool->samples)
1532  return 0;
1533 
1534  av_buffer_pool_uninit(&pool->pools[0]);
1535  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1536  frame->nb_samples, frame->format, 0);
1537  if (ret < 0)
1538  goto fail;
1539 
1540  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1541  if (!pool->pools[0]) {
1542  ret = AVERROR(ENOMEM);
1543  goto fail;
1544  }
1545 
1546  pool->format = frame->format;
1547  pool->planes = planes;
1548  pool->channels = ch;
1549  pool->samples = frame->nb_samples;
1550  break;
1551  }
1552  default: av_assert0(0);
1553  }
1554  return 0;
1555 fail:
1556  for (i = 0; i < 4; i++)
1557  av_buffer_pool_uninit(&pool->pools[i]);
1558  pool->format = -1;
1559  pool->planes = pool->channels = pool->samples = 0;
1560  pool->width = pool->height = 0;
1561  return ret;
1562 }
1563 
1564 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1565 {
1566  FramePool *pool = avctx->internal->pool;
1567  int planes = pool->planes;
1568  int i;
1569 
1570  frame->linesize[0] = pool->linesize[0];
1571 
1572  if (planes > AV_NUM_DATA_POINTERS) {
1573  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1574  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1575  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1576  sizeof(*frame->extended_buf));
1577  if (!frame->extended_data || !frame->extended_buf) {
1578  av_freep(&frame->extended_data);
1579  av_freep(&frame->extended_buf);
1580  return AVERROR(ENOMEM);
1581  }
1582  } else {
1583  frame->extended_data = frame->data;
1584  av_assert0(frame->nb_extended_buf == 0);
1585  }
1586 
1587  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1588  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1589  if (!frame->buf[i])
1590  goto fail;
1591  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1592  }
1593  for (i = 0; i < frame->nb_extended_buf; i++) {
1594  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1595  if (!frame->extended_buf[i])
1596  goto fail;
1597  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1598  }
1599 
1600  if (avctx->debug & FF_DEBUG_BUFFERS)
1601  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1602 
1603  return 0;
1604 fail:
1605  av_frame_unref(frame);
1606  return AVERROR(ENOMEM);
1607 }
1608 
1609 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1610 {
1611  FramePool *pool = s->internal->pool;
1612  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1613  int i;
1614 
1615  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1616  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1617  return -1;
1618  }
1619 
1620  if (!desc) {
1621  av_log(s, AV_LOG_ERROR,
1622  "Unable to get pixel format descriptor for format %s\n",
1623  av_get_pix_fmt_name(pic->format));
1624  return AVERROR(EINVAL);
1625  }
1626 
1627  memset(pic->data, 0, sizeof(pic->data));
1628  pic->extended_data = pic->data;
1629 
1630  for (i = 0; i < 4 && pool->pools[i]; i++) {
1631  pic->linesize[i] = pool->linesize[i];
1632 
1633  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1634  if (!pic->buf[i])
1635  goto fail;
1636 
1637  pic->data[i] = pic->buf[i]->data;
1638  }
1639  for (; i < AV_NUM_DATA_POINTERS; i++) {
1640  pic->data[i] = NULL;
1641  pic->linesize[i] = 0;
1642  }
1643  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1644  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1645  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1646 
1647  if (s->debug & FF_DEBUG_BUFFERS)
1648  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1649 
1650  return 0;
1651 fail:
1652  av_frame_unref(pic);
1653  return AVERROR(ENOMEM);
1654 }
1655 
1656 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1657 {
1658  int ret;
1659 
1660  if (avctx->hw_frames_ctx) {
1661  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1662  frame->width = avctx->coded_width;
1663  frame->height = avctx->coded_height;
1664  return ret;
1665  }
1666 
1667  if ((ret = update_frame_pool(avctx, frame)) < 0)
1668  return ret;
1669 
1670  switch (avctx->codec_type) {
1671  case AVMEDIA_TYPE_VIDEO:
1672  return video_get_buffer(avctx, frame);
1673  case AVMEDIA_TYPE_AUDIO:
1674  return audio_get_buffer(avctx, frame);
1675  default:
1676  return -1;
1677  }
1678 }
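/* Illustrative sketch (not part of this file): a minimal user get_buffer2
 * callback. Custom implementations may allocate their own buffers, but can
 * always delegate to avcodec_default_get_buffer2() above, which is what this
 * hypothetical wrapper does after logging the request.
 *
 *     static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
 *     {
 *         av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d, format %d\n",
 *                frame->width, frame->height, frame->format);
 *         return avcodec_default_get_buffer2(avctx, frame, flags);
 *     }
 *     // ... set avctx->get_buffer2 = my_get_buffer2; before avcodec_open2()
 */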
1679 
1680 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1681 {
1682  int size;
1683  const uint8_t *side_metadata;
1684 
1685  AVDictionary **frame_md = &frame->metadata;
1686 
1687  side_metadata = av_packet_get_side_data(avpkt,
1688  AV_PKT_DATA_STRINGS_METADATA, &size);
1689  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1690 }
1691 
1692 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1693 {
1694  const AVPacket *pkt = avctx->internal->last_pkt_props;
1695  int i;
1696  static const struct {
1697  enum AVPacketSideDataType packet;
1698  enum AVFrameSideDataType frame;
1699  } sd[] = {
1708  };
1709 
1710  if (pkt) {
1711  frame->pts = pkt->pts;
1712 #if FF_API_PKT_PTS
1713 FF_DISABLE_DEPRECATION_WARNINGS
1714  frame->pkt_pts = pkt->pts;
1715 FF_ENABLE_DEPRECATION_WARNINGS
1716 #endif
1717  frame->pkt_pos = pkt->pos;
1718  frame->pkt_duration = pkt->duration;
1719  frame->pkt_size = pkt->size;
1720 
1721  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1722  int size;
1723  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1724  if (packet_sd) {
1725  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1726  sd[i].frame,
1727  size);
1728  if (!frame_sd)
1729  return AVERROR(ENOMEM);
1730 
1731  memcpy(frame_sd->data, packet_sd, size);
1732  }
1733  }
1734  add_metadata_from_side_data(pkt, frame);
1735 
1736  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1737  frame->flags |= AV_FRAME_FLAG_DISCARD;
1738  } else {
1739  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1740  }
1741  }
1742  frame->reordered_opaque = avctx->reordered_opaque;
1743 
1744  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1745  frame->color_primaries = avctx->color_primaries;
1746  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1747  frame->color_trc = avctx->color_trc;
1748  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1749  frame->colorspace = avctx->colorspace;
1750  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1751  frame->color_range = avctx->color_range;
1752  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1753  frame->chroma_location = avctx->chroma_sample_location;
1754 
1755  switch (avctx->codec->type) {
1756  case AVMEDIA_TYPE_VIDEO:
1757  frame->format = avctx->pix_fmt;
1758  if (!frame->sample_aspect_ratio.num)
1759  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1760 
1761  if (frame->width && frame->height &&
1762  av_image_check_sar(frame->width, frame->height,
1763  frame->sample_aspect_ratio) < 0) {
1764  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1765  frame->sample_aspect_ratio.num,
1766  frame->sample_aspect_ratio.den);
1767  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1768  }
1769 
1770  break;
1771  case AVMEDIA_TYPE_AUDIO:
1772  if (!frame->sample_rate)
1773  frame->sample_rate = avctx->sample_rate;
1774  if (frame->format < 0)
1775  frame->format = avctx->sample_fmt;
1776  if (!frame->channel_layout) {
1777  if (avctx->channel_layout) {
1778  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1779  avctx->channels) {
1780  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1781  "configuration.\n");
1782  return AVERROR(EINVAL);
1783  }
1784 
1785  frame->channel_layout = avctx->channel_layout;
1786  } else {
1787  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1788  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1789  avctx->channels);
1790  return AVERROR(ENOSYS);
1791  }
1792  }
1793  }
1794  frame->channels = avctx->channels;
1795  break;
1796  }
1797  return 0;
1798 }
1799 
1800 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1801 {
1802  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1803  int i;
1804  int num_planes = av_pix_fmt_count_planes(frame->format);
1805  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1806  int flags = desc ? desc->flags : 0;
1807  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1808  num_planes = 2;
1809  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1810  num_planes = 2;
1811  for (i = 0; i < num_planes; i++) {
1812  av_assert0(frame->data[i]);
1813  }
1814  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1815  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1816  if (frame->data[i])
1817  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1818  frame->data[i] = NULL;
1819  }
1820  }
1821 }
1822 
1823 static void decode_data_free(void *opaque, uint8_t *data)
1824 {
1825  FrameDecodeData *fdd = (FrameDecodeData*)data;
1826 
1827  if (fdd->post_process_opaque_free)
1828  fdd->post_process_opaque_free(fdd->post_process_opaque);
1829 
1830  if (fdd->hwaccel_priv_free)
1831  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1832 
1833  av_freep(&fdd);
1834 }
1835 
1836 int ff_attach_decode_data(AVFrame *frame)
1837 {
1838  AVBufferRef *fdd_buf;
1839  FrameDecodeData *fdd;
1840 
1841  av_assert1(!frame->private_ref);
1842  av_buffer_unref(&frame->private_ref);
1843 
1844  fdd = av_mallocz(sizeof(*fdd));
1845  if (!fdd)
1846  return AVERROR(ENOMEM);
1847 
1848  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1849  NULL, AV_BUFFER_FLAG_READONLY);
1850  if (!fdd_buf) {
1851  av_freep(&fdd);
1852  return AVERROR(ENOMEM);
1853  }
1854 
1855  frame->private_ref = fdd_buf;
1856 
1857  return 0;
1858 }
1859 
1860 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1861 {
1862  const AVHWAccel *hwaccel = avctx->hwaccel;
1863  int override_dimensions = 1;
1864  int ret;
1865 
1866  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1867  if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1868  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1869  return AVERROR(EINVAL);
1870  }
1871 
1872  if (frame->width <= 0 || frame->height <= 0) {
1873  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1874  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1875  override_dimensions = 0;
1876  }
1877 
1878  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1879  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1880  return AVERROR(EINVAL);
1881  }
1882  }
1883  ret = ff_decode_frame_props(avctx, frame);
1884  if (ret < 0)
1885  return ret;
1886 
1887  if (hwaccel) {
1888  if (hwaccel->alloc_frame) {
1889  ret = hwaccel->alloc_frame(avctx, frame);
1890  goto end;
1891  }
1892  } else
1893  avctx->sw_pix_fmt = avctx->pix_fmt;
1894 
1895  ret = avctx->get_buffer2(avctx, frame, flags);
1896  if (ret < 0)
1897  goto end;
1898 
1899  validate_avframe_allocation(avctx, frame);
1900 
1901  ret = ff_attach_decode_data(frame);
1902  if (ret < 0)
1903  goto end;
1904 
1905 end:
1906  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1907  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1908  frame->width = avctx->width;
1909  frame->height = avctx->height;
1910  }
1911 
1912  if (ret < 0)
1913  av_frame_unref(frame);
1914 
1915  return ret;
1916 }
1917 
1918 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1919 {
1920  int ret = get_buffer_internal(avctx, frame, flags);
1921  if (ret < 0) {
1922  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1923  frame->width = frame->height = 0;
1924  }
1925  return ret;
1926 }
1927 
1928 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
1929 {
1930  AVFrame *tmp;
1931  int ret;
1932 
1933  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1934 
1935  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1936  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1937  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1938  av_frame_unref(frame);
1939  }
1940 
1941  if (!frame->data[0])
1942  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1943 
1944  if (av_frame_is_writable(frame))
1945  return ff_decode_frame_props(avctx, frame);
1946 
1947  tmp = av_frame_alloc();
1948  if (!tmp)
1949  return AVERROR(ENOMEM);
1950 
1951  av_frame_move_ref(tmp, frame);
1952 
1953  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1954  if (ret < 0) {
1955  av_frame_free(&tmp);
1956  return ret;
1957  }
1958 
1959  av_frame_copy(frame, tmp);
1960  av_frame_free(&tmp);
1961 
1962  return 0;
1963 }
1964 
1965 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
1966 {
1967  int ret = reget_buffer_internal(avctx, frame);
1968  if (ret < 0)
1969  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1970  return ret;
1971 }
1972 
1973 static void bsfs_flush(AVCodecContext *avctx)
1974 {
1975  DecodeFilterContext *s = &avctx->internal->filter;
1976 
1977  for (int i = 0; i < s->nb_bsfs; i++)
1978  av_bsf_flush(s->bsfs[i]);
1979 }
1980 
1981 void avcodec_flush_buffers(AVCodecContext *avctx)
1982 {
1983  avctx->internal->draining = 0;
1984  avctx->internal->draining_done = 0;
1985  avctx->internal->nb_draining_errors = 0;
1986  av_frame_unref(avctx->internal->buffer_frame);
1987  av_frame_unref(avctx->internal->compat_decode_frame);
1988  av_packet_unref(avctx->internal->buffer_pkt);
1989  avctx->internal->buffer_pkt_valid = 0;
1990 
1991  av_packet_unref(avctx->internal->ds.in_pkt);
1992 
1993  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
1994  ff_thread_flush(avctx);
1995  else if (avctx->codec->flush)
1996  avctx->codec->flush(avctx);
1997 
1998  avctx->pts_correction_last_pts =
1999  avctx->pts_correction_last_dts = INT64_MIN;
2000 
2001  bsfs_flush(avctx);
2002 
2003  if (!avctx->refcounted_frames)
2004  av_frame_unref(avctx->internal->to_free);
2005 }
2006 
2007 void ff_decode_bsfs_uninit(AVCodecContext *avctx)
2008 {
2009  DecodeFilterContext *s = &avctx->internal->filter;
2010  int i;
2011 
2012  for (i = 0; i < s->nb_bsfs; i++)
2013  av_bsf_free(&s->bsfs[i]);
2014  av_freep(&s->bsfs);
2015  s->nb_bsfs = 0;
2016 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:86
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:60
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2597
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwaccel.h:34
int nb_draining_errors
Definition: internal.h:220
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:3114
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1326
const struct AVCodec * codec
Definition: avcodec.h:1542
const char const char void * val
Definition: avisynth_c.h:771
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:3077
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
const AVClass * priv_class
A class for the private data, used to declare bitstream filter private AVOptions. ...
Definition: avcodec.h:5771
#define AV_NUM_DATA_POINTERS
Definition: frame.h:227
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5737
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: avcodec.h:1152
int64_t pts_correction_num_faulty_dts
Number of incorrect DTS values so far.
Definition: avcodec.h:3094
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:385
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2446
This structure describes decoded (raw) audio or video data.
Definition: frame.h:226
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3722
int stride_align[AV_NUM_DATA_POINTERS]
Definition: internal.h:112
AVOption.
Definition: opt.h:246
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:3301
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:637
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: avcodec.h:3565
The codec supports this format by some internal method.
Definition: avcodec.h:3386
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:946
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1721
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:498
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:3625
const char * fmt
Definition: avisynth_c.h:769
void(* flush)(AVCodecContext *)
Flush buffers.
Definition: avcodec.h:3545
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:172
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:967
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2486
AVFrame * to_free
Definition: internal.h:159
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1465
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:868
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:418
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:624
const char * desc
Definition: nvenc.c:65
int width
Definition: internal.h:111
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: avcodec.h:1226
ATSC A53 Part 4 Closed Captions.
Definition: avcodec.h:1354
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2164
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:436
int ff_decode_bsfs_init(AVCodecContext *avctx)
Definition: decode.c:185
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5703
int size
Definition: avcodec.h:1446
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:3619
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1912
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:772
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1743
int samples
Definition: internal.h:116
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:402
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:860
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:208
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:3880
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
static void bsfs_flush(AVCodecContext *avctx)
Definition: decode.c:1973
enum AVMediaType type
Definition: avcodec.h:3437
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:48
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1860
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:875
AVBufferPool * pools[4]
Pools for each data plane.
Definition: internal.h:105
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
Definition: avcodec.h:3522
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1692
size_t crop_bottom
Definition: frame.h:586
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1025
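Subtitle decoding still goes through this packet-based call rather than the send/receive API. A minimal sketch, assuming avctx is an opened subtitle decoder and pkt a demuxed subtitle packet:

#include <libavcodec/avcodec.h>

static int decode_one_subtitle(AVCodecContext *avctx, AVPacket *pkt)
{
    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(avctx, &sub, &got_sub, pkt);
    if (ret < 0)
        return ret;
    if (got_sub) {
        /* sub.rects[0..sub.num_rects-1] hold bitmap or ASS/text rectangles */
        avsubtitle_free(&sub);   /* frees the rects, not the struct itself */
    }
    return 0;
}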
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2690
void * priv_data
Opaque filter-specific private data.
Definition: avcodec.h:5724
static int utf8_check(const uint8_t *str)
Definition: decode.c:933
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:1965
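ff_reget_buffer() is the internal copy-on-write helper: the frame is kept when it is the sole writable reference, otherwise a fresh buffer is allocated and the old contents copied. From the application side, the analogous public call is av_frame_make_writable(); a minimal sketch:

#include <libavutil/frame.h>

/* Ensure a (possibly decoder-shared) frame can be modified in place. This is
 * a no-op when we already hold the only reference; otherwise new buffers are
 * allocated and the data is copied, mirroring what ff_reget_buffer() does. */
static int prepare_frame_for_editing(AVFrame *frame)
{
    return av_frame_make_writable(frame);
}

On success, the caller may write to frame->data[] without corrupting frames still referenced elsewhere.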
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:713
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
Definition: decode.c:2007
Mastering display metadata (based on SMPTE-2086:2014).
Definition: avcodec.h:1334
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1656
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:3881
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:99
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:81
int av_opt_set_from_string(void *ctx, const char *opts, const char *const *shorthand, const char *key_val_sep, const char *pairs_sep)
Parse the key-value pairs list in opts.
Definition: opt.c:1506
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:993
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
const char * name
Definition: opt.h:247
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:211
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:153
DecodeFilterContext filter
Definition: internal.h:166
int height
Definition: internal.h:111
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1114
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2197
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1836
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:523
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:370
size_t crop_left
Definition: frame.h:587
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1463
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1220
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:838
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:604
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:329
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:319
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3402
static AVFrame * frame
int planes
Definition: internal.h:114
Structure to hold side data for an AVFrame.
Definition: frame.h:188
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:287
size_t compat_decode_consumed
Definition: internal.h:209
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: avcodec.h:1445
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:513
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2765
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
ptrdiff_t size
Definition: opengl_enc.c:101
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:291
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2171
#define FFALIGN(x, a)
Definition: macros.h:48
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3370
#define av_log(a,...)
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:607
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
FramePool * pool
Definition: internal.h:161
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:154
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
#define AV_RL8(x)
Definition: intreadwrite.h:398
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3070
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2013
int width
Definition: frame.h:284
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1823
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1277
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1800
#define AVERROR(e)
Definition: error.h:43
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: avcodec.h:1184
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:879
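Decoders normally apply the crop_left/right/top/bottom fields automatically; if an application sets avctx->apply_cropping to 0, it is expected to crop the received frames itself with this function. A minimal sketch:

#include <libavutil/frame.h>

static int crop_received_frame(AVFrame *frame)
{
    /* Without AV_FRAME_CROP_UNALIGNED the left crop may be reduced so that
     * the data pointers stay aligned; pass the flag to crop exactly instead. */
    return av_frame_apply_cropping(frame, 0);
}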
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
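Packet side data is how containers pass per-packet metadata such as parameter changes or skip-sample instructions to the decoder. A minimal sketch of reading one such entry from the application side, using AV_PKT_DATA_SKIP_SAMPLES, whose payload begins with two little-endian 32-bit skip counts:

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/log.h>

static void log_skip_samples(void *logctx, const AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);
    if (sd && size >= 8)
        av_log(logctx, AV_LOG_INFO, "skip %u samples at start, %u at end\n",
               AV_RL32(sd), AV_RL32(sd + 4));
}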
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:3095
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2804
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3407
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1871
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:126
AVFrame * buffer_frame
Definition: internal.h:202
int capabilities
Codec capabilities.
Definition: avcodec.h:3443
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:471
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1428
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1613
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:482
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5743
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
const AVOption * av_opt_next(const void *obj, const AVOption *last)
Iterate over all AVOptions belonging to obj.
Definition: opt.c:45
int side_data_elems
Definition: avcodec.h:1457
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3243
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:329
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:871
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1928
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:78
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:465
#define fail()
Definition: checkasm.h:117
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to...
Definition: avstring.c:149
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:792
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwaccel.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:3736
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1451
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2240
uint32_t end_display_time
Definition: avcodec.h:3879
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3882
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:404
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:724
size_t crop_top
Definition: frame.h:585
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
static const struct @304 planes[]
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:198
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:564
int channels
number of audio channels, only used for audio.
Definition: frame.h:531
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:464
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2658
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2796
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3598
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:432
int channels
Definition: internal.h:115
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:3638
AVFrame * compat_decode_frame
Definition: internal.h:213
int width
picture width / height.
Definition: avcodec.h:1706
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3213
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1680
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5749
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:771
AVPacket * in_pkt
Definition: internal.h:120
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: avcodec.h:1340
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:185
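The DecodeFilterContext freed by ff_decode_bsfs_uninit() above holds exactly this kind of bitstream-filter chain, built from the codec's bsfs string. A minimal sketch of the same API used standalone, assuming par holds the input stream's codec parameters and the filter produces one output packet per input packet:

#include <libavcodec/avcodec.h>

static int filter_packet(const char *bsf_name, const AVCodecParameters *par,
                         AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(bsf_name);
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
        (ret = av_bsf_init(bsf)) < 0)
        goto end;

    if ((ret = av_bsf_send_packet(bsf, pkt)) < 0)   /* takes ownership */
        goto end;
    /* A general loop would call av_bsf_receive_packet() until AVERROR(EAGAIN);
     * for a 1:1 filter a single call returns the filtered packet. */
    ret = av_bsf_receive_packet(bsf, pkt);
end:
    av_bsf_free(&bsf);
    return ret;
}

A real application would keep the AVBSFContext alive across packets and call av_bsf_flush() on seek, as bsfs_flush() above does for the decoder-owned chain.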
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2143
AVFrameSideDataType
Definition: frame.h:48
uint16_t format
Definition: avcodec.h:3877
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:2636
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:3751
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:2683
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2669
int n
Definition: avisynth_c.h:684
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: avcodec.h:3556
DecodeSimpleContext ds
Definition: internal.h:165
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:3103
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1564
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:195
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2785
int linesize[4]
Definition: internal.h:113
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:3111
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1981
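avcodec_flush_buffers() (line 1981 above) resets the draining state, the buffered packet/frame used by the legacy API and any attached bitstream filters, so it should be called whenever the demuxer position jumps. A minimal sketch of the usual seek sequence:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static int seek_and_flush(AVFormatContext *fmt_ctx, AVCodecContext *avctx,
                          int stream_index, int64_t ts)
{
    int ret = av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    if (ret < 0)
        return ret;
    /* Drop reference frames, buffered packets and PTS-correction history. */
    avcodec_flush_buffers(avctx);
    return 0;
}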
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:299
Content light level (based on CTA-861.3).
Definition: avcodec.h:1347
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1656
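get_buffer_internal() above ultimately calls avctx->get_buffer2(), which defaults to this function. An application that only wants to observe or lightly customize allocations can install a thin wrapper; a minimal sketch:

#include <libavcodec/avcodec.h>

/* Pass-through get_buffer2 callback: log the request, then let libavcodec's
 * default pool-based allocator do the actual work. Install it before
 * avcodec_open2() with: avctx->get_buffer2 = logging_get_buffer2; */
static int logging_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d, format %d, flags %d\n",
           frame->width, frame->height, frame->format, flags);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}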
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:1541
int compat_decode_warned
Definition: internal.h:206
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:506
A list of zero terminated key/value strings.
Definition: avcodec.h:1284
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:853
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:594
int sample_rate
samples per second
Definition: avcodec.h:2189
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:257
int debug
debug
Definition: avcodec.h:2614
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1757
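On the application side, hardware decoding is typically set up by querying this function for a configuration that supports the desired device type and attaching a device context before avcodec_open2(); ff_get_format() and hwaccel_init() above then select the matching hwaccel. A minimal sketch, assuming the caller already knows which AVHWDeviceType it wants:

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

/* Find a hw_device_ctx-capable configuration for the given device type and
 * attach a freshly created device context to the codec context. */
static int setup_hw_device(AVCodecContext *avctx, const AVCodec *codec,
                           enum AVHWDeviceType type)
{
    for (int i = 0;; i++) {
        const AVCodecHWConfig *cfg = avcodec_get_hw_config(codec, i);
        if (!cfg)
            return AVERROR(ENOSYS);   /* codec has no such hwaccel */
        if ((cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
            cfg->device_type == type) {
            /* cfg->pix_fmt is what get_format() should return later. */
            return av_hwdevice_ctx_create(&avctx->hw_device_ctx, type,
                                          NULL, NULL, 0);
        }
    }
}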
main external API structure.
Definition: avcodec.h:1533
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:598
int skip_samples_multiplier
Definition: internal.h:217
uint8_t * data
The data buffer.
Definition: buffer.h:89
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1050
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:314
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1918
uint8_t * data
Definition: frame.h:190
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:767
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void * buf
Definition: avisynth_c.h:690
size_t crop_right
Definition: frame.h:588
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1721
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:394
int sample_rate
Sample rate of the audio data.
Definition: frame.h:399
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1785
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:722
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1011
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2157
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2150
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2031
Recommends skipping the specified number of samples.
Definition: avcodec.h:1268
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:3220
int buffer_pkt_valid
Definition: internal.h:201
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:185
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2344
#define STRIDE_ALIGN
Definition: internal.h:97
enum AVChromaLocation chroma_location
Definition: frame.h:484
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:491
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:2360
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:262
#define flags(name, subs,...)
Definition: cbs_av1.c:596
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: avcodec.h:1211
The codec supports this format via the hw_frames_ctx interface.
Definition: avcodec.h:3379
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1177
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:240
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1823
#define UTF8_MAX_BYTES
Definition: decode.c:874
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:327
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok()...
Definition: avstring.c:184
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:200
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:335
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:3315
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:139
The codec supports this format by some ad-hoc method.
Definition: avcodec.h:3395
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1456
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1228
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: avcodec.h:1484
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:3758
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1032
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3730
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:243
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:953
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:190
int caps_internal
Internal codec capabilities.
Definition: avcodec.h:3550
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:253
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:782
AVBSFContext ** bsfs
Definition: internal.h:125
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1609
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:3836
#define FF_PSEUDOPAL
Definition: internal.h:367
AVHWDeviceType
Definition: hwcontext.h:27
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int channels
number of audio channels
Definition: avcodec.h:2190
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1568
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:3871
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:456
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1620
enum AVColorPrimaries color_primaries
Definition: frame.h:473
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1444
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:3096
size_t compat_decode_partial_size
Definition: internal.h:212
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:879
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2220
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1462
int height
Definition: frame.h:284
void av_bsf_flush(AVBSFContext *ctx)
Reset the internal bitstream filter state / flush internal buffers.
Definition: bsf.c:175
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3093
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:475
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3414
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:3223
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:3115
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2362
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:3862
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:273
int format
Definition: internal.h:110
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3265
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:540
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1422
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5731
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1144
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:292
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2592
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:968
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1438
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:3063
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:654
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1314
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: avcodec.h:1232
static uint8_t tmp[11]
Definition: aes_ctr.c:26
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: avcodec.h:3540