decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 
40 #include "avcodec.h"
41 #include "bytestream.h"
42 #include "decode.h"
43 #include "internal.h"
44 #include "thread.h"
45 
46 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
47 {
48  int size = 0, ret;
49  const uint8_t *data;
50  uint32_t flags;
51  int64_t val;
52 
53  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
54  if (!data)
55  return 0;
56 
57  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
58  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
59  "changes, but PARAM_CHANGE side data was sent to it.\n");
60  ret = AVERROR(EINVAL);
61  goto fail2;
62  }
63 
64  if (size < 4)
65  goto fail;
66 
67  flags = bytestream_get_le32(&data);
68  size -= 4;
69 
70  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
71  if (size < 4)
72  goto fail;
73  val = bytestream_get_le32(&data);
74  if (val <= 0 || val > INT_MAX) {
75  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
76  ret = AVERROR_INVALIDDATA;
77  goto fail2;
78  }
79  avctx->channels = val;
80  size -= 4;
81  }
82  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
83  if (size < 8)
84  goto fail;
85  avctx->channel_layout = bytestream_get_le64(&data);
86  size -= 8;
87  }
88  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
89  if (size < 4)
90  goto fail;
91  val = bytestream_get_le32(&data);
92  if (val <= 0 || val > INT_MAX) {
93  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
94  ret = AVERROR_INVALIDDATA;
95  goto fail2;
96  }
97  avctx->sample_rate = val;
98  size -= 4;
99  }
100  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
101  if (size < 8)
102  goto fail;
103  avctx->width = bytestream_get_le32(&data);
104  avctx->height = bytestream_get_le32(&data);
105  size -= 8;
106  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
107  if (ret < 0)
108  goto fail2;
109  }
110 
111  return 0;
112 fail:
113  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
114  ret = AVERROR_INVALIDDATA;
115 fail2:
116  if (ret < 0) {
117  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
118  if (avctx->err_recognition & AV_EF_EXPLODE)
119  return ret;
120  }
121  return 0;
122 }
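Note (not part of decode.c): apply_param_change() above consumes AV_PKT_DATA_PARAM_CHANGE side data laid out as a little-endian 32-bit flags word followed by the optional fields selected by those flags. A minimal caller-side sketch of reading the same layout with the public AV_RL32/AV_RL64 macros; the helper name dump_param_change() is made up for this illustration.

/* Illustrative sketch only -- not FFmpeg API. */
#include <inttypes.h>
#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

static void dump_param_change(const uint8_t *data, int size)
{
    uint32_t flags;

    if (!data || size < 4)
        return;
    flags = AV_RL32(data); data += 4; size -= 4;

    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) && size >= 4) {
        printf("channel count: %"PRIu32"\n", AV_RL32(data)); data += 4; size -= 4;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) && size >= 8) {
        printf("channel layout: 0x%"PRIx64"\n", (uint64_t)AV_RL64(data)); data += 8; size -= 8;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) && size >= 4) {
        printf("sample rate: %"PRIu32"\n", AV_RL32(data)); data += 4; size -= 4;
    }
    if ((flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) && size >= 8)
        printf("dimensions: %"PRIu32"x%"PRIu32"\n", AV_RL32(data), AV_RL32(data + 4));
}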
123 
124 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
125 {
126  int ret = 0;
127 
128  av_packet_unref(avci->last_pkt_props);
129  if (pkt) {
130  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
131  if (!ret)
132  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_init_buffer_info().
133  }
134  return ret;
135 }
136 
137 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
138 {
139  int ret;
140 
141  /* move the original frame to our backup */
142  av_frame_unref(avci->to_free);
143  av_frame_move_ref(avci->to_free, frame);
144 
145  /* now copy everything except the AVBufferRefs back
146  * note that we make a COPY of the side data, so calling av_frame_free() on
147  * the caller's frame will work properly */
148  ret = av_frame_copy_props(frame, avci->to_free);
149  if (ret < 0)
150  return ret;
151 
152  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
153  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
154  if (avci->to_free->extended_data != avci->to_free->data) {
155  int planes = avci->to_free->channels;
156  int size = planes * sizeof(*frame->extended_data);
157 
158  if (!size) {
159  av_frame_unref(frame);
160  return AVERROR_BUG;
161  }
162 
163  frame->extended_data = av_malloc(size);
164  if (!frame->extended_data) {
165  av_frame_unref(frame);
166  return AVERROR(ENOMEM);
167  }
168  memcpy(frame->extended_data, avci->to_free->extended_data,
169  size);
170  } else
171  frame->extended_data = frame->data;
172 
173  frame->format = avci->to_free->format;
174  frame->width = avci->to_free->width;
175  frame->height = avci->to_free->height;
176  frame->channel_layout = avci->to_free->channel_layout;
177  frame->nb_samples = avci->to_free->nb_samples;
178  frame->channels = avci->to_free->channels;
179 
180  return 0;
181 }
182 
183 static int bsfs_init(AVCodecContext *avctx)
184 {
185  AVCodecInternal *avci = avctx->internal;
186  DecodeFilterContext *s = &avci->filter;
187  const char *bsfs_str;
188  int ret;
189 
190  if (s->nb_bsfs)
191  return 0;
192 
193  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
194  while (bsfs_str && *bsfs_str) {
195  AVBSFContext **tmp;
196  const AVBitStreamFilter *filter;
197  char *bsf;
198 
199  bsf = av_get_token(&bsfs_str, ",");
200  if (!bsf) {
201  ret = AVERROR(ENOMEM);
202  goto fail;
203  }
204 
205  filter = av_bsf_get_by_name(bsf);
206  if (!filter) {
207  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
208  "requested by a decoder. This is a bug, please report it.\n",
209  bsf);
210  ret = AVERROR_BUG;
211  av_freep(&bsf);
212  goto fail;
213  }
214  av_freep(&bsf);
215 
216  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
217  if (!tmp) {
218  ret = AVERROR(ENOMEM);
219  goto fail;
220  }
221  s->bsfs = tmp;
222  s->nb_bsfs++;
223 
224  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
225  if (ret < 0)
226  goto fail;
227 
228  if (s->nb_bsfs == 1) {
229  /* We do not currently have an API for passing the input timebase into decoders,
230  * but no filters used here should actually need it.
231  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
232  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
233  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
234  avctx);
235  } else {
236  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
237  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
238  s->bsfs[s->nb_bsfs - 2]->par_out);
239  }
240  if (ret < 0)
241  goto fail;
242 
243  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
244  if (ret < 0)
245  goto fail;
246  }
247 
248  return 0;
249 fail:
250  ff_decode_bsfs_uninit(avctx);
251  return ret;
252 }
253 
254 /* try to get one output packet from the filter chain */
255 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
256 {
257  DecodeFilterContext *s = &avctx->internal->filter;
258  int idx, ret;
259 
260  /* start with the last filter in the chain */
261  idx = s->nb_bsfs - 1;
262  while (idx >= 0) {
263  /* request a packet from the currently selected filter */
264  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
265  if (ret == AVERROR(EAGAIN)) {
266  /* no packets available, try the next filter up the chain */
267  ret = 0;
268  idx--;
269  continue;
270  } else if (ret < 0 && ret != AVERROR_EOF) {
271  return ret;
272  }
273 
274  /* got a packet or EOF -- pass it to the caller or to the next filter
275  * down the chain */
276  if (idx == s->nb_bsfs - 1) {
277  return ret;
278  } else {
279  idx++;
280  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
281  if (ret < 0) {
282  av_log(avctx, AV_LOG_ERROR,
283  "Error pre-processing a packet before decoding\n");
284  av_packet_unref(pkt);
285  return ret;
286  }
287  }
288  }
289 
290  return AVERROR(EAGAIN);
291 }
292 
293 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
294 {
295  AVCodecInternal *avci = avctx->internal;
296  int ret;
297 
298  if (avci->draining)
299  return AVERROR_EOF;
300 
301  ret = bsfs_poll(avctx, pkt);
302  if (ret == AVERROR_EOF)
303  avci->draining = 1;
304  if (ret < 0)
305  return ret;
306 
307  ret = extract_packet_props(avctx->internal, pkt);
308  if (ret < 0)
309  goto finish;
310 
311  ret = apply_param_change(avctx, pkt);
312  if (ret < 0)
313  goto finish;
314 
315  if (avctx->codec->receive_frame)
316  avci->compat_decode_consumed += pkt->size;
317 
318  return 0;
319 finish:
320  av_packet_unref(pkt);
321  return ret;
322 }
323 
324 /**
325  * Attempt to guess proper monotonic timestamps for decoded video frames
326  * which might have incorrect times. Input timestamps may wrap around, in
327  * which case the output will as well.
328  *
329  * @param pts the pts field of the decoded AVPacket, as passed through
330  * AVFrame.pts
331  * @param dts the dts field of the decoded AVPacket
332  * @return one of the input values, may be AV_NOPTS_VALUE
333  */
334 static int64_t guess_correct_pts(AVCodecContext *ctx,
335  int64_t reordered_pts, int64_t dts)
336 {
337  int64_t pts = AV_NOPTS_VALUE;
338 
339  if (dts != AV_NOPTS_VALUE) {
340  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
341  ctx->pts_correction_last_dts = dts;
342  } else if (reordered_pts != AV_NOPTS_VALUE)
343  ctx->pts_correction_last_dts = reordered_pts;
344 
345  if (reordered_pts != AV_NOPTS_VALUE) {
346  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
347  ctx->pts_correction_last_pts = reordered_pts;
348  } else if(dts != AV_NOPTS_VALUE)
349  ctx->pts_correction_last_pts = dts;
350 
351  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
352  && reordered_pts != AV_NOPTS_VALUE)
353  pts = reordered_pts;
354  else
355  pts = dts;
356 
357  return pts;
358 }
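Note (not part of decode.c): the heuristic above trusts the packet pts only while it has produced non-monotonic values no more often than the dts. A self-contained restatement with its own state, to make the counting visible; struct and field names are mine, and in the decoder the counters live in AVCodecContext with the last_* fields starting at INT64_MIN (see avcodec_flush_buffers() further down).

#include <stdint.h>
#include <libavutil/avutil.h>   /* AV_NOPTS_VALUE */

typedef struct PtsGuess {
    int64_t last_pts, last_dts;      /* initialize both to INT64_MIN */
    int64_t faulty_pts, faulty_dts;  /* initialize both to 0 */
} PtsGuess;

static int64_t guess_pts(PtsGuess *g, int64_t pts, int64_t dts)
{
    if (dts != AV_NOPTS_VALUE) {
        g->faulty_dts += dts <= g->last_dts;   /* count non-increasing DTS */
        g->last_dts = dts;
    } else if (pts != AV_NOPTS_VALUE)
        g->last_dts = pts;

    if (pts != AV_NOPTS_VALUE) {
        g->faulty_pts += pts <= g->last_pts;   /* count non-increasing PTS */
        g->last_pts = pts;
    } else if (dts != AV_NOPTS_VALUE)
        g->last_pts = dts;

    /* trust PTS only if it has misbehaved no more often than DTS */
    return (g->faulty_pts <= g->faulty_dts || dts == AV_NOPTS_VALUE) &&
           pts != AV_NOPTS_VALUE ? pts : dts;
}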
359 
360 /*
361  * The core of the receive_frame_wrapper for the decoders implementing
362  * the simple API. Certain decoders might consume partial packets without
363  * returning any output, so this function needs to be called in a loop until it
364  * returns EAGAIN.
365  **/
366 static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
367 {
368  AVCodecInternal *avci = avctx->internal;
369  DecodeSimpleContext *ds = &avci->ds;
370  AVPacket *pkt = ds->in_pkt;
371  // copy to ensure we do not change pkt
372  AVPacket tmp;
373  int got_frame, actual_got_frame, did_split;
374  int ret;
375 
376  if (!pkt->data && !avci->draining) {
377  av_packet_unref(pkt);
378  ret = ff_decode_get_packet(avctx, pkt);
379  if (ret < 0 && ret != AVERROR_EOF)
380  return ret;
381  }
382 
383  // Some codecs (at least wma lossless) will crash when feeding drain packets
384  // after EOF was signaled.
385  if (avci->draining_done)
386  return AVERROR_EOF;
387 
388  if (!pkt->data &&
389  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
390  avctx->active_thread_type & FF_THREAD_FRAME))
391  return AVERROR_EOF;
392 
393  tmp = *pkt;
394 #if FF_API_MERGE_SD
395 FF_DISABLE_DEPRECATION_WARNINGS
396  did_split = avci->compat_decode_partial_size ?
397  ff_packet_split_and_drop_side_data(&tmp) :
398  av_packet_split_side_data(&tmp);
399 
400  if (did_split) {
401  ret = extract_packet_props(avctx->internal, &tmp);
402  if (ret < 0)
403  return ret;
404 
405  ret = apply_param_change(avctx, &tmp);
406  if (ret < 0)
407  return ret;
408  }
409 FF_ENABLE_DEPRECATION_WARNINGS
410 #endif
411 
412  got_frame = 0;
413 
414  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
415  ret = ff_thread_decode_frame(avctx, frame, &got_frame, &tmp);
416  } else {
417  ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp);
418 
419  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
420  frame->pkt_dts = pkt->dts;
421  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
422  if(!avctx->has_b_frames)
423  frame->pkt_pos = pkt->pos;
424  //FIXME these should be under if(!avctx->has_b_frames)
425  /* get_buffer is supposed to set frame parameters */
426  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
427  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
428  if (!frame->width) frame->width = avctx->width;
429  if (!frame->height) frame->height = avctx->height;
430  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
431  }
432  }
433  }
434  emms_c();
435  actual_got_frame = got_frame;
436 
437  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
438  if (frame->flags & AV_FRAME_FLAG_DISCARD)
439  got_frame = 0;
440  if (got_frame)
441  frame->pts = guess_correct_pts(avctx,
442  frame->pts,
443  frame->pkt_dts);
444  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
445  uint8_t *side;
446  int side_size;
447  uint32_t discard_padding = 0;
448  uint8_t skip_reason = 0;
449  uint8_t discard_reason = 0;
450 
451  if (ret >= 0 && got_frame) {
452  frame->pts = guess_correct_pts(avctx,
453  frame->pts,
454  frame->pkt_dts);
455  if (frame->format == AV_SAMPLE_FMT_NONE)
456  frame->format = avctx->sample_fmt;
457  if (!frame->channel_layout)
458  frame->channel_layout = avctx->channel_layout;
459  if (!frame->channels)
460  frame->channels = avctx->channels;
461  if (!frame->sample_rate)
462  frame->sample_rate = avctx->sample_rate;
463  }
464 
465  side= av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
466  if(side && side_size>=10) {
467  avctx->internal->skip_samples = AV_RL32(side);
468  discard_padding = AV_RL32(side + 4);
469  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
470  avctx->internal->skip_samples, (int)discard_padding);
471  skip_reason = AV_RL8(side + 8);
472  discard_reason = AV_RL8(side + 9);
473  }
474 
475  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
476  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
477  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
478  got_frame = 0;
479  }
480 
481  if (avctx->internal->skip_samples > 0 && got_frame &&
482  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
483  if(frame->nb_samples <= avctx->internal->skip_samples){
484  got_frame = 0;
485  avctx->internal->skip_samples -= frame->nb_samples;
486  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
487  avctx->internal->skip_samples);
488  } else {
489  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
490  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
491  if(avctx->pkt_timebase.num && avctx->sample_rate) {
492  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
493  (AVRational){1, avctx->sample_rate},
494  avctx->pkt_timebase);
495  if(frame->pts!=AV_NOPTS_VALUE)
496  frame->pts += diff_ts;
497 #if FF_API_PKT_PTS
498 FF_DISABLE_DEPRECATION_WARNINGS
499  if(frame->pkt_pts!=AV_NOPTS_VALUE)
500  frame->pkt_pts += diff_ts;
501 FF_ENABLE_DEPRECATION_WARNINGS
502 #endif
503  if(frame->pkt_dts!=AV_NOPTS_VALUE)
504  frame->pkt_dts += diff_ts;
505  if (frame->pkt_duration >= diff_ts)
506  frame->pkt_duration -= diff_ts;
507  } else {
508  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
509  }
510  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
511  avctx->internal->skip_samples, frame->nb_samples);
512  frame->nb_samples -= avctx->internal->skip_samples;
513  avctx->internal->skip_samples = 0;
514  }
515  }
516 
517  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
518  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
519  if (discard_padding == frame->nb_samples) {
520  got_frame = 0;
521  } else {
522  if(avctx->pkt_timebase.num && avctx->sample_rate) {
523  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
524  (AVRational){1, avctx->sample_rate},
525  avctx->pkt_timebase);
526  frame->pkt_duration = diff_ts;
527  } else {
528  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
529  }
530  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
531  (int)discard_padding, frame->nb_samples);
532  frame->nb_samples -= discard_padding;
533  }
534  }
535 
536  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
537  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
538  if (fside) {
539  AV_WL32(fside->data, avctx->internal->skip_samples);
540  AV_WL32(fside->data + 4, discard_padding);
541  AV_WL8(fside->data + 8, skip_reason);
542  AV_WL8(fside->data + 9, discard_reason);
543  avctx->internal->skip_samples = 0;
544  }
545  }
546  }
547 #if FF_API_MERGE_SD
548  if (did_split) {
549  av_packet_free_side_data(&tmp);
550  if(ret == tmp.size)
551  ret = pkt->size;
552  }
553 #endif
554 
555  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
556  !avci->showed_multi_packet_warning &&
557  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
558  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
559  avci->showed_multi_packet_warning = 1;
560  }
561 
562  if (!got_frame)
563  av_frame_unref(frame);
564 
565  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
566  ret = pkt->size;
567 
568 #if FF_API_AVCTX_TIMEBASE
569  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
570  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
571 #endif
572 
573  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
574  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
575  if (avctx->internal->draining && !actual_got_frame) {
576  if (ret < 0) {
577  /* prevent infinite loop if a decoder wrongly always return error on draining */
578  /* reasonable nb_errors_max = maximum b frames + thread count */
579  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
580  avctx->thread_count : 1);
581 
582  if (avci->nb_draining_errors++ >= nb_errors_max) {
583  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
584  "Stop draining and force EOF.\n");
585  avci->draining_done = 1;
586  ret = AVERROR_BUG;
587  }
588  } else {
589  avci->draining_done = 1;
590  }
591  }
592 
593  avci->compat_decode_consumed += ret;
594 
595  if (ret >= pkt->size || ret < 0) {
596  av_packet_unref(pkt);
597  } else {
598  int consumed = ret;
599 
600  pkt->data += consumed;
601  pkt->size -= consumed;
602  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
603  pkt->pts = AV_NOPTS_VALUE;
604  pkt->dts = AV_NOPTS_VALUE;
605  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
606  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
607  }
608 
609  if (got_frame)
610  av_assert0(frame->buf[0]);
611 
612  return ret < 0 ? ret : 0;
613 }
614 
615 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
616 {
617  int ret;
618 
619  while (!frame->buf[0]) {
620  ret = decode_simple_internal(avctx, frame);
621  if (ret < 0)
622  return ret;
623  }
624 
625  return 0;
626 }
627 
628 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
629 {
630  AVCodecInternal *avci = avctx->internal;
631  int ret;
632 
633  av_assert0(!frame->buf[0]);
634 
635  if (avctx->codec->receive_frame)
636  ret = avctx->codec->receive_frame(avctx, frame);
637  else
638  ret = decode_simple_receive_frame(avctx, frame);
639 
640  if (ret == AVERROR_EOF)
641  avci->draining_done = 1;
642 
643  return ret;
644 }
645 
646 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
647 {
648  AVCodecInternal *avci = avctx->internal;
649  int ret;
650 
651  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
652  return AVERROR(EINVAL);
653 
654  if (avctx->internal->draining)
655  return AVERROR_EOF;
656 
657  if (avpkt && !avpkt->size && avpkt->data)
658  return AVERROR(EINVAL);
659 
660  ret = bsfs_init(avctx);
661  if (ret < 0)
662  return ret;
663 
664  av_packet_unref(avci->buffer_pkt);
665  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
666  ret = av_packet_ref(avci->buffer_pkt, avpkt);
667  if (ret < 0)
668  return ret;
669  }
670 
671  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
672  if (ret < 0) {
673  av_packet_unref(avci->buffer_pkt);
674  return ret;
675  }
676 
677  if (!avci->buffer_frame->buf[0]) {
678  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
679  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
680  return ret;
681  }
682 
683  return 0;
684 }
685 
686 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
687 {
688  /* make sure we are noisy about decoders returning invalid cropping data */
689  if (frame->crop_left >= INT_MAX - frame->crop_right ||
690  frame->crop_top >= INT_MAX - frame->crop_bottom ||
691  (frame->crop_left + frame->crop_right) >= frame->width ||
692  (frame->crop_top + frame->crop_bottom) >= frame->height) {
693  av_log(avctx, AV_LOG_WARNING,
694  "Invalid cropping information set by a decoder: "
695  "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
696  "(frame size %dx%d). This is a bug, please report it\n",
697  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
698  frame->width, frame->height);
699  frame->crop_left = 0;
700  frame->crop_right = 0;
701  frame->crop_top = 0;
702  frame->crop_bottom = 0;
703  return 0;
704  }
705 
706  if (!avctx->apply_cropping)
707  return 0;
708 
709  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
710  AV_FRAME_CROP_UNALIGNED : 0);
711 }
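Note (not part of decode.c): apply_cropping() only validates the crop_* fields and then delegates to av_frame_apply_cropping(). A caller that sets avctx->apply_cropping to 0 is expected to perform the equivalent step itself on every received frame; a minimal sketch follows (the helper name is mine).

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int crop_received_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int flags = (avctx->flags & AV_CODEC_FLAG_UNALIGNED) ? AV_FRAME_CROP_UNALIGNED : 0;
    /* Shrinks width/height and offsets data[] according to the crop_* fields. */
    return av_frame_apply_cropping(frame, flags);
}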
712 
713 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
714 {
715  AVCodecInternal *avci = avctx->internal;
716  int ret;
717 
718  av_frame_unref(frame);
719 
720  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
721  return AVERROR(EINVAL);
722 
723  ret = bsfs_init(avctx);
724  if (ret < 0)
725  return ret;
726 
727  if (avci->buffer_frame->buf[0]) {
728  av_frame_move_ref(frame, avci->buffer_frame);
729  } else {
730  ret = decode_receive_frame_internal(avctx, frame);
731  if (ret < 0)
732  return ret;
733  }
734 
735  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
736  ret = apply_cropping(avctx, frame);
737  if (ret < 0) {
738  av_frame_unref(frame);
739  return ret;
740  }
741  }
742 
743  avctx->frame_number++;
744 
745  return 0;
746 }
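Note (not part of decode.c): avcodec_send_packet() and avcodec_receive_frame() above form the push/pull pair of the current decoding API. A typical caller-side loop looks like the following sketch; packet acquisition and frame consumption are omitted, and passing a NULL packet enters draining mode.

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);   /* pkt == NULL starts draining */
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                          /* need more input / fully drained */
        if (ret < 0)
            return ret;

        /* ... use frame ... */
        av_frame_unref(frame);
    }
    return 0;
}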
747 
748 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
749  int *got_frame, const AVPacket *pkt)
750 {
751  AVCodecInternal *avci = avctx->internal;
752  int ret = 0;
753 
754  av_assert0(avci->compat_decode_consumed == 0);
755 
756  if (avci->draining_done && pkt && pkt->size != 0) {
757  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
758  avcodec_flush_buffers(avctx);
759  }
760 
761  *got_frame = 0;
762  avci->compat_decode = 1;
763 
764  if (avci->compat_decode_partial_size > 0 &&
765  avci->compat_decode_partial_size != pkt->size) {
766  av_log(avctx, AV_LOG_ERROR,
767  "Got unexpected packet size after a partial decode\n");
768  ret = AVERROR(EINVAL);
769  goto finish;
770  }
771 
772  if (!avci->compat_decode_partial_size) {
773  ret = avcodec_send_packet(avctx, pkt);
774  if (ret == AVERROR_EOF)
775  ret = 0;
776  else if (ret == AVERROR(EAGAIN)) {
777  /* we fully drain all the output in each decode call, so this should not
778  * ever happen */
779  ret = AVERROR_BUG;
780  goto finish;
781  } else if (ret < 0)
782  goto finish;
783  }
784 
785  while (ret >= 0) {
786  ret = avcodec_receive_frame(avctx, frame);
787  if (ret < 0) {
788  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
789  ret = 0;
790  goto finish;
791  }
792 
793  if (frame != avci->compat_decode_frame) {
794  if (!avctx->refcounted_frames) {
795  ret = unrefcount_frame(avci, frame);
796  if (ret < 0)
797  goto finish;
798  }
799 
800  *got_frame = 1;
801  frame = avci->compat_decode_frame;
802  } else {
803  if (!avci->compat_decode_warned) {
804  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
805  "API cannot return all the frames for this decoder. "
806  "Some frames will be dropped. Update your code to the "
807  "new decoding API to fix this.\n");
808  avci->compat_decode_warned = 1;
809  }
810  }
811 
812  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
813  break;
814  }
815 
816 finish:
817  if (ret == 0) {
818  /* if there are any bsfs then assume full packet is always consumed */
819  if (avctx->codec->bsfs)
820  ret = pkt->size;
821  else
822  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
823  }
824  avci->compat_decode_consumed = 0;
825  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
826 
827  return ret;
828 }
829 
830 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
831  int *got_picture_ptr,
832  const AVPacket *avpkt)
833 {
834  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
835 }
836 
837 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
838  AVFrame *frame,
839  int *got_frame_ptr,
840  const AVPacket *avpkt)
841 {
842  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
843 }
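Note (not part of decode.c): both deprecated entry points funnel into compat_decode(), which emulates the old "bytes consumed + got_frame flag" contract on top of the send/receive API. For contrast, the old-style call pattern being emulated looks roughly like the sketch below (names are mine); new code should use the send/receive loop shown after avcodec_receive_frame() instead.

#include <libavcodec/avcodec.h>

static int old_style_decode(AVCodecContext *dec, AVFrame *frame, AVPacket *pkt)
{
    int got = 0;
    int used = avcodec_decode_video2(dec, frame, &got, pkt); /* bytes consumed, or error */
    if (used < 0)
        return used;
    if (got) {
        /* ... use frame ... */
        av_frame_unref(frame);
    }
    return used;
}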
844 
845 static void get_subtitle_defaults(AVSubtitle *sub)
846 {
847  memset(sub, 0, sizeof(*sub));
848  sub->pts = AV_NOPTS_VALUE;
849 }
850 
851 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
852 static int recode_subtitle(AVCodecContext *avctx,
853  AVPacket *outpkt, const AVPacket *inpkt)
854 {
855 #if CONFIG_ICONV
856  iconv_t cd = (iconv_t)-1;
857  int ret = 0;
858  char *inb, *outb;
859  size_t inl, outl;
860  AVPacket tmp;
861 #endif
862 
863  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
864  return 0;
865 
866 #if CONFIG_ICONV
867  cd = iconv_open("UTF-8", avctx->sub_charenc);
868  av_assert0(cd != (iconv_t)-1);
869 
870  inb = inpkt->data;
871  inl = inpkt->size;
872 
873  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
874  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
875  ret = AVERROR(ENOMEM);
876  goto end;
877  }
878 
879  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
880  if (ret < 0)
881  goto end;
882  outpkt->buf = tmp.buf;
883  outpkt->data = tmp.data;
884  outpkt->size = tmp.size;
885  outb = outpkt->data;
886  outl = outpkt->size;
887 
888  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
889  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
890  outl >= outpkt->size || inl != 0) {
891  ret = FFMIN(AVERROR(errno), -1);
892  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
893  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
894  av_packet_unref(&tmp);
895  goto end;
896  }
897  outpkt->size -= outl;
898  memset(outpkt->data + outpkt->size, 0, outl);
899 
900 end:
901  if (cd != (iconv_t)-1)
902  iconv_close(cd);
903  return ret;
904 #else
905  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
906  return AVERROR(EINVAL);
907 #endif
908 }
909 
910 static int utf8_check(const uint8_t *str)
911 {
912  const uint8_t *byte;
913  uint32_t codepoint, min;
914 
915  while (*str) {
916  byte = str;
917  GET_UTF8(codepoint, *(byte++), return 0;);
918  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
919  1 << (5 * (byte - str) - 4);
920  if (codepoint < min || codepoint >= 0x110000 ||
921  codepoint == 0xFFFE /* BOM */ ||
922  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
923  return 0;
924  str = byte;
925  }
926  return 1;
927 }
928 
929 #if FF_API_ASS_TIMING
930 static void insert_ts(AVBPrint *buf, int ts)
931 {
932  if (ts == -1) {
933  av_bprintf(buf, "9:59:59.99,");
934  } else {
935  int h, m, s;
936 
937  h = ts/360000; ts -= 360000*h;
938  m = ts/ 6000; ts -= 6000*m;
939  s = ts/ 100; ts -= 100*s;
940  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
941  }
942 }
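Note (not part of decode.c): insert_ts() formats a timestamp expressed in centiseconds (1/100 s, the time base convert_sub_to_old_ass_form() rescales to) as H:MM:SS.CC. A worked example: 123456 centiseconds is 0 h, 20 min, 34 s and 56 cs, i.e. "0:20:34.56,". The sketch below repeats the arithmetic outside the decoder (function name is mine).

#include <libavutil/bprint.h>

static void demo_insert_ts(void)
{
    AVBPrint b;
    av_bprint_init(&b, 0, AV_BPRINT_SIZE_AUTOMATIC);
    int ts = 123456;                        /* 1234.56 seconds */
    int h = ts / 360000; ts -= 360000 * h;  /* hours   */
    int m = ts / 6000;   ts -= 6000 * m;    /* minutes */
    int s = ts / 100;    ts -= 100 * s;     /* seconds, remainder is centiseconds */
    av_bprintf(&b, "%d:%02d:%02d.%02d,", h, m, s, ts);  /* "0:20:34.56," */
    av_bprint_finalize(&b, NULL);
}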
943 
944 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
945 {
946  int i;
947  AVBPrint buf;
948 
949  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
950 
951  for (i = 0; i < sub->num_rects; i++) {
952  char *final_dialog;
953  const char *dialog;
954  AVSubtitleRect *rect = sub->rects[i];
955  int ts_start, ts_duration = -1;
956  long int layer;
957 
958  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
959  continue;
960 
961  av_bprint_clear(&buf);
962 
963  /* skip ReadOrder */
964  dialog = strchr(rect->ass, ',');
965  if (!dialog)
966  continue;
967  dialog++;
968 
969  /* extract Layer or Marked */
970  layer = strtol(dialog, (char**)&dialog, 10);
971  if (*dialog != ',')
972  continue;
973  dialog++;
974 
975  /* rescale timing to ASS time base (ms) */
976  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
977  if (pkt->duration != -1)
978  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
979  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
980 
981  /* construct ASS (standalone file form with timestamps) string */
982  av_bprintf(&buf, "Dialogue: %ld,", layer);
983  insert_ts(&buf, ts_start);
984  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
985  av_bprintf(&buf, "%s\r\n", dialog);
986 
987  final_dialog = av_strdup(buf.str);
988  if (!av_bprint_is_complete(&buf) || !final_dialog) {
989  av_freep(&final_dialog);
990  av_bprint_finalize(&buf, NULL);
991  return AVERROR(ENOMEM);
992  }
993  av_freep(&rect->ass);
994  rect->ass = final_dialog;
995  }
996 
997  av_bprint_finalize(&buf, NULL);
998  return 0;
999 }
1000 #endif
1001 
1002 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
1003  int *got_sub_ptr,
1004  AVPacket *avpkt)
1005 {
1006  int i, ret = 0;
1007  AVCodecInternal *avci = avctx->internal;
1008 
1009  if (!avpkt->data && avpkt->size) {
1010  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1011  return AVERROR(EINVAL);
1012  }
1013  if (!avctx->codec)
1014  return AVERROR(EINVAL);
1015  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1016  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1017  return AVERROR(EINVAL);
1018  }
1019 
1020  *got_sub_ptr = 0;
1021  get_subtitle_defaults(sub);
1022 
1023  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1024  AVPacket pkt_recoded;
1025  AVPacket tmp = *avpkt;
1026 #if FF_API_MERGE_SD
1027 FF_DISABLE_DEPRECATION_WARNINGS
1028  int did_split = avci->compat_decode_partial_size ?
1029  ff_packet_split_and_drop_side_data(&tmp) :
1030  av_packet_split_side_data(&tmp);
1031  //apply_param_change(avctx, &tmp);
1032 
1033  if (did_split) {
1034  /* FFMIN() prevents overflow in case the packet wasn't allocated with
1035  * proper padding.
1036  * If the side data is smaller than the buffer padding size, the
1037  * remaining bytes should have already been filled with zeros by the
1038  * original packet allocation anyway. */
1039  memset(tmp.data + tmp.size, 0,
1040  FFMIN(avpkt->size - tmp.size, AV_INPUT_BUFFER_PADDING_SIZE));
1041  }
1042 FF_ENABLE_DEPRECATION_WARNINGS
1043 #endif
1044 
1045  pkt_recoded = tmp;
1046  ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
1047  if (ret < 0) {
1048  *got_sub_ptr = 0;
1049  } else {
1050  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1051  if (ret < 0)
1052  return ret;
1053 
1054  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1055  sub->pts = av_rescale_q(avpkt->pts,
1056  avctx->pkt_timebase, AV_TIME_BASE_Q);
1057  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1058  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1059  !!*got_sub_ptr >= !!sub->num_rects);
1060 
1061 #if FF_API_ASS_TIMING
1062  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1063  && *got_sub_ptr && sub->num_rects) {
1064  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1065  : avctx->time_base;
1066  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1067  if (err < 0)
1068  ret = err;
1069  }
1070 #endif
1071 
1072  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1073  avctx->pkt_timebase.num) {
1074  AVRational ms = { 1, 1000 };
1075  sub->end_display_time = av_rescale_q(avpkt->duration,
1076  avctx->pkt_timebase, ms);
1077  }
1078 
1079  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1080  sub->format = 0;
1081  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1082  sub->format = 1;
1083 
1084  for (i = 0; i < sub->num_rects; i++) {
1085  if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1086  av_log(avctx, AV_LOG_ERROR,
1087  "Invalid UTF-8 in decoded subtitles text; "
1088  "maybe missing -sub_charenc option\n");
1089  avsubtitle_free(sub);
1090  ret = AVERROR_INVALIDDATA;
1091  break;
1092  }
1093  }
1094 
1095  if (tmp.data != pkt_recoded.data) { // did we recode?
1096  /* prevent from destroying side data from original packet */
1097  pkt_recoded.side_data = NULL;
1098  pkt_recoded.side_data_elems = 0;
1099 
1100  av_packet_unref(&pkt_recoded);
1101  }
1102  }
1103 
1104 #if FF_API_MERGE_SD
1105  if (did_split) {
1106  av_packet_free_side_data(&tmp);
1107  if(ret == tmp.size)
1108  ret = avpkt->size;
1109  }
1110 #endif
1111 
1112  if (*got_sub_ptr)
1113  avctx->frame_number++;
1114  }
1115 
1116  return ret;
1117 }
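Note (not part of decode.c): subtitle decoding still goes through this dedicated entry point rather than the send/receive API. A minimal caller-side sketch follows (demuxing and rendering omitted, helper name is mine).

#include <libavcodec/avcodec.h>

static int decode_subtitle_packet(AVCodecContext *dec, AVPacket *pkt)
{
    AVSubtitle sub;
    int got = 0;
    int ret = avcodec_decode_subtitle2(dec, &sub, &got, pkt);
    if (ret >= 0 && got) {
        /* ... use sub.rects[0 .. sub.num_rects-1] ... */
        avsubtitle_free(&sub);
    }
    return ret;
}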
1118 
1119 static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
1120 {
1121  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
1122  return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
1123 }
1124 
1125 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
1126 {
1127  while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt))
1128  ++fmt;
1129  return fmt[0];
1130 }
1131 
1132 static AVHWAccel *find_hwaccel(enum AVCodecID codec_id,
1133  enum AVPixelFormat pix_fmt)
1134 {
1135  AVHWAccel *hwaccel = NULL;
1136 
1137  while ((hwaccel = av_hwaccel_next(hwaccel)))
1138  if (hwaccel->id == codec_id
1139  && hwaccel->pix_fmt == pix_fmt)
1140  return hwaccel;
1141  return NULL;
1142 }
1143 
1144 static int setup_hwaccel(AVCodecContext *avctx,
1145  const enum AVPixelFormat fmt,
1146  const char *name)
1147 {
1148  AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt);
1149  int ret = 0;
1150 
1151  if (!hwa) {
1152  av_log(avctx, AV_LOG_ERROR,
1153  "Could not find an AVHWAccel for the pixel format: %s",
1154  name);
1155  return AVERROR(ENOENT);
1156  }
1157 
1158  if (hwa->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1159  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1160  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1161  hwa->name);
1162  return AVERROR_PATCHWELCOME;
1163  }
1164 
1165  if (hwa->priv_data_size) {
1166  avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size);
1167  if (!avctx->internal->hwaccel_priv_data)
1168  return AVERROR(ENOMEM);
1169  }
1170 
1171  avctx->hwaccel = hwa;
1172  if (hwa->init) {
1173  ret = hwa->init(avctx);
1174  if (ret < 0) {
1175  av_freep(&avctx->internal->hwaccel_priv_data);
1176  avctx->hwaccel = NULL;
1177  return ret;
1178  }
1179  }
1180 
1181  return 0;
1182 }
1183 
1184 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1185 {
1186  const AVPixFmtDescriptor *desc;
1187  enum AVPixelFormat *choices;
1188  enum AVPixelFormat ret;
1189  unsigned n = 0;
1190 
1191  while (fmt[n] != AV_PIX_FMT_NONE)
1192  ++n;
1193 
1194  av_assert0(n >= 1);
1195  avctx->sw_pix_fmt = fmt[n - 1];
1197 
1198  choices = av_malloc_array(n + 1, sizeof(*choices));
1199  if (!choices)
1200  return AV_PIX_FMT_NONE;
1201 
1202  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1203 
1204  for (;;) {
1205  if (avctx->hwaccel && avctx->hwaccel->uninit)
1206  avctx->hwaccel->uninit(avctx);
1207  av_freep(&avctx->internal->hwaccel_priv_data);
1208  avctx->hwaccel = NULL;
1209 
1210  av_buffer_unref(&avctx->hw_frames_ctx);
1211 
1212  ret = avctx->get_format(avctx, choices);
1213 
1214  desc = av_pix_fmt_desc_get(ret);
1215  if (!desc) {
1216  ret = AV_PIX_FMT_NONE;
1217  break;
1218  }
1219 
1220  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1221  break;
1222 #if FF_API_CAP_VDPAU
1223  if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
1224  break;
1225 #endif
1226 
1227  if (avctx->hw_frames_ctx) {
1228  AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1229  if (hw_frames_ctx->format != ret) {
1230  av_log(avctx, AV_LOG_ERROR, "Format returned from get_buffer() "
1231  "does not match the format of provided AVHWFramesContext\n");
1232  ret = AV_PIX_FMT_NONE;
1233  break;
1234  }
1235  }
1236 
1237  if (!setup_hwaccel(avctx, ret, desc->name))
1238  break;
1239 
1240  /* Remove failed hwaccel from choices */
1241  for (n = 0; choices[n] != ret; n++)
1242  av_assert0(choices[n] != AV_PIX_FMT_NONE);
1243 
1244  do
1245  choices[n] = choices[n + 1];
1246  while (choices[n++] != AV_PIX_FMT_NONE);
1247  }
1248 
1249  av_freep(&choices);
1250  return ret;
1251 }
1252 
1253 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1254 {
1255  FramePool *pool = avctx->internal->pool;
1256  int i, ret;
1257 
1258  switch (avctx->codec_type) {
1259  case AVMEDIA_TYPE_VIDEO: {
1260  uint8_t *data[4];
1261  int linesize[4];
1262  int size[4] = { 0 };
1263  int w = frame->width;
1264  int h = frame->height;
1265  int tmpsize, unaligned;
1266 
1267  if (pool->format == frame->format &&
1268  pool->width == frame->width && pool->height == frame->height)
1269  return 0;
1270 
1271  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1272 
1273  do {
1274  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1275  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1276  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1277  if (ret < 0)
1278  return ret;
1279  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1280  w += w & ~(w - 1);
1281 
1282  unaligned = 0;
1283  for (i = 0; i < 4; i++)
1284  unaligned |= linesize[i] % pool->stride_align[i];
1285  } while (unaligned);
1286 
1287  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1288  NULL, linesize);
1289  if (tmpsize < 0)
1290  return -1;
1291 
1292  for (i = 0; i < 3 && data[i + 1]; i++)
1293  size[i] = data[i + 1] - data[i];
1294  size[i] = tmpsize - (data[i] - data[0]);
1295 
1296  for (i = 0; i < 4; i++) {
1297  av_buffer_pool_uninit(&pool->pools[i]);
1298  pool->linesize[i] = linesize[i];
1299  if (size[i]) {
1300  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1301  CONFIG_MEMORY_POISONING ?
1302  NULL :
1303  av_buffer_allocz);
1304  if (!pool->pools[i]) {
1305  ret = AVERROR(ENOMEM);
1306  goto fail;
1307  }
1308  }
1309  }
1310  pool->format = frame->format;
1311  pool->width = frame->width;
1312  pool->height = frame->height;
1313 
1314  break;
1315  }
1316  case AVMEDIA_TYPE_AUDIO: {
1317  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1318  int planar = av_sample_fmt_is_planar(frame->format);
1319  int planes = planar ? ch : 1;
1320 
1321  if (pool->format == frame->format && pool->planes == planes &&
1322  pool->channels == ch && frame->nb_samples == pool->samples)
1323  return 0;
1324 
1325  av_buffer_pool_uninit(&pool->pools[0]);
1326  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1327  frame->nb_samples, frame->format, 0);
1328  if (ret < 0)
1329  goto fail;
1330 
1331  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1332  if (!pool->pools[0]) {
1333  ret = AVERROR(ENOMEM);
1334  goto fail;
1335  }
1336 
1337  pool->format = frame->format;
1338  pool->planes = planes;
1339  pool->channels = ch;
1340  pool->samples = frame->nb_samples;
1341  break;
1342  }
1343  default: av_assert0(0);
1344  }
1345  return 0;
1346 fail:
1347  for (i = 0; i < 4; i++)
1348  av_buffer_pool_uninit(&pool->pools[i]);
1349  pool->format = -1;
1350  pool->planes = pool->channels = pool->samples = 0;
1351  pool->width = pool->height = 0;
1352  return ret;
1353 }
1354 
1355 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1356 {
1357  FramePool *pool = avctx->internal->pool;
1358  int planes = pool->planes;
1359  int i;
1360 
1361  frame->linesize[0] = pool->linesize[0];
1362 
1363  if (planes > AV_NUM_DATA_POINTERS) {
1364  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1365  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1366  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1367  sizeof(*frame->extended_buf));
1368  if (!frame->extended_data || !frame->extended_buf) {
1369  av_freep(&frame->extended_data);
1370  av_freep(&frame->extended_buf);
1371  return AVERROR(ENOMEM);
1372  }
1373  } else {
1374  frame->extended_data = frame->data;
1375  av_assert0(frame->nb_extended_buf == 0);
1376  }
1377 
1378  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1379  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1380  if (!frame->buf[i])
1381  goto fail;
1382  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1383  }
1384  for (i = 0; i < frame->nb_extended_buf; i++) {
1385  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1386  if (!frame->extended_buf[i])
1387  goto fail;
1388  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1389  }
1390 
1391  if (avctx->debug & FF_DEBUG_BUFFERS)
1392  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1393 
1394  return 0;
1395 fail:
1396  av_frame_unref(frame);
1397  return AVERROR(ENOMEM);
1398 }
1399 
1400 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1401 {
1402  FramePool *pool = s->internal->pool;
1403  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1404  int i;
1405 
1406  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1407  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1408  return -1;
1409  }
1410 
1411  if (!desc) {
1412  av_log(s, AV_LOG_ERROR,
1413  "Unable to get pixel format descriptor for format %s\n",
1414  av_get_pix_fmt_name(pic->format));
1415  return AVERROR(EINVAL);
1416  }
1417 
1418  memset(pic->data, 0, sizeof(pic->data));
1419  pic->extended_data = pic->data;
1420 
1421  for (i = 0; i < 4 && pool->pools[i]; i++) {
1422  pic->linesize[i] = pool->linesize[i];
1423 
1424  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1425  if (!pic->buf[i])
1426  goto fail;
1427 
1428  pic->data[i] = pic->buf[i]->data;
1429  }
1430  for (; i < AV_NUM_DATA_POINTERS; i++) {
1431  pic->data[i] = NULL;
1432  pic->linesize[i] = 0;
1433  }
1434  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1435  desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
1436  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1437 
1438  if (s->debug & FF_DEBUG_BUFFERS)
1439  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1440 
1441  return 0;
1442 fail:
1443  av_frame_unref(pic);
1444  return AVERROR(ENOMEM);
1445 }
1446 
1447 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1448 {
1449  int ret;
1450 
1451  if (avctx->hw_frames_ctx) {
1452  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1453  frame->width = avctx->coded_width;
1454  frame->height = avctx->coded_height;
1455  return ret;
1456  }
1457 
1458  if ((ret = update_frame_pool(avctx, frame)) < 0)
1459  return ret;
1460 
1461  switch (avctx->codec_type) {
1462  case AVMEDIA_TYPE_VIDEO:
1463  return video_get_buffer(avctx, frame);
1464  case AVMEDIA_TYPE_AUDIO:
1465  return audio_get_buffer(avctx, frame);
1466  default:
1467  return -1;
1468  }
1469 }
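Note (not part of decode.c): ff_get_buffer() below ends up calling AVCodecContext.get_buffer2, which defaults to avcodec_default_get_buffer2() above. A custom callback must either allocate AVBufferRef-backed planes itself or, as in this sketch, just observe the request and delegate to the default (callback name is mine; it would be set before avcodec_open2()).

#include <libavcodec/avcodec.h>
#include <libavutil/log.h>

static int logging_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "get_buffer2: %dx%d / %d samples, format %d\n",
           frame->width, frame->height, frame->nb_samples, frame->format);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}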
1470 
1471 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1472 {
1473  int size;
1474  const uint8_t *side_metadata;
1475 
1476  AVDictionary **frame_md = &frame->metadata;
1477 
1478  side_metadata = av_packet_get_side_data(avpkt,
1479  AV_PKT_DATA_STRINGS_METADATA, &size);
1480  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1481 }
1482 
1483 int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
1484 {
1485  const AVPacket *pkt = avctx->internal->last_pkt_props;
1486  int i;
1487  static const struct {
1488  enum AVPacketSideDataType packet;
1489  enum AVFrameSideDataType frame;
1490  } sd[] = {
1491  { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN },
1492  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1493  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1494  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1495  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1496  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1497  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1498  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1499  };
1500 
1501  if (pkt) {
1502  frame->pts = pkt->pts;
1503 #if FF_API_PKT_PTS
1504 FF_DISABLE_DEPRECATION_WARNINGS
1505  frame->pkt_pts = pkt->pts;
1506 FF_ENABLE_DEPRECATION_WARNINGS
1507 #endif
1508  frame->pkt_pos = pkt->pos;
1509  frame->pkt_duration = pkt->duration;
1510  frame->pkt_size = pkt->size;
1511 
1512  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1513  int size;
1514  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1515  if (packet_sd) {
1516  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1517  sd[i].frame,
1518  size);
1519  if (!frame_sd)
1520  return AVERROR(ENOMEM);
1521 
1522  memcpy(frame_sd->data, packet_sd, size);
1523  }
1524  }
1525  add_metadata_from_side_data(pkt, frame);
1526 
1527  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1528  frame->flags |= AV_FRAME_FLAG_DISCARD;
1529  } else {
1530  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1531  }
1532  }
1533  frame->reordered_opaque = avctx->reordered_opaque;
1534 
1535  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1536  frame->color_primaries = avctx->color_primaries;
1537  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1538  frame->color_trc = avctx->color_trc;
1539  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1540  frame->colorspace = avctx->colorspace;
1541  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1542  frame->color_range = avctx->color_range;
1543  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1544  frame->chroma_location = avctx->chroma_sample_location;
1545 
1546  switch (avctx->codec->type) {
1547  case AVMEDIA_TYPE_VIDEO:
1548  frame->format = avctx->pix_fmt;
1549  if (!frame->sample_aspect_ratio.num)
1550  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1551 
1552  if (frame->width && frame->height &&
1553  av_image_check_sar(frame->width, frame->height,
1554  frame->sample_aspect_ratio) < 0) {
1555  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1556  frame->sample_aspect_ratio.num,
1557  frame->sample_aspect_ratio.den);
1558  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1559  }
1560 
1561  break;
1562  case AVMEDIA_TYPE_AUDIO:
1563  if (!frame->sample_rate)
1564  frame->sample_rate = avctx->sample_rate;
1565  if (frame->format < 0)
1566  frame->format = avctx->sample_fmt;
1567  if (!frame->channel_layout) {
1568  if (avctx->channel_layout) {
1569  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1570  avctx->channels) {
1571  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1572  "configuration.\n");
1573  return AVERROR(EINVAL);
1574  }
1575 
1576  frame->channel_layout = avctx->channel_layout;
1577  } else {
1578  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1579  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1580  avctx->channels);
1581  return AVERROR(ENOSYS);
1582  }
1583  }
1584  }
1585  frame->channels = avctx->channels;
1586  break;
1587  }
1588  return 0;
1589 }
1590 
1591 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1592 {
1593  return ff_init_buffer_info(avctx, frame);
1594 }
1595 
1596 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1597 {
1598  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1599  int i;
1600  int num_planes = av_pix_fmt_count_planes(frame->format);
1601  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1602  int flags = desc ? desc->flags : 0;
1603  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1604  num_planes = 2;
1605  for (i = 0; i < num_planes; i++) {
1606  av_assert0(frame->data[i]);
1607  }
1608  // For now do not enforce anything for palette of pseudopal formats
1609  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PSEUDOPAL))
1610  num_planes = 2;
1611  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1612  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1613  if (frame->data[i])
1614  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1615  frame->data[i] = NULL;
1616  }
1617  }
1618 }
1619 
1620 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1621 {
1622  const AVHWAccel *hwaccel = avctx->hwaccel;
1623  int override_dimensions = 1;
1624  int ret;
1625 
1626  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1627  if ((ret = av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1628  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1629  return AVERROR(EINVAL);
1630  }
1631 
1632  if (frame->width <= 0 || frame->height <= 0) {
1633  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1634  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1635  override_dimensions = 0;
1636  }
1637 
1638  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1639  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1640  return AVERROR(EINVAL);
1641  }
1642  }
1643  ret = ff_decode_frame_props(avctx, frame);
1644  if (ret < 0)
1645  return ret;
1646 
1647  if (hwaccel) {
1648  if (hwaccel->alloc_frame) {
1649  ret = hwaccel->alloc_frame(avctx, frame);
1650  goto end;
1651  }
1652  } else
1653  avctx->sw_pix_fmt = avctx->pix_fmt;
1654 
1655  ret = avctx->get_buffer2(avctx, frame, flags);
1656  if (ret >= 0)
1657  validate_avframe_allocation(avctx, frame);
1658 
1659 end:
1660  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1661  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1662  frame->width = avctx->width;
1663  frame->height = avctx->height;
1664  }
1665 
1666  return ret;
1667 }
1668 
1669 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1670 {
1671  int ret = get_buffer_internal(avctx, frame, flags);
1672  if (ret < 0) {
1673  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1674  frame->width = frame->height = 0;
1675  }
1676  return ret;
1677 }
1678 
1679 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
1680 {
1681  AVFrame *tmp;
1682  int ret;
1683 
1684  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1685 
1686  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1687  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1688  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1689  av_frame_unref(frame);
1690  }
1691 
1692  ff_init_buffer_info(avctx, frame);
1693 
1694  if (!frame->data[0])
1695  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1696 
1697  if (av_frame_is_writable(frame))
1698  return ff_decode_frame_props(avctx, frame);
1699 
1700  tmp = av_frame_alloc();
1701  if (!tmp)
1702  return AVERROR(ENOMEM);
1703 
1704  av_frame_move_ref(tmp, frame);
1705 
1706  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1707  if (ret < 0) {
1708  av_frame_free(&tmp);
1709  return ret;
1710  }
1711 
1712  av_frame_copy(frame, tmp);
1713  av_frame_free(&tmp);
1714 
1715  return 0;
1716 }
1717 
1718 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
1719 {
1720  int ret = reget_buffer_internal(avctx, frame);
1721  if (ret < 0)
1722  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1723  return ret;
1724 }
1725 
1726 void avcodec_flush_buffers(AVCodecContext *avctx)
1727 {
1728  avctx->internal->draining = 0;
1729  avctx->internal->draining_done = 0;
1730  avctx->internal->nb_draining_errors = 0;
1731  av_frame_unref(avctx->internal->buffer_frame);
1732  av_frame_unref(avctx->internal->compat_decode_frame);
1733  av_packet_unref(avctx->internal->buffer_pkt);
1734  avctx->internal->buffer_pkt_valid = 0;
1735 
1736  av_packet_unref(avctx->internal->ds.in_pkt);
1737 
1738  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
1739  ff_thread_flush(avctx);
1740  else if (avctx->codec->flush)
1741  avctx->codec->flush(avctx);
1742 
1743  avctx->pts_correction_last_pts =
1744  avctx->pts_correction_last_dts = INT64_MIN;
1745 
1746  ff_decode_bsfs_uninit(avctx);
1747 
1748  if (!avctx->refcounted_frames)
1749  av_frame_unref(avctx->internal->to_free);
1750 }
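Note (not part of decode.c): avcodec_flush_buffers() resets the draining state, the buffered packet/frame and the pts-correction history so the context can be fed new data; the typical use is after a seek. A minimal sketch follows (names are mine, stream selection and error handling abbreviated).

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static int seek_and_flush(AVFormatContext *ic, AVCodecContext *dec,
                          int stream_index, int64_t ts)
{
    int ret = av_seek_frame(ic, stream_index, ts, AVSEEK_FLAG_BACKWARD);
    if (ret < 0)
        return ret;
    avcodec_flush_buffers(dec);   /* drop buffered frames and clear draining state */
    return 0;
}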
1751 
1752 void ff_decode_bsfs_uninit(AVCodecContext *avctx)
1753 {
1754  DecodeFilterContext *s = &avctx->internal->filter;
1755  int i;
1756 
1757  for (i = 0; i < s->nb_bsfs; i++)
1758  av_bsf_free(&s->bsfs[i]);
1759  av_freep(&s->bsfs);
1760  s->nb_bsfs = 0;
1761 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:90
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2986
static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
Definition: decode.c:1119
int nb_draining_errors
Definition: internal.h:222
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:3518
attribute_deprecated int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:434
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1184
const struct AVCodec * codec
Definition: avcodec.h:1770
const char const char void * val
Definition: avisynth_c.h:771
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:3481
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
#define AV_NUM_DATA_POINTERS
Definition: frame.h:202
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5948
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: avcodec.h:1405
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3498
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:361
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3984
int stride_align[AV_NUM_DATA_POINTERS]
Definition: internal.h:114
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
int ff_packet_split_and_drop_side_data(AVPacket *pkt)
Definition: avpacket.c:483
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:3704
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:628
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:980
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1963
int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
does needed setup of pkt_pts/pos and such for (re)get_buffer();
Definition: decode.c:1483
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:473
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:3900
const char * fmt
Definition: avisynth_c.h:769
void(* flush)(AVCodecContext *)
Flush buffers.
Definition: avcodec.h:3845
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:174
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:944
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2459
AVFrame * to_free
Definition: internal.h:161
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1699
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:845
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:211
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:615
const char * desc
Definition: nvenc.c:60
int width
Definition: internal.h:113
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: avcodec.h:1479
ATSC A53 Part 4 Closed Captions.
Definition: avcodec.h:1607
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2498
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:411
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5914
int size
Definition: avcodec.h:1680
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:3894
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2172
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:767
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
int samples
Definition: internal.h:118
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:366
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:837
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:206
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:4132
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:531
enum AVMediaType type
Definition: avcodec.h:3752
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:46
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1620
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:852
AVBufferPool * pools[4]
Pools for each data plane.
Definition: internal.h:107
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
Definition: avcodec.h:3822
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1591
size_t crop_bottom
Definition: frame.h:560
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1002
static int utf8_check(const uint8_t *str)
Definition: decode.c:910
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:1718
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:686
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
Definition: decode.c:1752
Mastering display metadata (based on SMPTE-2086:2014).
Definition: avcodec.h:1587
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:791
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1898
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:4133
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:174
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:81
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:3082
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1027
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:196
DecodeFilterContext filter
Definition: internal.h:168
int height
Definition: internal.h:113
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Definition: decode.c:1125
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2531
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:545
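A short sketch of reading a packed dictionary back out of packet side data, in the spirit of add_metadata_from_side_data() in this file; the AV_PKT_DATA_STRINGS_METADATA type and the helper name are illustrative choices.
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int read_packet_metadata(const AVPacket *pkt, AVDictionary **dict)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA, &size);
    if (!sd)
        return 0;                      /* no string-metadata side data attached */
    return av_packet_unpack_dictionary(sd, size, dict);
}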
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:334
size_t crop_left
Definition: frame.h:561
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1697
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1473
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:872
swresample sample-format conversion helpers (CONV_FUNC/FMT_PAIR_FUNC macro expansion elided)
Definition: audioconvert.c:56
#define AV_WL8(p, d)
Definition: intreadwrite.h:404
Multithreading support functions.
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:293
#define emms_c()
Definition: internal.h:54
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
static AVFrame * frame
int planes
Definition: internal.h:116
Structure to hold side data for an AVFrame.
Definition: frame.h:163
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:286
size_t compat_decode_consumed
Definition: internal.h:211
static void finish(void)
Definition: movenc.c:344
uint8_t * data
Definition: avcodec.h:1679
static int flags
Definition: log.c:57
#define AV_CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:1038
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:488
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:3172
ptrdiff_t size
Definition: opengl_enc.c:101
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:184
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:255
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2505
#define av_log(a,...)
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:627
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
FramePool * pool
Definition: internal.h:163
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:261
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
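A small sketch of av_rescale_q() converting a timestamp between time bases; the microsecond target base and helper name are illustrative, not something decode.c itself defines.
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Rescale a pts from the stream time base to microseconds. */
static int64_t pts_to_microseconds(int64_t pts, AVRational stream_time_base)
{
    return av_rescale_q(pts, stream_time_base, (AVRational){ 1, 1000000 });
}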
#define AV_RL8(x)
Definition: intreadwrite.h:403
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3474
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: avcodec.h:214
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2279
int width
Definition: frame.h:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2083
#define AV_BPRINT_SIZE_UNLIMITED
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1596
#define AVERROR(e)
Definition: error.h:43
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: avcodec.h:1437
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:821
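A sketch of how decoder-exported cropping fields are consumed, in the spirit of apply_cropping() in this file; whether unaligned cropping is allowed is treated here as a caller decision.
#include <libavutil/frame.h>

static int crop_decoded_frame(AVFrame *frame, int allow_unaligned)
{
    int flags = allow_unaligned ? AV_FRAME_CROP_UNALIGNED : 0;
    /* uses frame->crop_left/right/top/bottom as set by the decoder */
    return av_frame_apply_cropping(frame, flags);
}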
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
void av_packet_free_side_data(AVPacket *pkt)
Convenience function to free all the side data stored.
Definition: avpacket.c:270
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:3499
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3211
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
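A minimal AVBPrint sketch, following the pattern the subtitle recoding and insert_ts() code relies on: grow a string in a dynamic buffer, then finalize it into a heap copy. The function name and format string are illustrative.
#include <libavutil/bprint.h>
#include <libavutil/mem.h>

static char *format_label(int n)
{
    AVBPrint bp;
    char *out = NULL;
    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
    av_bprintf(&bp, "event %d", n);
    if (av_bprint_is_complete(&bp) && av_bprint_finalize(&bp, &out) >= 0)
        return out;                  /* caller frees with av_free() */
    av_bprint_finalize(&bp, NULL);   /* discard on truncation or error */
    return NULL;
}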
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:2137
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:713
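A hedged sketch of the decoupled send/receive loop that avcodec_send_packet() and avcodec_receive_frame() are designed for; passing a NULL packet starts draining. The wrapper name is illustrative.
#include <libavcodec/avcodec.h>

/* Feed one packet (or NULL to start draining) and consume all resulting frames. */
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                /* need more input, or fully drained */
        if (ret < 0)
            return ret;              /* real decoding error */
        /* ... use frame ... */
        av_frame_unref(frame);
    }
}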
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:124
AVFrame * buffer_frame
Definition: internal.h:204
int capabilities
Codec capabilities.
Definition: avcodec.h:3758
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:446
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1662
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:457
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5954
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: avcodec.h:1691
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3646
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1679
#define FFMAX(a, b)
Definition: common.h:94
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:229
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:459
#define fail()
Definition: checkasm.h:109
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to...
Definition: avstring.c:149
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:740
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:3998
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2574
uint32_t end_display_time
Definition: avcodec.h:4131
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:4134
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:379
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:719
size_t crop_top
Definition: frame.h:559
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:586
int channels
number of audio channels, only used for audio.
Definition: frame.h:506
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:439
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:3050
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3203
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3873
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:407
int channels
Definition: internal.h:117
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:3914
AVFrame * compat_decode_frame
Definition: internal.h:215
static AVHWAccel * find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt)
Definition: decode.c:1132
int width
picture width / height.
Definition: avcodec.h:1948
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3616
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1471
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5960
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:748
AVPacket * in_pkt
Definition: internal.h:122
#define AV_PIX_FMT_FLAG_PSEUDOPAL
The pixel format is "pseudo-paletted".
Definition: pixdesc.h:158
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: avcodec.h:1593
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:175
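A sketch of the bitstream-filter setup and packet loop that the av_bsf_* entries on this page belong to, similar in shape to what bsfs_init()/bsfs_poll() do internally; names and error handling are simplified.
#include <libavcodec/avcodec.h>

/* Create a named bitstream filter and run one packet through it. */
static int bsf_filter_packet(const char *bsf_name, const AVCodecParameters *par,
                             AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(bsf_name);
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
        (ret = av_bsf_init(bsf)) < 0 ||
        (ret = av_bsf_send_packet(bsf, pkt)) < 0)
        goto end;

    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        /* ... consume the filtered packet ... */
        av_packet_unref(pkt);
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;
end:
    av_bsf_free(&bsf);
    return ret;
}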
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2477
AVFrameSideDataType
Definition: frame.h:48
uint16_t format
Definition: avcodec.h:4129
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:3028
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:3075
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:3061
int n
Definition: avisynth_c.h:684
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: avcodec.h:3856
enum AVCodecID codec_id
Definition: vaapi_decode.c:235
DecodeSimpleContext ds
Definition: internal.h:167
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:3507
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1355
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:197
#define FF_ARRAY_ELEMS(a)
int linesize[4]
Definition: internal.h:115
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:3515
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1726
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
Content light level (based on CTA-861.3).
Definition: avcodec.h:1600
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:646
#define attribute_align_arg
Definition: internal.h:61
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:237
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1447
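A hedged sketch of a user get_buffer2() callback; this one simply delegates to the default allocator documented above, which is a valid minimal implementation for AV_CODEC_CAP_DR1 decoders. The callback name is illustrative.
#include <libavcodec/avcodec.h>

static int my_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
{
    /* a real allocator would hand out its own AVBufferRefs here; falling
     * back to the default pool-based allocator keeps the sketch correct */
    return avcodec_default_get_buffer2(s, frame, flags);
}

/* installed before avcodec_open2(): avctx->get_buffer2 = my_get_buffer2; */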
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:1769
int compat_decode_warned
Definition: internal.h:208
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:481
A list of zero terminated key/value strings.
Definition: avcodec.h:1537
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:830
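A sketch of the legacy call pattern that compat_decode() emulates on top of the new API; got_picture signalling is the key difference from avcodec_receive_frame(). The wrapper name is illustrative.
#include <libavcodec/avcodec.h>

static int legacy_decode_video(AVCodecContext *avctx, AVFrame *picture,
                               const AVPacket *pkt)
{
    int got_picture = 0;
    int ret = avcodec_decode_video2(avctx, picture, &got_picture, pkt);
    if (ret < 0)
        return ret;          /* otherwise ret is the number of bytes consumed */
    if (got_picture) {
        /* ... use picture ... */
        av_frame_unref(picture);
    }
    return 0;
}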
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
enum AVCodecID codec_id
Definition: avcodec.h:1778
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:543
byte (bytestream.h read/write template macros; expansion elided)
Definition: bytestream.h:87
int sample_rate
samples per second
Definition: avcodec.h:2523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
int debug
debug
Definition: avcodec.h:3003
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
main external API structure.
Definition: avcodec.h:1761
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
int skip_samples_multiplier
Definition: internal.h:219
uint8_t * data
The data buffer.
Definition: buffer.h:89
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1144
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:289
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1669
uint8_t * data
Definition: frame.h:165
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:762
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void * buf
Definition: avisynth_c.h:690
size_t crop_right
Definition: frame.h:562
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1963
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:369
int sample_rate
Sample rate of the audio data.
Definition: frame.h:374
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:2039
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
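A sketch of the two-step linesize/pointer fill used when sizing and wiring a video buffer by hand (compare video_get_buffer() in this file); the plain av_malloc() stands in for the pooled buffers the real code uses, and the helper name is illustrative.
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

static uint8_t *alloc_picture(uint8_t *data[4], int linesizes[4],
                              enum AVPixelFormat pix_fmt, int w, int h)
{
    uint8_t *buf;
    int size;

    if (av_image_fill_linesizes(linesizes, pix_fmt, w) < 0)
        return NULL;
    /* first pass with a NULL base pointer just computes the total size */
    size = av_image_fill_pointers(data, pix_fmt, h, NULL, linesizes);
    if (size < 0 || !(buf = av_malloc(size)))
        return NULL;
    av_image_fill_pointers(data, pix_fmt, h, buf, linesizes);
    return buf;            /* caller frees with av_free() */
}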
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:674
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1052
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
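A small sketch of sizing an audio buffer with av_samples_get_buffer_size(); the planar S16 format and default alignment are illustrative choices.
#include <libavutil/samplefmt.h>

static int planar_s16_buffer_size(int nb_channels, int nb_samples)
{
    int linesize;   /* size of one plane, also filled in by the call */
    return av_samples_get_buffer_size(&linesize, nb_channels, nb_samples,
                                      AV_SAMPLE_FMT_S16P, 0);
}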
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2491
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2484
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2297
Recommends skipping the specified number of samples.
Definition: avcodec.h:1521
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:121
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:3623
int buffer_pkt_valid
Definition: internal.h:203
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:187
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2678
#define STRIDE_ALIGN
Definition: internal.h:99
enum AVChromaLocation chroma_location
Definition: frame.h:459
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:466
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:2694
AVHWAccel * av_hwaccel_next(const AVHWAccel *hwaccel)
If hwaccel is NULL, returns the first registered hardware accelerator, if hwaccel is non-NULL...
Definition: utils.c:1955
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
static int64_t pts
Global timestamp for the audio frames.
#define SIZE_SPECIFIER
Definition: internal.h:255
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: avcodec.h:1464
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
#define UTF8_MAX_BYTES
Definition: decode.c:851
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:302
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:202
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:310
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:137
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: avcodec.h:1718
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
common internal and external API header
if(ret< 0)
Definition: vf_mcdeint.c:279
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
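A sketch of the buffer-pool lifecycle behind FramePool: create a pool once, take reference-counted buffers from it per frame, and uninit when done; the 4096-byte size is arbitrary and the function name is illustrative.
#include <libavutil/buffer.h>

static void pool_roundtrip(void)
{
    AVBufferPool *pool = av_buffer_pool_init(4096, NULL); /* NULL -> default allocator */
    AVBufferRef  *buf;

    if (!pool)
        return;
    if ((buf = av_buffer_pool_get(pool))) {
        /* ... write into buf->data, hand the ref to an AVFrame, etc. ... */
        av_buffer_unref(&buf);
    }
    av_buffer_pool_uninit(&pool);  /* freed once all outstanding refs are gone */
}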
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:4010
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1073
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3992
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:930
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:192
enum AVCodecID id
Codec implemented by the hardware accelerator.
Definition: avcodec.h:3887
int caps_internal
Internal codec capabilities.
Definition: avcodec.h:3850
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:252
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:777
AVBSFContext ** bsfs
Definition: internal.h:127
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1400
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:4088
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
static int setup_hwaccel(AVCodecContext *avctx, const enum AVPixelFormat fmt, const char *name)
Definition: decode.c:1144
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int channels
number of audio channels
Definition: avcodec.h:2524
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1811
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:4123
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:431
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1863
enum AVColorPrimaries color_primaries
Definition: frame.h:448
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:3500
size_t compat_decode_partial_size
Definition: internal.h:214
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:913
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2554
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1253
int height
Definition: frame.h:259
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Number of incorrect PTS values so far.
Definition: avcodec.h:3497
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:450
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
Recommends skipping the specified number of samples.
Definition: frame.h:108
#define av_malloc_array(a, b)
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:3626
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2335
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:4114
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:248
int format
Definition: internal.h:112
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:515
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AV_RL32 (bytestream.h read/write template macros; expansion elided)
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1656
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5942
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1397
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:267
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2981
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:1002
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1672
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:3467
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:603
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:431
static int bsfs_init(AVCodecContext *avctx)
Definition: decode.c:183
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
const char * name
Definition: opengl_enc.c:103
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: avcodec.h:1485
static uint8_t tmp[11]
Definition: aes_ctr.c:26
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: avcodec.h:3840