FFmpeg
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40 
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "packet_internal.h"
47 #include "thread.h"
48 
49 typedef struct FramePool {
50  /**
51  * Pools for each data plane. For audio all the planes have the same size,
52  * so only pools[0] is used.
53  */
54  AVBufferPool *pools[4];
55 
56  /*
57  * Pool parameters
58  */
59  int format;
60  int width, height;
61  int stride_align[AV_NUM_DATA_POINTERS];
62  int linesize[4];
63  int planes;
64  int channels;
65  int samples;
66 } FramePool;
67 
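/* Parse AV_PKT_DATA_PARAM_CHANGE side data and apply it to the codec context:
 * a little-endian flags word is followed, depending on the flags, by a new
 * channel count, channel layout, sample rate and/or frame dimensions. */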
68 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
69 {
70  int size, ret;
71  const uint8_t *data;
72  uint32_t flags;
73  int64_t val;
74 
75  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
76  if (!data)
77  return 0;
78 
79  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
80  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
81  "changes, but PARAM_CHANGE side data was sent to it.\n");
82  ret = AVERROR(EINVAL);
83  goto fail2;
84  }
85 
86  if (size < 4)
87  goto fail;
88 
89  flags = bytestream_get_le32(&data);
90  size -= 4;
91 
92  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
93  if (size < 4)
94  goto fail;
95  val = bytestream_get_le32(&data);
96  if (val <= 0 || val > INT_MAX) {
97  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
98  ret = AVERROR_INVALIDDATA;
99  goto fail2;
100  }
101  avctx->channels = val;
102  size -= 4;
103  }
104  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
105  if (size < 8)
106  goto fail;
107  avctx->channel_layout = bytestream_get_le64(&data);
108  size -= 8;
109  }
110  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
111  if (size < 4)
112  goto fail;
113  val = bytestream_get_le32(&data);
114  if (val <= 0 || val > INT_MAX) {
115  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
116  ret = AVERROR_INVALIDDATA;
117  goto fail2;
118  }
119  avctx->sample_rate = val;
120  size -= 4;
121  }
122  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
123  if (size < 8)
124  goto fail;
125  avctx->width = bytestream_get_le32(&data);
126  avctx->height = bytestream_get_le32(&data);
127  size -= 8;
128  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
129  if (ret < 0)
130  goto fail2;
131  }
132 
133  return 0;
134 fail:
135  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
136  ret = AVERROR_INVALIDDATA;
137 fail2:
138  if (ret < 0) {
139  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
140  if (avctx->err_recognition & AV_EF_EXPLODE)
141  return ret;
142  }
143  return 0;
144 }
145 
146 #define IS_EMPTY(pkt) (!(pkt)->data)
147 
148 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
149 {
150  int ret = 0;
151 
152  ret = avpriv_packet_list_put(&avci->pkt_props, &avci->pkt_props_tail, pkt,
153  av_packet_copy_props, 0);
154  if (ret < 0)
155  return ret;
156  avci->pkt_props_tail->pkt.size = pkt->size; // HACK: Needed for ff_decode_frame_props().
157  avci->pkt_props_tail->pkt.data = (void*)1; // HACK: Needed for IS_EMPTY().
158 
159  if (IS_EMPTY(avci->last_pkt_props)) {
160  ret = avpriv_packet_list_get(&avci->pkt_props,
161  &avci->pkt_props_tail,
162  avci->last_pkt_props);
163  av_assert0(ret != AVERROR(EAGAIN));
164  }
165  return ret;
166 }
167 
168 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
169 {
170  int ret;
171 
172  /* move the original frame to our backup */
173  av_frame_unref(avci->to_free);
174  av_frame_move_ref(avci->to_free, frame);
175 
176  /* now copy everything except the AVBufferRefs back
177  * note that we make a COPY of the side data, so calling av_frame_free() on
178  * the caller's frame will work properly */
179  ret = av_frame_copy_props(frame, avci->to_free);
180  if (ret < 0)
181  return ret;
182 
183  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
184  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
185  if (avci->to_free->extended_data != avci->to_free->data) {
186  int planes = avci->to_free->channels;
187  int size = planes * sizeof(*frame->extended_data);
188 
189  if (!size) {
190  av_frame_unref(frame);
191  return AVERROR_BUG;
192  }
193 
194  frame->extended_data = av_malloc(size);
195  if (!frame->extended_data) {
196  av_frame_unref(frame);
197  return AVERROR(ENOMEM);
198  }
199  memcpy(frame->extended_data, avci->to_free->extended_data,
200  size);
201  } else
202  frame->extended_data = frame->data;
203 
204  frame->format = avci->to_free->format;
205  frame->width = avci->to_free->width;
206  frame->height = avci->to_free->height;
207  frame->channel_layout = avci->to_free->channel_layout;
208  frame->nb_samples = avci->to_free->nb_samples;
209  frame->channels = avci->to_free->channels;
210 
211  return 0;
212 }
213 
214 int ff_decode_bsfs_init(AVCodecContext *avctx)
215 {
216  AVCodecInternal *avci = avctx->internal;
217  int ret;
218 
219  if (avci->bsf)
220  return 0;
221 
222  ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
223  if (ret < 0) {
224  av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
225  if (ret != AVERROR(ENOMEM))
226  ret = AVERROR_BUG;
227  goto fail;
228  }
229 
230  /* We do not currently have an API for passing the input timebase into decoders,
231  * but no filters used here should actually need it.
232  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
233  avci->bsf->time_base_in = (AVRational){ 1, 90000 };
234  ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
235  if (ret < 0)
236  goto fail;
237 
238  ret = av_bsf_init(avci->bsf);
239  if (ret < 0)
240  goto fail;
241 
242  return 0;
243 fail:
244  av_bsf_free(&avci->bsf);
245  return ret;
246 }
247 
248 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
249 {
250  AVCodecInternal *avci = avctx->internal;
251  int ret;
252 
253  if (avci->draining)
254  return AVERROR_EOF;
255 
256  ret = av_bsf_receive_packet(avci->bsf, pkt);
257  if (ret == AVERROR_EOF)
258  avci->draining = 1;
259  if (ret < 0)
260  return ret;
261 
262  ret = extract_packet_props(avctx->internal, pkt);
263  if (ret < 0)
264  goto finish;
265 
266  ret = apply_param_change(avctx, pkt);
267  if (ret < 0)
268  goto finish;
269 
270  if (avctx->codec->receive_frame)
271  avci->compat_decode_consumed += pkt->size;
272 
273  return 0;
274 finish:
275  av_packet_unref(pkt);
276  return ret;
277 }
278 
279 /**
280  * Attempt to guess proper monotonic timestamps for decoded video frames
281  * which might have incorrect times. Input timestamps may wrap around, in
282  * which case the output will as well.
283  *
284  * @param pts the pts field of the decoded AVPacket, as passed through
285  * AVFrame.pts
286  * @param dts the dts field of the decoded AVPacket
287  * @return one of the input values, may be AV_NOPTS_VALUE
288  */
289 static int64_t guess_correct_pts(AVCodecContext *ctx,
290  int64_t reordered_pts, int64_t dts)
291 {
292  int64_t pts = AV_NOPTS_VALUE;
293 
294  if (dts != AV_NOPTS_VALUE) {
295  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
296  ctx->pts_correction_last_dts = dts;
297  } else if (reordered_pts != AV_NOPTS_VALUE)
298  ctx->pts_correction_last_dts = reordered_pts;
299 
300  if (reordered_pts != AV_NOPTS_VALUE) {
301  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
302  ctx->pts_correction_last_pts = reordered_pts;
303  } else if(dts != AV_NOPTS_VALUE)
304  ctx->pts_correction_last_pts = dts;
305 
306  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
307  && reordered_pts != AV_NOPTS_VALUE)
308  pts = reordered_pts;
309  else
310  pts = dts;
311 
312  return pts;
313 }
314 
315 /*
316  * The core of the receive_frame_wrapper for the decoders implementing
317  * the simple API. Certain decoders might consume partial packets without
318  * returning any output, so this function needs to be called in a loop until it
319  * returns EAGAIN.
320  **/
321 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
322 {
323  AVCodecInternal *avci = avctx->internal;
324  DecodeSimpleContext *ds = &avci->ds;
325  AVPacket *pkt = ds->in_pkt;
326  // copy to ensure we do not change pkt
327  int got_frame, actual_got_frame;
328  int ret;
329 
330  if (!pkt->data && !avci->draining) {
331  av_packet_unref(pkt);
332  ret = ff_decode_get_packet(avctx, pkt);
333  if (ret < 0 && ret != AVERROR_EOF)
334  return ret;
335  }
336 
337  // Some codecs (at least wma lossless) will crash when feeding drain packets
338  // after EOF was signaled.
339  if (avci->draining_done)
340  return AVERROR_EOF;
341 
342  if (!pkt->data &&
343  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
344  avctx->active_thread_type & FF_THREAD_FRAME))
345  return AVERROR_EOF;
346 
347  got_frame = 0;
348 
349  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
350  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
351  } else {
352  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
353 
354  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
355  frame->pkt_dts = pkt->dts;
356  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
357  if(!avctx->has_b_frames)
358  frame->pkt_pos = pkt->pos;
359  //FIXME these should be under if(!avctx->has_b_frames)
360  /* get_buffer is supposed to set frame parameters */
361  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
362  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
363  if (!frame->width) frame->width = avctx->width;
364  if (!frame->height) frame->height = avctx->height;
365  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
366  }
367  }
368  }
369  emms_c();
370  actual_got_frame = got_frame;
371 
372  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
373  if (frame->flags & AV_FRAME_FLAG_DISCARD)
374  got_frame = 0;
375  if (got_frame)
376  frame->best_effort_timestamp = guess_correct_pts(avctx,
377  frame->pts,
378  frame->pkt_dts);
379  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
380  uint8_t *side;
381  int side_size;
382  uint32_t discard_padding = 0;
383  uint8_t skip_reason = 0;
384  uint8_t discard_reason = 0;
385 
386  if (ret >= 0 && got_frame) {
387  frame->best_effort_timestamp = guess_correct_pts(avctx,
388  frame->pts,
389  frame->pkt_dts);
390  if (frame->format == AV_SAMPLE_FMT_NONE)
391  frame->format = avctx->sample_fmt;
392  if (!frame->channel_layout)
393  frame->channel_layout = avctx->channel_layout;
394  if (!frame->channels)
395  frame->channels = avctx->channels;
396  if (!frame->sample_rate)
397  frame->sample_rate = avctx->sample_rate;
398  }
399 
400  side = av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
401  if(side && side_size>=10) {
402  avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
403  discard_padding = AV_RL32(side + 4);
404  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
405  avci->skip_samples, (int)discard_padding);
406  skip_reason = AV_RL8(side + 8);
407  discard_reason = AV_RL8(side + 9);
408  }
409 
410  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
411  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
412  avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
413  got_frame = 0;
414  }
415 
416  if (avci->skip_samples > 0 && got_frame &&
417  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
418  if(frame->nb_samples <= avci->skip_samples){
419  got_frame = 0;
420  avci->skip_samples -= frame->nb_samples;
421  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
422  avci->skip_samples);
423  } else {
424  av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
425  frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
426  if(avctx->pkt_timebase.num && avctx->sample_rate) {
427  int64_t diff_ts = av_rescale_q(avci->skip_samples,
428  (AVRational){1, avctx->sample_rate},
429  avctx->pkt_timebase);
430  if(frame->pts!=AV_NOPTS_VALUE)
431  frame->pts += diff_ts;
432 #if FF_API_PKT_PTS
433 FF_DISABLE_DEPRECATION_WARNINGS
434  if(frame->pkt_pts!=AV_NOPTS_VALUE)
435  frame->pkt_pts += diff_ts;
436 FF_ENABLE_DEPRECATION_WARNINGS
437 #endif
438  if(frame->pkt_dts!=AV_NOPTS_VALUE)
439  frame->pkt_dts += diff_ts;
440  if (frame->pkt_duration >= diff_ts)
441  frame->pkt_duration -= diff_ts;
442  } else {
443  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
444  }
445  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
446  avci->skip_samples, frame->nb_samples);
447  frame->nb_samples -= avci->skip_samples;
448  avci->skip_samples = 0;
449  }
450  }
451 
452  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
453  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
454  if (discard_padding == frame->nb_samples) {
455  got_frame = 0;
456  } else {
457  if(avctx->pkt_timebase.num && avctx->sample_rate) {
458  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
459  (AVRational){1, avctx->sample_rate},
460  avctx->pkt_timebase);
461  frame->pkt_duration = diff_ts;
462  } else {
463  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
464  }
465  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
466  (int)discard_padding, frame->nb_samples);
467  frame->nb_samples -= discard_padding;
468  }
469  }
470 
471  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
472  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
473  if (fside) {
474  AV_WL32(fside->data, avci->skip_samples);
475  AV_WL32(fside->data + 4, discard_padding);
476  AV_WL8(fside->data + 8, skip_reason);
477  AV_WL8(fside->data + 9, discard_reason);
478  avci->skip_samples = 0;
479  }
480  }
481  }
482 
483  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
484  !avci->showed_multi_packet_warning &&
485  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
486  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
487  avci->showed_multi_packet_warning = 1;
488  }
489 
490  if (!got_frame)
491  av_frame_unref(frame);
492 
493  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
494  ret = pkt->size;
495 
496 #if FF_API_AVCTX_TIMEBASE
497  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
498  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
499 #endif
500 
501  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
502  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
503  if (avci->draining && !actual_got_frame) {
504  if (ret < 0) {
505  /* prevent infinite loop if a decoder wrongly always return error on draining */
506  /* reasonable nb_errors_max = maximum b frames + thread count */
507  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
508  avctx->thread_count : 1);
509 
510  if (avci->nb_draining_errors++ >= nb_errors_max) {
511  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
512  "Stop draining and force EOF.\n");
513  avci->draining_done = 1;
514  ret = AVERROR_BUG;
515  }
516  } else {
517  avci->draining_done = 1;
518  }
519  }
520 
521  avci->compat_decode_consumed += ret;
522 
523  if (ret >= pkt->size || ret < 0) {
524  av_packet_unref(pkt);
525  av_packet_unref(avci->last_pkt_props);
526  } else {
527  int consumed = ret;
528 
529  pkt->data += consumed;
530  pkt->size -= consumed;
531  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
532  pkt->pts = AV_NOPTS_VALUE;
533  pkt->dts = AV_NOPTS_VALUE;
534  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
535  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
536  }
537 
538  if (got_frame)
539  av_assert0(frame->buf[0]);
540 
541  return ret < 0 ? ret : 0;
542 }
543 
544 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
545 {
546  int ret;
547 
548  while (!frame->buf[0]) {
549  ret = decode_simple_internal(avctx, frame);
550  if (ret < 0)
551  return ret;
552  }
553 
554  return 0;
555 }
556 
558 {
559  AVCodecInternal *avci = avctx->internal;
560  int ret;
561 
562  av_assert0(!frame->buf[0]);
563 
564  if (avctx->codec->receive_frame) {
565  ret = avctx->codec->receive_frame(avctx, frame);
566  if (ret != AVERROR(EAGAIN))
567  av_packet_unref(avci->last_pkt_props);
568  } else
569  ret = decode_simple_receive_frame(avctx, frame);
570 
571  if (ret == AVERROR_EOF)
572  avci->draining_done = 1;
573 
574  if (!ret) {
575  /* the only case where decode data is not set should be decoders
576  * that do not call ff_get_buffer() */
577  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
578  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
579 
580  if (frame->private_ref) {
581  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
582 
583  if (fdd->post_process) {
584  ret = fdd->post_process(avctx, frame);
585  if (ret < 0) {
586  av_frame_unref(frame);
587  return ret;
588  }
589  }
590  }
591  }
592 
593  /* free the per-frame decode data */
594  av_buffer_unref(&frame->private_ref);
595 
596  return ret;
597 }
598 
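/* send/receive API entry point: buffer the packet, run it through the
 * decoder's bitstream filter chain and, if no frame is already buffered,
 * decode one frame ahead into avci->buffer_frame. */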
599 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
600 {
601  AVCodecInternal *avci = avctx->internal;
602  int ret;
603 
604  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
605  return AVERROR(EINVAL);
606 
607  if (avctx->internal->draining)
608  return AVERROR_EOF;
609 
610  if (avpkt && !avpkt->size && avpkt->data)
611  return AVERROR(EINVAL);
612 
613  av_packet_unref(avci->buffer_pkt);
614  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
615  ret = av_packet_ref(avci->buffer_pkt, avpkt);
616  if (ret < 0)
617  return ret;
618  }
619 
620  ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
621  if (ret < 0) {
622  av_packet_unref(avci->buffer_pkt);
623  return ret;
624  }
625 
626  if (!avci->buffer_frame->buf[0]) {
627  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
628  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
629  return ret;
630  }
631 
632  return 0;
633 }
634 
635 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
636 {
637  /* make sure we are noisy about decoders returning invalid cropping data */
638  if (frame->crop_left >= INT_MAX - frame->crop_right ||
639  frame->crop_top >= INT_MAX - frame->crop_bottom ||
640  (frame->crop_left + frame->crop_right) >= frame->width ||
641  (frame->crop_top + frame->crop_bottom) >= frame->height) {
642  av_log(avctx, AV_LOG_WARNING,
643  "Invalid cropping information set by a decoder: "
645  "(frame size %dx%d). This is a bug, please report it\n",
646  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
647  frame->width, frame->height);
648  frame->crop_left = 0;
649  frame->crop_right = 0;
650  frame->crop_top = 0;
651  frame->crop_bottom = 0;
652  return 0;
653  }
654 
655  if (!avctx->apply_cropping)
656  return 0;
657 
658  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
659  AV_FRAME_CROP_UNALIGNED : 0);
660 }
661 
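/* Hand the next decoded frame to the caller: either the frame buffered by
 * avcodec_send_packet() or a freshly decoded one. Cropping is applied for
 * video, and with AV_CODEC_FLAG_DROPCHANGED frames whose parameters differ
 * from the first frame are dropped. */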
662 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
663 {
664  AVCodecInternal *avci = avctx->internal;
665  int ret, changed;
666 
667  av_frame_unref(frame);
668 
669  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
670  return AVERROR(EINVAL);
671 
672  if (avci->buffer_frame->buf[0]) {
673  av_frame_move_ref(frame, avci->buffer_frame);
674  } else {
675  ret = decode_receive_frame_internal(avctx, frame);
676  if (ret < 0)
677  return ret;
678  }
679 
680  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
681  ret = apply_cropping(avctx, frame);
682  if (ret < 0) {
683  av_frame_unref(frame);
684  return ret;
685  }
686  }
687 
688  avctx->frame_number++;
689 
690  if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
691 
692  if (avctx->frame_number == 1) {
693  avci->initial_format = frame->format;
694  switch(avctx->codec_type) {
695  case AVMEDIA_TYPE_VIDEO:
696  avci->initial_width = frame->width;
697  avci->initial_height = frame->height;
698  break;
699  case AVMEDIA_TYPE_AUDIO:
700  avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
701  avctx->sample_rate;
702  avci->initial_channels = frame->channels;
703  avci->initial_channel_layout = frame->channel_layout;
704  break;
705  }
706  }
707 
708  if (avctx->frame_number > 1) {
709  changed = avci->initial_format != frame->format;
710 
711  switch(avctx->codec_type) {
712  case AVMEDIA_TYPE_VIDEO:
713  changed |= avci->initial_width != frame->width ||
714  avci->initial_height != frame->height;
715  break;
716  case AVMEDIA_TYPE_AUDIO:
717  changed |= avci->initial_sample_rate != frame->sample_rate ||
718  avci->initial_sample_rate != avctx->sample_rate ||
719  avci->initial_channels != frame->channels ||
720  avci->initial_channel_layout != frame->channel_layout;
721  break;
722  }
723 
724  if (changed) {
725  avci->changed_frames_dropped++;
726  av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
727  " drop count: %d \n",
728  avctx->frame_number, frame->pts,
729  avci->changed_frames_dropped);
730  av_frame_unref(frame);
731  return AVERROR_INPUT_CHANGED;
732  }
733  }
734  }
735  return 0;
736 }
737 
738 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
739  int *got_frame, const AVPacket *pkt)
740 {
741  AVCodecInternal *avci = avctx->internal;
742  int ret = 0;
743 
745 
746  if (avci->draining_done && pkt && pkt->size != 0) {
747  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
748  avcodec_flush_buffers(avctx);
749  }
750 
751  *got_frame = 0;
752 
753  if (avci->compat_decode_partial_size > 0 &&
754  avci->compat_decode_partial_size != pkt->size) {
755  av_log(avctx, AV_LOG_ERROR,
756  "Got unexpected packet size after a partial decode\n");
757  ret = AVERROR(EINVAL);
758  goto finish;
759  }
760 
761  if (!avci->compat_decode_partial_size) {
762  ret = avcodec_send_packet(avctx, pkt);
763  if (ret == AVERROR_EOF)
764  ret = 0;
765  else if (ret == AVERROR(EAGAIN)) {
766  /* we fully drain all the output in each decode call, so this should not
767  * ever happen */
768  ret = AVERROR_BUG;
769  goto finish;
770  } else if (ret < 0)
771  goto finish;
772  }
773 
774  while (ret >= 0) {
775  ret = avcodec_receive_frame(avctx, frame);
776  if (ret < 0) {
777  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
778  ret = 0;
779  goto finish;
780  }
781 
782  if (frame != avci->compat_decode_frame) {
783  if (!avctx->refcounted_frames) {
784  ret = unrefcount_frame(avci, frame);
785  if (ret < 0)
786  goto finish;
787  }
788 
789  *got_frame = 1;
790  frame = avci->compat_decode_frame;
791  } else {
792  if (!avci->compat_decode_warned) {
793  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
794  "API cannot return all the frames for this decoder. "
795  "Some frames will be dropped. Update your code to the "
796  "new decoding API to fix this.\n");
797  avci->compat_decode_warned = 1;
798  }
799  }
800 
801  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
802  break;
803  }
804 
805 finish:
806  if (ret == 0) {
807  /* if there are any bsfs then assume full packet is always consumed */
808  if (avctx->codec->bsfs)
809  ret = pkt->size;
810  else
811  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
812  }
813  avci->compat_decode_consumed = 0;
814  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
815 
816  return ret;
817 }
818 
819 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
820  int *got_picture_ptr,
821  const AVPacket *avpkt)
822 {
823  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
824 }
825 
826 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
827  AVFrame *frame,
828  int *got_frame_ptr,
829  const AVPacket *avpkt)
830 {
831  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
832 }
833 
834 static void get_subtitle_defaults(AVSubtitle *sub)
835 {
836  memset(sub, 0, sizeof(*sub));
837  sub->pts = AV_NOPTS_VALUE;
838 }
839 
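/* Recode a subtitle packet to UTF-8 with iconv when sub_charenc_mode is
 * FF_SUB_CHARENC_MODE_PRE_DECODER; without iconv support this is an error. */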
840 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
841 static int recode_subtitle(AVCodecContext *avctx,
842  AVPacket *outpkt, const AVPacket *inpkt)
843 {
844 #if CONFIG_ICONV
845  iconv_t cd = (iconv_t)-1;
846  int ret = 0;
847  char *inb, *outb;
848  size_t inl, outl;
849  AVPacket tmp;
850 #endif
851 
852  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
853  return 0;
854 
855 #if CONFIG_ICONV
856  cd = iconv_open("UTF-8", avctx->sub_charenc);
857  av_assert0(cd != (iconv_t)-1);
858 
859  inb = inpkt->data;
860  inl = inpkt->size;
861 
862  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
863  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
864  ret = AVERROR(ENOMEM);
865  goto end;
866  }
867 
868  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
869  if (ret < 0)
870  goto end;
871  outpkt->buf = tmp.buf;
872  outpkt->data = tmp.data;
873  outpkt->size = tmp.size;
874  outb = outpkt->data;
875  outl = outpkt->size;
876 
877  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
878  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
879  outl >= outpkt->size || inl != 0) {
880  ret = FFMIN(AVERROR(errno), -1);
881  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
882  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
883  av_packet_unref(&tmp);
884  goto end;
885  }
886  outpkt->size -= outl;
887  memset(outpkt->data + outpkt->size, 0, outl);
888 
889 end:
890  if (cd != (iconv_t)-1)
891  iconv_close(cd);
892  return ret;
893 #else
894  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
895  return AVERROR(EINVAL);
896 #endif
897 }
898 
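/* Check that a string is valid UTF-8: reject overlong sequences, surrogates,
 * the 0xFFFE codepoint and anything at or above 0x110000. */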
899 static int utf8_check(const uint8_t *str)
900 {
901  const uint8_t *byte;
902  uint32_t codepoint, min;
903 
904  while (*str) {
905  byte = str;
906  GET_UTF8(codepoint, *(byte++), return 0;);
907  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
908  1 << (5 * (byte - str) - 4);
909  if (codepoint < min || codepoint >= 0x110000 ||
910  codepoint == 0xFFFE /* BOM */ ||
911  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
912  return 0;
913  str = byte;
914  }
915  return 1;
916 }
917 
918 #if FF_API_ASS_TIMING
919 static void insert_ts(AVBPrint *buf, int ts)
920 {
921  if (ts == -1) {
922  av_bprintf(buf, "9:59:59.99,");
923  } else {
924  int h, m, s;
925 
926  h = ts/360000; ts -= 360000*h;
927  m = ts/ 6000; ts -= 6000*m;
928  s = ts/ 100; ts -= 100*s;
929  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
930  }
931 }
932 
933 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
934 {
935  int i;
936  AVBPrint buf;
937 
938  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
939 
940  for (i = 0; i < sub->num_rects; i++) {
941  char *final_dialog;
942  const char *dialog;
943  AVSubtitleRect *rect = sub->rects[i];
944  int ts_start, ts_duration = -1;
945  long int layer;
946 
947  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
948  continue;
949 
950  av_bprint_clear(&buf);
951 
952  /* skip ReadOrder */
953  dialog = strchr(rect->ass, ',');
954  if (!dialog)
955  continue;
956  dialog++;
957 
958  /* extract Layer or Marked */
959  layer = strtol(dialog, (char**)&dialog, 10);
960  if (*dialog != ',')
961  continue;
962  dialog++;
963 
964  /* rescale timing to ASS time base (ms) */
965  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
966  if (pkt->duration != -1)
967  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
968  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
969 
970  /* construct ASS (standalone file form with timestamps) string */
971  av_bprintf(&buf, "Dialogue: %ld,", layer);
972  insert_ts(&buf, ts_start);
973  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
974  av_bprintf(&buf, "%s\r\n", dialog);
975 
976  final_dialog = av_strdup(buf.str);
977  if (!av_bprint_is_complete(&buf) || !final_dialog) {
978  av_freep(&final_dialog);
979  av_bprint_finalize(&buf, NULL);
980  return AVERROR(ENOMEM);
981  }
982  av_freep(&rect->ass);
983  rect->ass = final_dialog;
984  }
985 
986  av_bprint_finalize(&buf, NULL);
987  return 0;
988 }
989 #endif
990 
991 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
992  int *got_sub_ptr,
993  AVPacket *avpkt)
994 {
995  int i, ret = 0;
996 
997  if (!avpkt->data && avpkt->size) {
998  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
999  return AVERROR(EINVAL);
1000  }
1001  if (!avctx->codec)
1002  return AVERROR(EINVAL);
1003  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1004  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1005  return AVERROR(EINVAL);
1006  }
1007 
1008  *got_sub_ptr = 0;
1009  get_subtitle_defaults(sub);
1010 
1011  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1012  AVPacket pkt_recoded = *avpkt;
1013 
1014  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1015  if (ret < 0) {
1016  *got_sub_ptr = 0;
1017  } else {
1018  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1019  if (ret < 0)
1020  return ret;
1021 
1022  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1023  sub->pts = av_rescale_q(avpkt->pts,
1024  avctx->pkt_timebase, AV_TIME_BASE_Q);
1025  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1026  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1027  !!*got_sub_ptr >= !!sub->num_rects);
1028 
1029 #if FF_API_ASS_TIMING
1030  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1031  && *got_sub_ptr && sub->num_rects) {
1032  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1033  : avctx->time_base;
1034  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1035  if (err < 0)
1036  ret = err;
1037  }
1038 #endif
1039 
1040  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1041  avctx->pkt_timebase.num) {
1042  AVRational ms = { 1, 1000 };
1043  sub->end_display_time = av_rescale_q(avpkt->duration,
1044  avctx->pkt_timebase, ms);
1045  }
1046 
1047  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1048  sub->format = 0;
1049  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1050  sub->format = 1;
1051 
1052  for (i = 0; i < sub->num_rects; i++) {
1053  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1054  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1055  av_log(avctx, AV_LOG_ERROR,
1056  "Invalid UTF-8 in decoded subtitles text; "
1057  "maybe missing -sub_charenc option\n");
1058  avsubtitle_free(sub);
1059  ret = AVERROR_INVALIDDATA;
1060  break;
1061  }
1062  }
1063 
1064  if (avpkt->data != pkt_recoded.data) { // did we recode?
1065  /* prevent from destroying side data from original packet */
1066  pkt_recoded.side_data = NULL;
1067  pkt_recoded.side_data_elems = 0;
1068 
1069  av_packet_unref(&pkt_recoded);
1070  }
1071  }
1072 
1073  if (*got_sub_ptr)
1074  avctx->frame_number++;
1075  }
1076 
1077  return ret;
1078 }
1079 
1080 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1081  const enum AVPixelFormat *fmt)
1082 {
1083  const AVPixFmtDescriptor *desc;
1084  const AVCodecHWConfig *config;
1085  int i, n;
1086 
1087  // If a device was supplied when the codec was opened, assume that the
1088  // user wants to use it.
1089  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1090  AVHWDeviceContext *device_ctx =
1091  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1092  for (i = 0;; i++) {
1093  config = &avctx->codec->hw_configs[i]->public;
1094  if (!config)
1095  break;
1096  if (!(config->methods &
1097  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1098  continue;
1099  if (device_ctx->type != config->device_type)
1100  continue;
1101  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1102  if (config->pix_fmt == fmt[n])
1103  return fmt[n];
1104  }
1105  }
1106  }
1107  // No device or other setup, so we have to choose from things which
1108  // don't depend on any other external information.
1109 
1110  // If the last element of the list is a software format, choose it
1111  // (this should be best software format if any exist).
1112  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1113  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1114  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1115  return fmt[n - 1];
1116 
1117  // Finally, traverse the list in order and choose the first entry
1118  // with no external dependencies (if there is no hardware configuration
1119  // information available then this just picks the first entry).
1120  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1121  for (i = 0;; i++) {
1122  config = avcodec_get_hw_config(avctx->codec, i);
1123  if (!config)
1124  break;
1125  if (config->pix_fmt == fmt[n])
1126  break;
1127  }
1128  if (!config) {
1129  // No specific config available, so the decoder must be able
1130  // to handle this format without any additional setup.
1131  return fmt[n];
1132  }
1134  // Usable with only internal setup.
1135  return fmt[n];
1136  }
1137  }
1138 
1139  // Nothing is usable, give up.
1140  return AV_PIX_FMT_NONE;
1141 }
1142 
1143 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1144  enum AVHWDeviceType dev_type)
1145 {
1146  AVHWDeviceContext *device_ctx;
1147  AVHWFramesContext *frames_ctx;
1148  int ret;
1149 
1150  if (!avctx->hwaccel)
1151  return AVERROR(ENOSYS);
1152 
1153  if (avctx->hw_frames_ctx)
1154  return 0;
1155  if (!avctx->hw_device_ctx) {
1156  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1157  "required for hardware accelerated decoding.\n");
1158  return AVERROR(EINVAL);
1159  }
1160 
1161  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1162  if (device_ctx->type != dev_type) {
1163  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1164  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1165  av_hwdevice_get_type_name(device_ctx->type));
1166  return AVERROR(EINVAL);
1167  }
1168 
1169  ret = avcodec_get_hw_frames_parameters(avctx,
1170  avctx->hw_device_ctx,
1171  avctx->hwaccel->pix_fmt,
1172  &avctx->hw_frames_ctx);
1173  if (ret < 0)
1174  return ret;
1175 
1176  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1177 
1178 
1179  if (frames_ctx->initial_pool_size) {
1180  // We guarantee 4 base work surfaces. The function above guarantees 1
1181  // (the absolute minimum), so add the missing count.
1182  frames_ctx->initial_pool_size += 3;
1183  }
1184 
1185  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1186  if (ret < 0) {
1187  av_buffer_unref(&avctx->hw_frames_ctx);
1188  return ret;
1189  }
1190 
1191  return 0;
1192 }
1193 
1194 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1195  AVBufferRef *device_ref,
1196  enum AVPixelFormat hw_pix_fmt,
1197  AVBufferRef **out_frames_ref)
1198 {
1199  AVBufferRef *frames_ref = NULL;
1200  const AVCodecHWConfigInternal *hw_config;
1201  const AVHWAccel *hwa;
1202  int i, ret;
1203 
1204  for (i = 0;; i++) {
1205  hw_config = avctx->codec->hw_configs[i];
1206  if (!hw_config)
1207  return AVERROR(ENOENT);
1208  if (hw_config->public.pix_fmt == hw_pix_fmt)
1209  break;
1210  }
1211 
1212  hwa = hw_config->hwaccel;
1213  if (!hwa || !hwa->frame_params)
1214  return AVERROR(ENOENT);
1215 
1216  frames_ref = av_hwframe_ctx_alloc(device_ref);
1217  if (!frames_ref)
1218  return AVERROR(ENOMEM);
1219 
1220  ret = hwa->frame_params(avctx, frames_ref);
1221  if (ret >= 0) {
1222  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1223 
1224  if (frames_ctx->initial_pool_size) {
1225  // If the user has requested that extra output surfaces be
1226  // available then add them here.
1227  if (avctx->extra_hw_frames > 0)
1228  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1229 
1230  // If frame threading is enabled then an extra surface per thread
1231  // is also required.
1232  if (avctx->active_thread_type & FF_THREAD_FRAME)
1233  frames_ctx->initial_pool_size += avctx->thread_count;
1234  }
1235 
1236  *out_frames_ref = frames_ref;
1237  } else {
1238  av_buffer_unref(&frames_ref);
1239  }
1240  return ret;
1241 }
1242 
1243 static int hwaccel_init(AVCodecContext *avctx,
1244  const AVCodecHWConfigInternal *hw_config)
1245 {
1246  const AVHWAccel *hwaccel;
1247  int err;
1248 
1249  hwaccel = hw_config->hwaccel;
1250  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1251  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1252  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1253  hwaccel->name);
1254  return AVERROR_PATCHWELCOME;
1255  }
1256 
1257  if (hwaccel->priv_data_size) {
1258  avctx->internal->hwaccel_priv_data =
1259  av_mallocz(hwaccel->priv_data_size);
1260  if (!avctx->internal->hwaccel_priv_data)
1261  return AVERROR(ENOMEM);
1262  }
1263 
1264  avctx->hwaccel = hwaccel;
1265  if (hwaccel->init) {
1266  err = hwaccel->init(avctx);
1267  if (err < 0) {
1268  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1269  "hwaccel initialisation returned error.\n",
1270  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1271  av_freep(&avctx->internal->hwaccel_priv_data);
1272  avctx->hwaccel = NULL;
1273  return err;
1274  }
1275  }
1276 
1277  return 0;
1278 }
1279 
1280 static void hwaccel_uninit(AVCodecContext *avctx)
1281 {
1282  if (avctx->hwaccel && avctx->hwaccel->uninit)
1283  avctx->hwaccel->uninit(avctx);
1284 
1285  av_freep(&avctx->internal->hwaccel_priv_data);
1286 
1287  avctx->hwaccel = NULL;
1288 
1289  av_buffer_unref(&avctx->hw_frames_ctx);
1290 }
1291 
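/* Negotiate the output pixel format through the get_format() callback,
 * validate the choice against the codec's hardware configurations and set up
 * the matching hwaccel; formats that fail setup are removed from the list and
 * the callback is retried. */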
1292 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1293 {
1294  const AVPixFmtDescriptor *desc;
1295  enum AVPixelFormat *choices;
1296  enum AVPixelFormat ret, user_choice;
1297  const AVCodecHWConfigInternal *hw_config;
1298  const AVCodecHWConfig *config;
1299  int i, n, err;
1300 
1301  // Find end of list.
1302  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1303  // Must contain at least one entry.
1304  av_assert0(n >= 1);
1305  // If a software format is available, it must be the last entry.
1306  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1307  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1308  // No software format is available.
1309  } else {
1310  avctx->sw_pix_fmt = fmt[n - 1];
1311  }
1312 
1313  choices = av_malloc_array(n + 1, sizeof(*choices));
1314  if (!choices)
1315  return AV_PIX_FMT_NONE;
1316 
1317  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1318 
1319  for (;;) {
1320  // Remove the previous hwaccel, if there was one.
1321  hwaccel_uninit(avctx);
1322 
1323  user_choice = avctx->get_format(avctx, choices);
1324  if (user_choice == AV_PIX_FMT_NONE) {
1325  // Explicitly chose nothing, give up.
1326  ret = AV_PIX_FMT_NONE;
1327  break;
1328  }
1329 
1330  desc = av_pix_fmt_desc_get(user_choice);
1331  if (!desc) {
1332  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1333  "get_format() callback.\n");
1334  ret = AV_PIX_FMT_NONE;
1335  break;
1336  }
1337  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1338  desc->name);
1339 
1340  for (i = 0; i < n; i++) {
1341  if (choices[i] == user_choice)
1342  break;
1343  }
1344  if (i == n) {
1345  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1346  "%s not in possible list.\n", desc->name);
1347  ret = AV_PIX_FMT_NONE;
1348  break;
1349  }
1350 
1351  if (avctx->codec->hw_configs) {
1352  for (i = 0;; i++) {
1353  hw_config = avctx->codec->hw_configs[i];
1354  if (!hw_config)
1355  break;
1356  if (hw_config->public.pix_fmt == user_choice)
1357  break;
1358  }
1359  } else {
1360  hw_config = NULL;
1361  }
1362 
1363  if (!hw_config) {
1364  // No config available, so no extra setup required.
1365  ret = user_choice;
1366  break;
1367  }
1368  config = &hw_config->public;
1369 
1370  if (config->methods &
1371  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1372  avctx->hw_frames_ctx) {
1373  const AVHWFramesContext *frames_ctx =
1374  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1375  if (frames_ctx->format != user_choice) {
1376  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1377  "does not match the format of the provided frames "
1378  "context.\n", desc->name);
1379  goto try_again;
1380  }
1381  } else if (config->methods &
1382  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1383  avctx->hw_device_ctx) {
1384  const AVHWDeviceContext *device_ctx =
1385  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1386  if (device_ctx->type != config->device_type) {
1387  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1388  "does not match the type of the provided device "
1389  "context.\n", desc->name);
1390  goto try_again;
1391  }
1392  } else if (config->methods &
1393  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1394  // Internal-only setup, no additional configuration.
1395  } else if (config->methods &
1396  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1397  // Some ad-hoc configuration we can't see and can't check.
1398  } else {
1399  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1400  "missing configuration.\n", desc->name);
1401  goto try_again;
1402  }
1403  if (hw_config->hwaccel) {
1404  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1405  "initialisation.\n", desc->name);
1406  err = hwaccel_init(avctx, hw_config);
1407  if (err < 0)
1408  goto try_again;
1409  }
1410  ret = user_choice;
1411  break;
1412 
1413  try_again:
1414  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1415  "get_format() without it.\n", desc->name);
1416  for (i = 0; i < n; i++) {
1417  if (choices[i] == user_choice)
1418  break;
1419  }
1420  for (; i + 1 < n; i++)
1421  choices[i] = choices[i + 1];
1422  --n;
1423  }
1424 
1425  av_freep(&choices);
1426  return ret;
1427 }
1428 
1429 static void frame_pool_free(void *opaque, uint8_t *data)
1430 {
1431  FramePool *pool = (FramePool*)data;
1432  int i;
1433 
1434  for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1435  av_buffer_pool_uninit(&pool->pools[i]);
1436 
1437  av_freep(&data);
1438 }
1439 
1440 static AVBufferRef *frame_pool_alloc(void)
1441 {
1442  FramePool *pool = av_mallocz(sizeof(*pool));
1443  AVBufferRef *buf;
1444 
1445  if (!pool)
1446  return NULL;
1447 
1448  buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1449  frame_pool_free, NULL, 0);
1450  if (!buf) {
1451  av_freep(&pool);
1452  return NULL;
1453  }
1454 
1455  return buf;
1456 }
1457 
1458 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1459 {
1460  FramePool *pool = avctx->internal->pool ?
1461  (FramePool*)avctx->internal->pool->data : NULL;
1462  AVBufferRef *pool_buf;
1463  int i, ret, ch, planes;
1464 
1465  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1466  int planar = av_sample_fmt_is_planar(frame->format);
1467  ch = frame->channels;
1468  planes = planar ? ch : 1;
1469  }
1470 
1471  if (pool && pool->format == frame->format) {
1472  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1473  pool->width == frame->width && pool->height == frame->height)
1474  return 0;
1475  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1476  pool->channels == ch && frame->nb_samples == pool->samples)
1477  return 0;
1478  }
1479 
1480  pool_buf = frame_pool_alloc();
1481  if (!pool_buf)
1482  return AVERROR(ENOMEM);
1483  pool = (FramePool*)pool_buf->data;
1484 
1485  switch (avctx->codec_type) {
1486  case AVMEDIA_TYPE_VIDEO: {
1487  int linesize[4];
1488  int w = frame->width;
1489  int h = frame->height;
1490  int unaligned;
1491  ptrdiff_t linesize1[4];
1492  size_t size[4];
1493 
1494  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1495 
1496  do {
1497  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1498  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1499  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1500  if (ret < 0)
1501  goto fail;
1502  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1503  w += w & ~(w - 1);
1504 
1505  unaligned = 0;
1506  for (i = 0; i < 4; i++)
1507  unaligned |= linesize[i] % pool->stride_align[i];
1508  } while (unaligned);
1509 
1510  for (i = 0; i < 4; i++)
1511  linesize1[i] = linesize[i];
1512  ret = av_image_fill_plane_sizes(size, avctx->pix_fmt, h, linesize1);
1513  if (ret < 0)
1514  goto fail;
1515 
1516  for (i = 0; i < 4; i++) {
1517  pool->linesize[i] = linesize[i];
1518  if (size[i]) {
1519  if (size[i] > INT_MAX - (16 + STRIDE_ALIGN - 1)) {
1520  ret = AVERROR(EINVAL);
1521  goto fail;
1522  }
1523  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1524  CONFIG_MEMORY_POISONING ?
1525  NULL :
1526  av_buffer_allocz);
1527  if (!pool->pools[i]) {
1528  ret = AVERROR(ENOMEM);
1529  goto fail;
1530  }
1531  }
1532  }
1533  pool->format = frame->format;
1534  pool->width = frame->width;
1535  pool->height = frame->height;
1536 
1537  break;
1538  }
1539  case AVMEDIA_TYPE_AUDIO: {
1540  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1541  frame->nb_samples, frame->format, 0);
1542  if (ret < 0)
1543  goto fail;
1544 
1545  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1546  if (!pool->pools[0]) {
1547  ret = AVERROR(ENOMEM);
1548  goto fail;
1549  }
1550 
1551  pool->format = frame->format;
1552  pool->planes = planes;
1553  pool->channels = ch;
1554  pool->samples = frame->nb_samples;
1555  break;
1556  }
1557  default: av_assert0(0);
1558  }
1559 
1560  av_buffer_unref(&avctx->internal->pool);
1561  avctx->internal->pool = pool_buf;
1562 
1563  return 0;
1564 fail:
1565  av_buffer_unref(&pool_buf);
1566  return ret;
1567 }
1568 
1569 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1570 {
1571  FramePool *pool = (FramePool*)avctx->internal->pool->data;
1572  int planes = pool->planes;
1573  int i;
1574 
1575  frame->linesize[0] = pool->linesize[0];
1576 
1577  if (planes > AV_NUM_DATA_POINTERS) {
1578  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1579  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1580  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1581  sizeof(*frame->extended_buf));
1582  if (!frame->extended_data || !frame->extended_buf) {
1583  av_freep(&frame->extended_data);
1584  av_freep(&frame->extended_buf);
1585  return AVERROR(ENOMEM);
1586  }
1587  } else {
1588  frame->extended_data = frame->data;
1589  av_assert0(frame->nb_extended_buf == 0);
1590  }
1591 
1592  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1593  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1594  if (!frame->buf[i])
1595  goto fail;
1596  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1597  }
1598  for (i = 0; i < frame->nb_extended_buf; i++) {
1599  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1600  if (!frame->extended_buf[i])
1601  goto fail;
1602  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1603  }
1604 
1605  if (avctx->debug & FF_DEBUG_BUFFERS)
1606  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1607 
1608  return 0;
1609 fail:
1610  av_frame_unref(frame);
1611  return AVERROR(ENOMEM);
1612 }
1613 
1614 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1615 {
1616  FramePool *pool = (FramePool*)s->internal->pool->data;
1617  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1618  int i;
1619 
1620  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1621  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1622  return -1;
1623  }
1624 
1625  if (!desc) {
1626  av_log(s, AV_LOG_ERROR,
1627  "Unable to get pixel format descriptor for format %s\n",
1628  av_get_pix_fmt_name(pic->format));
1629  return AVERROR(EINVAL);
1630  }
1631 
1632  memset(pic->data, 0, sizeof(pic->data));
1633  pic->extended_data = pic->data;
1634 
1635  for (i = 0; i < 4 && pool->pools[i]; i++) {
1636  pic->linesize[i] = pool->linesize[i];
1637 
1638  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1639  if (!pic->buf[i])
1640  goto fail;
1641 
1642  pic->data[i] = pic->buf[i]->data;
1643  }
1644  for (; i < AV_NUM_DATA_POINTERS; i++) {
1645  pic->data[i] = NULL;
1646  pic->linesize[i] = 0;
1647  }
1648  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1649  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1650  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1651 
1652  if (s->debug & FF_DEBUG_BUFFERS)
1653  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1654 
1655  return 0;
1656 fail:
1657  av_frame_unref(pic);
1658  return AVERROR(ENOMEM);
1659 }
1660 
1661 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1662 {
1663  int ret;
1664 
1665  if (avctx->hw_frames_ctx) {
1666  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1667  frame->width = avctx->coded_width;
1668  frame->height = avctx->coded_height;
1669  return ret;
1670  }
1671 
1672  if ((ret = update_frame_pool(avctx, frame)) < 0)
1673  return ret;
1674 
1675  switch (avctx->codec_type) {
1676  case AVMEDIA_TYPE_VIDEO:
1677  return video_get_buffer(avctx, frame);
1678  case AVMEDIA_TYPE_AUDIO:
1679  return audio_get_buffer(avctx, frame);
1680  default:
1681  return -1;
1682  }
1683 }
1684 
1685 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1686 {
1687  int size;
1688  const uint8_t *side_metadata;
1689 
1690  AVDictionary **frame_md = &frame->metadata;
1691 
1692  side_metadata = av_packet_get_side_data(avpkt,
1693  AV_PKT_DATA_STRINGS_METADATA, &size);
1694  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1695 }
1696 
1697 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1698 {
1699  AVPacket *pkt = avctx->internal->last_pkt_props;
1700  int i;
1701  static const struct {
1702  enum AVPacketSideDataType packet;
1703  enum AVFrameSideDataType frame;
1704  } sd[] = {
1705  { AV_PKT_DATA_REPLAYGAIN, AV_FRAME_DATA_REPLAYGAIN },
1706  { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1707  { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1708  { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1709  { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1710  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1711  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1712  { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1713  { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
1714  { AV_PKT_DATA_S12M_TIMECODE, AV_FRAME_DATA_S12M_TIMECODE },
1715  };
1716 
1717  if (IS_EMPTY(pkt))
1718  avpriv_packet_list_get(&avctx->internal->pkt_props,
1719  &avctx->internal->pkt_props_tail,
1720  pkt);
1721 
1722  if (pkt) {
1723  frame->pts = pkt->pts;
1724 #if FF_API_PKT_PTS
1725 FF_DISABLE_DEPRECATION_WARNINGS
1726  frame->pkt_pts = pkt->pts;
1727 FF_ENABLE_DEPRECATION_WARNINGS
1728 #endif
1729  frame->pkt_pos = pkt->pos;
1730  frame->pkt_duration = pkt->duration;
1731  frame->pkt_size = pkt->size;
1732 
1733  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1734  int size;
1735  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1736  if (packet_sd) {
1737  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1738  sd[i].frame,
1739  size);
1740  if (!frame_sd)
1741  return AVERROR(ENOMEM);
1742 
1743  memcpy(frame_sd->data, packet_sd, size);
1744  }
1745  }
1746  add_metadata_from_side_data(pkt, frame);
1747 
1748  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1749  frame->flags |= AV_FRAME_FLAG_DISCARD;
1750  } else {
1751  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1752  }
1753  }
1754  frame->reordered_opaque = avctx->reordered_opaque;
1755 
1756  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1757  frame->color_primaries = avctx->color_primaries;
1758  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1759  frame->color_trc = avctx->color_trc;
1760  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1761  frame->colorspace = avctx->colorspace;
1762  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1763  frame->color_range = avctx->color_range;
1764  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1765  frame->chroma_location = avctx->chroma_sample_location;
1766 
1767  switch (avctx->codec->type) {
1768  case AVMEDIA_TYPE_VIDEO:
1769  frame->format = avctx->pix_fmt;
1770  if (!frame->sample_aspect_ratio.num)
1771  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1772 
1773  if (frame->width && frame->height &&
1774  av_image_check_sar(frame->width, frame->height,
1775  frame->sample_aspect_ratio) < 0) {
1776  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1777  frame->sample_aspect_ratio.num,
1778  frame->sample_aspect_ratio.den);
1779  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1780  }
1781 
1782  break;
1783  case AVMEDIA_TYPE_AUDIO:
1784  if (!frame->sample_rate)
1785  frame->sample_rate = avctx->sample_rate;
1786  if (frame->format < 0)
1787  frame->format = avctx->sample_fmt;
1788  if (!frame->channel_layout) {
1789  if (avctx->channel_layout) {
1790  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1791  avctx->channels) {
1792  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1793  "configuration.\n");
1794  return AVERROR(EINVAL);
1795  }
1796 
1797  frame->channel_layout = avctx->channel_layout;
1798  } else {
1799  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1800  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1801  avctx->channels);
1802  return AVERROR(ENOSYS);
1803  }
1804  }
1805  }
1806  frame->channels = avctx->channels;
1807  break;
1808  }
1809  return 0;
1810 }
1811 
1812 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1813 {
1814  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1815  int i;
1816  int num_planes = av_pix_fmt_count_planes(frame->format);
1817  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1818  int flags = desc ? desc->flags : 0;
1819  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1820  num_planes = 2;
1821  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1822  num_planes = 2;
1823  for (i = 0; i < num_planes; i++) {
1824  av_assert0(frame->data[i]);
1825  }
1826  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1827  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1828  if (frame->data[i])
1829  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1830  frame->data[i] = NULL;
1831  }
1832  }
1833 }
1834 
1835 static void decode_data_free(void *opaque, uint8_t *data)
1836 {
1837  FrameDecodeData *fdd = (FrameDecodeData*)data;
1838 
1839  if (fdd->post_process_opaque_free)
1840  fdd->post_process_opaque_free(fdd->post_process_opaque);
1841 
1842  if (fdd->hwaccel_priv_free)
1843  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1844 
1845  av_freep(&fdd);
1846 }
1847 
1848 int ff_attach_decode_data(AVFrame *frame)
1849 {
1850  AVBufferRef *fdd_buf;
1851  FrameDecodeData *fdd;
1852 
1853  av_assert1(!frame->private_ref);
1854  av_buffer_unref(&frame->private_ref);
1855 
1856  fdd = av_mallocz(sizeof(*fdd));
1857  if (!fdd)
1858  return AVERROR(ENOMEM);
1859 
1860  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1861  NULL, AV_BUFFER_FLAG_READONLY);
1862  if (!fdd_buf) {
1863  av_freep(&fdd);
1864  return AVERROR(ENOMEM);
1865  }
1866 
1867  frame->private_ref = fdd_buf;
1868 
1869  return 0;
1870 }
1871 
1872 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1873 {
1874  const AVHWAccel *hwaccel = avctx->hwaccel;
1875  int override_dimensions = 1;
1876  int ret;
1877 
1878  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1879  if ((ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1880  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1881  ret = AVERROR(EINVAL);
1882  goto fail;
1883  }
1884 
1885  if (frame->width <= 0 || frame->height <= 0) {
1886  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1887  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1888  override_dimensions = 0;
1889  }
1890 
1891  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1892  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1893  ret = AVERROR(EINVAL);
1894  goto fail;
1895  }
1896  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1897  if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1898  av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1899  ret = AVERROR(EINVAL);
1900  goto fail;
1901  }
1902  }
1903  ret = ff_decode_frame_props(avctx, frame);
1904  if (ret < 0)
1905  goto fail;
1906 
1907  if (hwaccel) {
1908  if (hwaccel->alloc_frame) {
1909  ret = hwaccel->alloc_frame(avctx, frame);
1910  goto end;
1911  }
1912  } else
1913  avctx->sw_pix_fmt = avctx->pix_fmt;
1914 
1915  ret = avctx->get_buffer2(avctx, frame, flags);
1916  if (ret < 0)
1917  goto fail;
1918 
1919  validate_avframe_allocation(avctx, frame);
1920 
1921  ret = ff_attach_decode_data(frame);
1922  if (ret < 0)
1923  goto fail;
1924 
1925 end:
1926  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1927  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1928  frame->width = avctx->width;
1929  frame->height = avctx->height;
1930  }
1931 
1932 fail:
1933  if (ret < 0) {
1934  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1935  av_frame_unref(frame);
1936  }
1937 
1938  return ret;
1939 }
1940 
1941 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1942 {
1943  AVFrame *tmp;
1944  int ret;
1945 
1946  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1947 
1948  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1949  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1950  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1951  av_frame_unref(frame);
1952  }
1953 
1954  if (!frame->data[0])
1955  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1956 
1957  if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1958  return ff_decode_frame_props(avctx, frame);
1959 
1960  tmp = av_frame_alloc();
1961  if (!tmp)
1962  return AVERROR(ENOMEM);
1963 
1964  av_frame_move_ref(tmp, frame);
1965 
1966  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1967  if (ret < 0) {
1968  av_frame_free(&tmp);
1969  return ret;
1970  }
1971 
1972  av_frame_copy(frame, tmp);
1973  av_frame_free(&tmp);
1974 
1975  return 0;
1976 }
1977 
1978 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1979 {
1980  int ret = reget_buffer_internal(avctx, frame, flags);
1981  if (ret < 0)
1982  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1983  return ret;
1984 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:97
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:61
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1594
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:778
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwconfig.h:34
int nb_draining_errors
Definition: internal.h:197
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:2127
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1292
const struct AVCodec * codec
Definition: avcodec.h:535
AVRational framerate
Definition: avcodec.h:2069
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:2090
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AV_NUM_DATA_POINTERS
Definition: frame.h:309
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: packet.h:40
int64_t pts_correction_num_faulty_dts
Number of incorrect DTS values so far.
Definition: avcodec.h:2107
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:427
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
int stride_align[AV_NUM_DATA_POINTERS]
Definition: decode.c:61
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:2314
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:557
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: codec.h:321
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:384
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:579
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:2443
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:147
const char * desc
Definition: libsvtav1.c:79
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:933
int changed_frames_dropped
Definition: internal.h:200
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2613
AVFrame * to_free
Definition: internal.h:134
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:383
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:834
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:106
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:499
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:544
int width
Definition: decode.c:60
int(* receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: codec.h:296
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: packet.h:114
ATSC A53 Part 4 Closed Captions.
Definition: packet.h:242
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:517
int ff_decode_bsfs_init(AVCodecContext *avctx)
Called during avcodec_open2() to initialize avctx->internal->bsf.
Definition: decode.c:214
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
int size
Definition: packet.h:364
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:101
int initial_channels
Definition: internal.h:204
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:2437
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
static void frame_pool_free(void *opaque, uint8_t *data)
Definition: decode.c:1429
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
int samples
Definition: decode.c:65
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:321
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:826
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:209
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:2698
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:595
enum AVMediaType type
Definition: codec.h:203
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:68
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:841
AVBufferPool * pools[4]
Pools for each data plane.
Definition: decode.c:54
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1697
size_t crop_bottom
Definition: frame.h:669
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:991
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:2456
static int utf8_check(const uint8_t *str)
Definition: decode.c:899
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:635
The codec supports this format by some internal method.
Definition: codec.h:409
Mastering display metadata (based on SMPTE-2086:2014).
Definition: packet.h:222
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:2699
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:2548
static AVBufferRef * frame_pool_alloc(void)
Definition: decode.c:1440
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int height
Definition: decode.c:60
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1080
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1848
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:530
AVOptions.
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:289
size_t crop_left
Definition: frame.h:670
AVPacket pkt
Definition: packet.h:397
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:176
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:381
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:271
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:687
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:248
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:401
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:430
int planes
Definition: decode.c:63
Structure to hold side data for an AVFrame.
Definition: frame.h:214
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:322
size_t compat_decode_consumed
Definition: internal.h:185
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: packet.h:363
#define FF_REGET_BUFFER_FLAG_READONLY
the returned buffer does not need to be writable
Definition: internal.h:298
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:594
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1765
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:2540
ptrdiff_t size
Definition: opengl_enc.c:100
int initial_height
Definition: internal.h:202
int initial_format
Definition: internal.h:201
The codec supports this format by some ad-hoc method.
Definition: codec.h:418
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
#define FFALIGN(x, a)
Definition: macros.h:48
#define av_log(a,...)
The buffer pool.
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:615
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:156
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
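av_rescale_q() is the helper this file uses whenever a timestamp has to move between time bases (for example between AV_TIME_BASE_Q and avctx->pkt_timebase). A tiny illustration with made-up time bases:

    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    /* a * bq / cq: one second worth of 48 kHz samples expressed in 90 kHz ticks. */
    static int64_t samples_to_90khz(int64_t nb_samples)
    {
        return av_rescale_q(nb_samples,
                            (AVRational){ 1, 48000 },   /* source time base */
                            (AVRational){ 1, 90000 });  /* destination time base */
    }
    /* samples_to_90khz(48000) == 90000 */
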
#define AV_RL8(x)
Definition: intreadwrite.h:398
AVPacketList * pkt_props_tail
Definition: internal.h:149
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:88
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
AVBSFContext * bsf
Definition: internal.h:141
int width
Definition: frame.h:366
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1243
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1812
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: packet.h:72
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:903
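av_frame_apply_cropping() is what the static apply_cropping() helper in this file calls once a decoder has filled in the frame's crop_left/crop_right/crop_top/crop_bottom fields. A small hedged sketch of direct use:

    #include <libavutil/frame.h>

    /* Shave the crop borders off a decoded frame in place.
     * AV_FRAME_CROP_UNALIGNED trades data-pointer alignment for exact cropping,
     * mirroring what decode.c does when AV_CODEC_FLAG_UNALIGNED is set. */
    static int crop_in_place(AVFrame *frame, int allow_unaligned)
    {
        return av_frame_apply_cropping(frame,
                                       allow_unaligned ? AV_FRAME_CROP_UNALIGNED : 0);
    }
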
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ICC profile data consisting of an opaque octet buffer following the format described by ISO 15076-1...
Definition: packet.h:274
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:2108
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available...
Definition: decode.c:1978
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:435
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1935
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:662
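avcodec_receive_frame() and avcodec_send_packet() (listed further down) form the decoupled decoding API that decode_receive_frame_internal() ultimately serves. A minimal caller-side loop, assuming an already-opened decoder context; on_frame is a caller-supplied callback introduced only for this sketch:

    #include <libavcodec/avcodec.h>

    /* Feed one packet and drain all frames it produces.
     * dec must already be opened with avcodec_open2(). */
    static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame,
                             void (*on_frame)(AVFrame *))
    {
        int ret = avcodec_send_packet(dec, pkt);   /* pkt == NULL enters draining mode */
        if (ret < 0)
            return ret;

        while (ret >= 0) {
            ret = avcodec_receive_frame(dec, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                          /* need more input / fully drained */
            if (ret < 0)
                return ret;                        /* real decoding error */
            on_frame(frame);
            av_frame_unref(frame);
        }
        return 0;
    }

The EAGAIN and EOF returns are part of the API contract rather than errors, which is why they map to success here.
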
AVFrame * buffer_frame
Definition: internal.h:180
int capabilities
Codec capabilities.
Definition: codec.h:209
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:552
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: packet.h:346
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:563
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
int side_data_elems
Definition: packet.h:375
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:29
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:2256
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:333
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:79
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:502
#define fail()
Definition: checkasm.h:123
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:812
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwconfig.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:2554
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:369
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:147
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
uint32_t end_display_time
Definition: avcodec.h:2697
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2700
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:485
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
size_t crop_top
Definition: frame.h:668
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:199
int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf_lst)
Parse string describing list of bitstream filters and create single AVBSFContext describing the whole...
Definition: bsf.c:522
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:572
int channels
number of audio channels, only used for audio.
Definition: frame.h:614
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:545
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:2416
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:513
int channels
Definition: decode.c:64
AVFrame * compat_decode_frame
Definition: internal.h:189
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2226
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1685
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:738
AVPacket * in_pkt
Definition: internal.h:112
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: packet.h:228
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
AVFrameSideDataType
Definition: frame.h:48
#define AVERROR_INPUT_CHANGED
Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) ...
Definition: error.h:73
uint16_t format
Definition: avcodec.h:2695
#define s(width, name)
Definition: cbs_vp9.c:257
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:1633
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:1683
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: codec.h:312
DecodeSimpleContext ds
Definition: internal.h:140
int avpriv_packet_list_put(AVPacketList **packet_buffer, AVPacketList **plast_pktl, AVPacket *pkt, int(*copy)(AVPacket *dst, const AVPacket *src), int flags)
Append an AVPacket to the list.
Definition: avpacket.c:729
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:2116
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1569
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:174
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1785
int linesize[4]
Definition: decode.c:62
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:2124
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1065
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:381
Content light level (based on CTA-861.3).
Definition: packet.h:235
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:599
int(* decode)(struct AVCodecContext *, void *outdata, int *outdata_size, struct AVPacket *avpkt)
Definition: codec.h:282
AVPacketList * pkt_props
Definition: internal.h:148
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:200
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1661
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:534
int compat_decode_warned
Definition: internal.h:182
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:587
A list of zero terminated key/value strings.
Definition: packet.h:172
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:819
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:607
int sample_rate
samples per second
Definition: avcodec.h:1186
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:389
The codec supports this format via the hw_frames_ctx interface.
Definition: codec.h:402
int initial_sample_rate
Definition: internal.h:203
int debug
debug
Definition: avcodec.h:1611
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1821
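avcodec_get_hw_config() lets callers probe which hardware setups a decoder offers; ff_get_format() in this file consults the same per-codec hw_configs tables internally. A sketch of the usual probing loop for the hw_device_ctx method (the "supports this format via the hw_device_ctx interface" entry in this list):

    #include <libavcodec/avcodec.h>

    /* Find the hwaccel pixel format a codec exposes for a given device type,
     * or AV_PIX_FMT_NONE if that device type is not supported via hw_device_ctx. */
    static enum AVPixelFormat find_hw_pix_fmt(const AVCodec *codec, enum AVHWDeviceType type)
    {
        for (int i = 0;; i++) {
            const AVCodecHWConfig *cfg = avcodec_get_hw_config(codec, i);
            if (!cfg)
                return AV_PIX_FMT_NONE;
            if ((cfg->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX) &&
                cfg->device_type == type)
                return cfg->pix_fmt;
        }
    }
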
main external API structure.
Definition: avcodec.h:526
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:606
int skip_samples_multiplier
Definition: internal.h:194
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1941
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1110
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:396
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
uint8_t * data
Definition: frame.h:216
int avpriv_packet_list_get(AVPacketList **pkt_buffer, AVPacketList **pkt_buffer_end, AVPacket *pkt)
Remove the oldest AVPacket in the list and return it.
Definition: avpacket.c:766
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
size_t crop_right
Definition: frame.h:671
int64_t max_samples
The number of samples per frame to maximally accept.
Definition: avcodec.h:2344
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:714
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:475
int sample_rate
Sample rate of the audio data.
Definition: frame.h:480
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1341
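get_buffer2 is the user-overridable allocator that ff_get_buffer() ends up invoking (line 1915 in the listing above); avcodec_default_get_buffer2() is what it points to unless the caller installs something else. A hedged sketch of a pass-through override that only logs the request before deferring to the default:

    #include <libavcodec/avcodec.h>
    #include <libavutil/log.h>

    /* Minimal custom allocator: log the request, then fall back to the default.
     * A real callback would hand out caller-owned, properly aligned,
     * reference-counted buffers instead. */
    static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
    {
        av_log(avctx, AV_LOG_DEBUG, "buffer request: %dx%d fmt %d\n",
               frame->width, frame->height, frame->format);
        return avcodec_default_get_buffer2(avctx, frame, flags);
    }

    /* Installed before avcodec_open2():
     *     avctx->get_buffer2 = my_get_buffer2;
     */

A real override must satisfy the same constraints the default does (alignment, refcounted AVBufferRefs), which is why falling back to it is the safest starting point.
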
int showed_multi_packet_warning
Definition: internal.h:192
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:739
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket Normally demuxers return one frame at a time...
Definition: codec.h:93
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:277
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2100
Recommends skipping the specified number of samples.
Definition: packet.h:156
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:124
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:2233
int av_image_fill_plane_sizes(size_t sizes[4], enum AVPixelFormat pix_fmt, int height, const ptrdiff_t linesizes[4])
Fill plane sizes for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:164
#define STRIDE_ALIGN
Definition: internal.h:108
enum AVChromaLocation chroma_location
Definition: frame.h:565
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:2569
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:572
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:1357
AVBufferRef * pool
Definition: internal.h:136
#define AV_CODEC_FLAG_DROPCHANGED
Don&#39;t output frames whose parameters differ from first decoded frame in stream.
Definition: avcodec.h:292
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:229
#define flags(name, subs,...)
Definition: cbs_av1.c:560
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: packet.h:99
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1143
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1835
#define UTF8_MAX_BYTES
Definition: decode.c:840
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:409
Timecode which conforms to SMPTE ST 12-1:2014.
Definition: packet.h:291
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:179
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:417
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:2328
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:168
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: packet.h:374
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1194
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:46
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: packet.h:408
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:240
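update_frame_pool(), video_get_buffer() and audio_get_buffer() in this file are built on this API: pools are created with av_buffer_pool_init(), buffers are drawn with av_buffer_pool_get(), returned by unreferencing them, and the pool itself is released with av_buffer_pool_uninit(). A self-contained round trip, as a sketch:

    #include <libavutil/buffer.h>
    #include <libavutil/error.h>

    /* Round-trip a buffer through a pool, the same pattern the per-plane
     * picture buffers in this file follow. */
    static int pool_demo(void)
    {
        AVBufferPool *pool = av_buffer_pool_init(4096, NULL); /* NULL -> default allocator */
        AVBufferRef  *buf;

        if (!pool)
            return AVERROR(ENOMEM);

        buf = av_buffer_pool_get(pool);          /* reuses a free buffer when possible */
        if (!buf) {
            av_buffer_pool_uninit(&pool);
            return AVERROR(ENOMEM);
        }
        buf->data[0] = 0;                        /* use the 4096-byte buffer ... */

        av_buffer_unref(&buf);                   /* return the buffer to the pool */
        av_buffer_pool_uninit(&pool);            /* pool is freed once all buffers are back */
        return 0;
    }
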
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:2576
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:247
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:919
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:169
int caps_internal
Internal codec capabilities.
Definition: codec.h:306
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:288
uint64_t initial_channel_layout
Definition: internal.h:205
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1614
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:2654
#define FF_PSEUDOPAL
Definition: internal.h:335
AVHWDeviceType
Definition: hwcontext.h:27
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
int channels
number of audio channels
Definition: avcodec.h:1187
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:2689
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn&#39;t be output.
Definition: frame.h:537
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
enum AVColorPrimaries color_primaries
Definition: frame.h:554
static int extract_packet_props(AVCodecInternal *avci, AVPacket *pkt)
Definition: decode.c:148
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:362
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:2109
size_t compat_decode_partial_size
Definition: internal.h:188
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:317
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1217
#define IS_EMPTY(pkt)
Definition: decode.c:146
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1458
int height
Definition: frame.h:366
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:2106
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:556
static float sub(float src0, float src1)
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:338
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:442
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:2236
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:2128
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:953
enum AVSubtitleType type
Definition: avcodec.h:2680
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:355
int format
Definition: decode.c:59
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:2278
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:623
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:374
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:356
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:2076
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:671
int i
Definition: input.c:407
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1280
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: packet.h:120
static uint8_t tmp[11]
Definition: aes_ctr.c:26