FFmpeg
h264.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timer.h"
36 #include "internal.h"
37 #include "cabac.h"
38 #include "cabac_functions.h"
39 #include "error_resilience.h"
40 #include "avcodec.h"
41 #include "h264.h"
42 #include "h264data.h"
43 #include "h264chroma.h"
44 #include "h264_mvpred.h"
45 #include "golomb.h"
46 #include "mathops.h"
47 #include "me_cmp.h"
48 #include "mpegutils.h"
49 #include "rectangle.h"
50 #include "svq3.h"
51 #include "thread.h"
52 #include "vdpau_compat.h"
53 
54 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
55 
56 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
57 {
58  H264Context *h = avctx->priv_data;
59  return h ? h->sps.num_reorder_frames : 0;
60 }
61 
62 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
63  int (*mv)[2][4][2],
64  int mb_x, int mb_y, int mb_intra, int mb_skipped)
65 {
66  H264Context *h = opaque;
67  H264SliceContext *sl = &h->slice_ctx[0];
68 
69  sl->mb_x = mb_x;
70  sl->mb_y = mb_y;
71  sl->mb_xy = mb_x + mb_y * h->mb_stride;
72  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
73  av_assert1(ref >= 0);
74  /* FIXME: It is possible albeit uncommon that slice references
75  * differ between slices. We take the easy approach and ignore
76  * it for now. If this turns out to have any relevance in
77  * practice then correct remapping should be added. */
78  if (ref >= sl->ref_count[0])
79  ref = 0;
80  if (!sl->ref_list[0][ref].data[0]) {
81  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
82  ref = 0;
83  }
84  if ((sl->ref_list[0][ref].reference&3) != 3) {
85  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
86  return;
87  }
88  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
89  2, 2, 2, ref, 1);
90  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
91  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
92  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
93  sl->mb_mbaff =
94  sl->mb_field_decoding_flag = 0;
95  ff_h264_hl_decode_mb(h, sl);
96 }
97 
98 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
99  int y, int height)
100 {
101  AVCodecContext *avctx = h->avctx;
102  const AVFrame *src = h->cur_pic.f;
103  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
104  int vshift = desc->log2_chroma_h;
105  const int field_pic = h->picture_structure != PICT_FRAME;
106  if (field_pic) {
107  height <<= 1;
108  y <<= 1;
109  }
110 
111  height = FFMIN(height, avctx->height - y);
112 
113  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
114  return;
115 
116  if (avctx->draw_horiz_band) {
117  int offset[AV_NUM_DATA_POINTERS];
118  int i;
119 
120  offset[0] = y * src->linesize[0];
121  offset[1] =
122  offset[2] = (y >> vshift) * src->linesize[1];
123  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
124  offset[i] = 0;
125 
126  emms_c();
127 
128  avctx->draw_horiz_band(avctx, src, offset,
129  y, h->picture_structure, height);
130  }
131 }
132 
133 /**
134  * Check if the top & left blocks are available if needed and
135  * change the dc mode so it only uses the available blocks.
136  */
137 int ff_h264_check_intra4x4_pred_mode(const H264Context *h, H264SliceContext *sl)
138 {
139  static const int8_t top[12] = {
140  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
141  };
142  static const int8_t left[12] = {
143  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
144  };
145  int i;
146 
147  if (!(sl->top_samples_available & 0x8000)) {
148  for (i = 0; i < 4; i++) {
149  int status = top[sl->intra4x4_pred_mode_cache[scan8[0] + i]];
150  if (status < 0) {
151  av_log(h->avctx, AV_LOG_ERROR,
152  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
153  status, sl->mb_x, sl->mb_y);
154  return AVERROR_INVALIDDATA;
155  } else if (status) {
156  sl->intra4x4_pred_mode_cache[scan8[0] + i] = status;
157  }
158  }
159  }
160 
161  if ((sl->left_samples_available & 0x8888) != 0x8888) {
162  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
163  for (i = 0; i < 4; i++)
164  if (!(sl->left_samples_available & mask[i])) {
165  int status = left[sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
166  if (status < 0) {
167  av_log(h->avctx, AV_LOG_ERROR,
168  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
169  status, sl->mb_x, sl->mb_y);
170  return AVERROR_INVALIDDATA;
171  } else if (status) {
172  sl->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
173  }
174  }
175  }
176 
177  return 0;
178 } // FIXME cleanup like ff_h264_check_intra_pred_mode
179 
180 /**
181  * Check if the top & left blocks are available if needed and
182  * change the dc mode so it only uses the available blocks.
183  */
184 int ff_h264_check_intra_pred_mode(const H264Context *h, H264SliceContext *sl,
185  int mode, int is_chroma)
186 {
187  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
188  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
189 
190  if (mode > 3U) {
191  av_log(h->avctx, AV_LOG_ERROR,
192  "out of range intra chroma pred mode at %d %d\n",
193  sl->mb_x, sl->mb_y);
194  return AVERROR_INVALIDDATA;
195  }
196 
197  if (!(sl->top_samples_available & 0x8000)) {
198  mode = top[mode];
199  if (mode < 0) {
200  av_log(h->avctx, AV_LOG_ERROR,
201  "top block unavailable for requested intra mode at %d %d\n",
202  sl->mb_x, sl->mb_y);
203  return AVERROR_INVALIDDATA;
204  }
205  }
206 
207  if ((sl->left_samples_available & 0x8080) != 0x8080) {
208  mode = left[mode];
209  if (mode < 0) {
210  av_log(h->avctx, AV_LOG_ERROR,
211  "left block unavailable for requested intra mode at %d %d\n",
212  sl->mb_x, sl->mb_y);
213  return AVERROR_INVALIDDATA;
214  }
215  if (is_chroma && (sl->left_samples_available & 0x8080)) {
216  // mad cow disease mode, aka MBAFF + constrained_intra_pred
217  mode = ALZHEIMER_DC_L0T_PRED8x8 +
218  (!(sl->left_samples_available & 0x8000)) +
219  2 * (mode == DC_128_PRED8x8);
220  }
221  }
222 
223  return mode;
224 }
225 
226 const uint8_t *ff_h264_decode_nal(H264Context *h, H264SliceContext *sl,
227  const uint8_t *src,
228  int *dst_length, int *consumed, int length)
229 {
230  int i, si, di;
231  uint8_t *dst;
232 
233  // src[0]&0x80; // forbidden bit
234  h->nal_ref_idc = src[0] >> 5;
235  h->nal_unit_type = src[0] & 0x1F;
236 
237  src++;
238  length--;
239 
240 #define STARTCODE_TEST \
241  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
242  if (src[i + 2] != 3 && src[i + 2] != 0) { \
243  /* startcode, so we must be past the end */ \
244  length = i; \
245  } \
246  break; \
247  }
248 
249 #if HAVE_FAST_UNALIGNED
250 #define FIND_FIRST_ZERO \
251  if (i > 0 && !src[i]) \
252  i--; \
253  while (src[i]) \
254  i++
255 
256 #if HAVE_FAST_64BIT
257  for (i = 0; i + 1 < length; i += 9) {
258  if (!((~AV_RN64A(src + i) &
259  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
260  0x8000800080008080ULL))
261  continue;
262  FIND_FIRST_ZERO;
263  STARTCODE_TEST;
264  i -= 7;
265  }
266 #else
267  for (i = 0; i + 1 < length; i += 5) {
268  if (!((~AV_RN32A(src + i) &
269  (AV_RN32A(src + i) - 0x01000101U)) &
270  0x80008080U))
271  continue;
272  FIND_FIRST_ZERO;
273  STARTCODE_TEST;
274  i -= 3;
275  }
276 #endif
277 #else
278  for (i = 0; i + 1 < length; i += 2) {
279  if (src[i])
280  continue;
281  if (i > 0 && src[i - 1] == 0)
282  i--;
283  STARTCODE_TEST;
284  }
285 #endif
286 
287  av_fast_padded_malloc(&sl->rbsp_buffer, &sl->rbsp_buffer_size, length+MAX_MBPAIR_SIZE);
288  dst = sl->rbsp_buffer;
289 
290  if (!dst)
291  return NULL;
292 
293  if(i>=length-1){ //no escaped 0
294  *dst_length= length;
295  *consumed= length+1; //+1 for the header
296  if(h->avctx->flags2 & CODEC_FLAG2_FAST){
297  return src;
298  }else{
299  memcpy(dst, src, length);
300  return dst;
301  }
302  }
303 
304  memcpy(dst, src, i);
305  si = di = i;
306  while (si + 2 < length) {
307  // remove escapes (very rare 1:2^22)
308  if (src[si + 2] > 3) {
309  dst[di++] = src[si++];
310  dst[di++] = src[si++];
311  } else if (src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
312  if (src[si + 2] == 3) { // escape
313  dst[di++] = 0;
314  dst[di++] = 0;
315  si += 3;
316  continue;
317  } else // next start code
318  goto nsc;
319  }
320 
321  dst[di++] = src[si++];
322  }
323  while (si < length)
324  dst[di++] = src[si++];
325 
326 nsc:
327  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
328 
329  *dst_length = di;
330  *consumed = si + 1; // +1 for the header
331  /* FIXME store exact number of bits in the getbitcontext
332  * (it is needed for decoding) */
333  return dst;
334 }
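/* For reference: the loop above undoes H.264 start-code emulation prevention.
 * An encoder escapes any in-NAL byte pair 00 00 followed by 00, 01, 02 or 03
 * by inserting an emulation_prevention_three_byte, so 00 00 03 xx decodes back
 * to 00 00 xx, while an unescaped 00 00 00 / 00 00 01 can only be a start code
 * (or trailing padding) and therefore marks the end of the current NAL unit. */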
335 
336 /**
337  * Identify the exact end of the bitstream
338  * @return the length of the trailing bits, or 0 if damaged
339  */
340 static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
341 {
342  int v = *src;
343  int r;
344 
345  ff_tlog(h->avctx, "rbsp trailing %X\n", v);
346 
347  for (r = 1; r < 9; r++) {
348  if (v & 1)
349  return r;
350  v >>= 1;
351  }
352  return 0;
353 }
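/* Example of the trailing-bit count returned above: the RBSP ends with an
 * rbsp_stop_one_bit ('1') followed by zero bits up to the byte boundary, so a
 * final byte of 0x80 yields 8 and 0x01 yields 1; get_bit_length() subtracts
 * this value from 8 * dst_length to obtain the exact payload length in bits. */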
354 
355 void ff_h264_free_tables(H264Context *h)
356 {
357  int i;
358 
361  av_freep(&h->cbp_table);
362  av_freep(&h->mvd_table[0]);
363  av_freep(&h->mvd_table[1]);
364  av_freep(&h->direct_table);
367  h->slice_table = NULL;
368  av_freep(&h->list_counts);
369 
370  av_freep(&h->mb2b_xy);
371  av_freep(&h->mb2br_xy);
372 
377 
378  for (i = 0; i < h->nb_slice_ctx; i++) {
379  H264SliceContext *sl = &h->slice_ctx[i];
380 
381  av_freep(&sl->dc_val_base);
382  av_freep(&sl->er.mb_index2xy);
383  av_freep(&sl->er.error_status_table);
384  av_freep(&sl->er.er_temp_buffer);
385 
386  av_freep(&sl->bipred_scratchpad);
387  av_freep(&sl->edge_emu_buffer);
388  av_freep(&sl->top_borders[0]);
389  av_freep(&sl->top_borders[1]);
390 
391  sl->bipred_scratchpad_allocated = 0;
392  sl->edge_emu_buffer_allocated = 0;
393  sl->top_borders_allocated[0] = 0;
394  sl->top_borders_allocated[1] = 0;
395  }
396 }
397 
398 int ff_h264_alloc_tables(H264Context *h)
399 {
400  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
401  const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
402  int x, y;
403 
405  row_mb_num, 8 * sizeof(uint8_t), fail)
407 
409  big_mb_num * 48 * sizeof(uint8_t), fail)
411  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
413  big_mb_num * sizeof(uint16_t), fail)
415  big_mb_num * sizeof(uint8_t), fail)
417  row_mb_num, 16 * sizeof(uint8_t), fail);
419  row_mb_num, 16 * sizeof(uint8_t), fail);
420  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
421  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
422 
424  4 * big_mb_num * sizeof(uint8_t), fail);
426  big_mb_num * sizeof(uint8_t), fail)
427 
428  memset(h->slice_table_base, -1,
429  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
430  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
431 
433  big_mb_num * sizeof(uint32_t), fail);
435  big_mb_num * sizeof(uint32_t), fail);
436  for (y = 0; y < h->mb_height; y++)
437  for (x = 0; x < h->mb_width; x++) {
438  const int mb_xy = x + y * h->mb_stride;
439  const int b_xy = 4 * x + 4 * y * h->b_stride;
440 
441  h->mb2b_xy[mb_xy] = b_xy;
442  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
443  }
444 
445  if (!h->dequant4_coeff[0])
447 
448  return 0;
449 
450 fail:
451  ff_h264_free_tables(h);
452  return AVERROR(ENOMEM);
453 }
454 
455 /**
456  * Init context
457  * Allocate buffers which are not shared amongst multiple threads.
458  */
459 int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
460 {
461  ERContext *er = &sl->er;
462  int mb_array_size = h->mb_height * h->mb_stride;
463  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
464  int c_size = h->mb_stride * (h->mb_height + 1);
465  int yc_size = y_size + 2 * c_size;
466  int x, y, i;
467 
468  sl->ref_cache[0][scan8[5] + 1] =
469  sl->ref_cache[0][scan8[7] + 1] =
470  sl->ref_cache[0][scan8[13] + 1] =
471  sl->ref_cache[1][scan8[5] + 1] =
472  sl->ref_cache[1][scan8[7] + 1] =
473  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
474 
475  if (sl != h->slice_ctx) {
476  memset(er, 0, sizeof(*er));
477  } else
478  if (CONFIG_ERROR_RESILIENCE) {
479 
480  /* init ER */
481  er->avctx = h->avctx;
482  er->decode_mb = h264_er_decode_mb;
483  er->opaque = h;
484  er->quarter_sample = 1;
485 
486  er->mb_num = h->mb_num;
487  er->mb_width = h->mb_width;
488  er->mb_height = h->mb_height;
489  er->mb_stride = h->mb_stride;
490  er->b8_stride = h->mb_width * 2 + 1;
491 
492  // error resilience code looks cleaner with this
494  (h->mb_num + 1) * sizeof(int), fail);
495 
496  for (y = 0; y < h->mb_height; y++)
497  for (x = 0; x < h->mb_width; x++)
498  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
499 
500  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
501  h->mb_stride + h->mb_width;
502 
504  mb_array_size * sizeof(uint8_t), fail);
505 
507  h->mb_height * h->mb_stride, fail);
508 
510  yc_size * sizeof(int16_t), fail);
511  er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
512  er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
513  er->dc_val[2] = er->dc_val[1] + c_size;
514  for (i = 0; i < yc_size; i++)
515  sl->dc_val_base[i] = 1024;
516  }
517 
518  return 0;
519 
520 fail:
521  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
522 }
523 
524 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
525  int parse_extradata);
526 
527 int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
528 {
529  AVCodecContext *avctx = h->avctx;
530  int ret;
531 
532  if (!buf || size <= 0)
533  return -1;
534 
535  if (buf[0] == 1) {
536  int i, cnt, nalsize;
537  const unsigned char *p = buf;
538 
539  h->is_avc = 1;
540 
541  if (size < 7) {
542  av_log(avctx, AV_LOG_ERROR,
543  "avcC %d too short\n", size);
544  return AVERROR_INVALIDDATA;
545  }
546  /* sps and pps in the avcC always have length coded with 2 bytes,
547  * so put a fake nal_length_size = 2 while parsing them */
548  h->nal_length_size = 2;
549  // Decode sps from avcC
550  cnt = *(p + 5) & 0x1f; // Number of sps
551  p += 6;
552  for (i = 0; i < cnt; i++) {
553  nalsize = AV_RB16(p) + 2;
554  if(nalsize > size - (p-buf))
555  return AVERROR_INVALIDDATA;
556  ret = decode_nal_units(h, p, nalsize, 1);
557  if (ret < 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "Decoding sps %d from avcC failed\n", i);
560  return ret;
561  }
562  p += nalsize;
563  }
564  // Decode pps from avcC
565  cnt = *(p++); // Number of pps
566  for (i = 0; i < cnt; i++) {
567  nalsize = AV_RB16(p) + 2;
568  if(nalsize > size - (p-buf))
569  return AVERROR_INVALIDDATA;
570  ret = decode_nal_units(h, p, nalsize, 1);
571  if (ret < 0) {
572  av_log(avctx, AV_LOG_ERROR,
573  "Decoding pps %d from avcC failed\n", i);
574  return ret;
575  }
576  p += nalsize;
577  }
578  // Store right nal length size that will be used to parse all other nals
579  h->nal_length_size = (buf[4] & 0x03) + 1;
580  } else {
581  h->is_avc = 0;
582  ret = decode_nal_units(h, buf, size, 1);
583  if (ret < 0)
584  return ret;
585  }
586  return size;
587 }
588 
589 static av_cold int h264_init_context(AVCodecContext *avctx, H264Context *h)
590 {
591  int i;
592 
593  h->avctx = avctx;
594  h->dequant_coeff_pps = -1;
595  h->current_sps_id = -1;
596  h->cur_chroma_format_idc = -1;
597 
599  h->slice_context_count = 1;
600  h->workaround_bugs = avctx->workaround_bugs;
601  h->flags = avctx->flags;
602  h->prev_poc_msb = 1 << 16;
603  h->x264_build = -1;
604  h->recovery_frame = -1;
605  h->frame_recovered = 0;
606  h->prev_frame_num = -1;
608 
609  h->next_outputed_poc = INT_MIN;
610  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
611  h->last_pocs[i] = INT_MIN;
612 
614 
616 
618  h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
619  if (!h->slice_ctx) {
620  h->nb_slice_ctx = 0;
621  return AVERROR(ENOMEM);
622  }
623 
624  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
625  h->DPB[i].f = av_frame_alloc();
626  if (!h->DPB[i].f)
627  return AVERROR(ENOMEM);
628  }
629 
630  h->cur_pic.f = av_frame_alloc();
631  if (!h->cur_pic.f)
632  return AVERROR(ENOMEM);
633 
634  h->last_pic_for_ec.f = av_frame_alloc();
635  if (!h->last_pic_for_ec.f)
636  return AVERROR(ENOMEM);
637 
638  for (i = 0; i < h->nb_slice_ctx; i++)
639  h->slice_ctx[i].h264 = h;
640 
641  return 0;
642 }
643 
644 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
645 {
646  H264Context *h = avctx->priv_data;
647  int ret;
648 
649  ret = h264_init_context(avctx, h);
650  if (ret < 0)
651  return ret;
652 
653  /* set defaults */
654  if (!avctx->has_b_frames)
655  h->low_delay = 1;
656 
658 
660 
661  if (avctx->codec_id == AV_CODEC_ID_H264) {
662  if (avctx->ticks_per_frame == 1) {
663  if(h->avctx->time_base.den < INT_MAX/2) {
664  h->avctx->time_base.den *= 2;
665  } else
666  h->avctx->time_base.num /= 2;
667  }
668  avctx->ticks_per_frame = 2;
669  }
670 
671  if (avctx->extradata_size > 0 && avctx->extradata) {
672  ret = ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
673  if (ret < 0) {
674  h264_decode_end(avctx);
675  return ret;
676  }
677  }
678 
679  if (h->sps.bitstream_restriction_flag &&
680  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
681  h->avctx->has_b_frames = h->sps.num_reorder_frames;
682  h->low_delay = 0;
683  }
684 
685  avctx->internal->allocate_progress = 1;
686 
688 
689  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
690  h->enable_er = 0;
691 
692  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
693  av_log(avctx, AV_LOG_WARNING,
694  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
695  "Use it at your own risk\n");
696  }
697 
698  return 0;
699 }
700 
701 static int decode_init_thread_copy(AVCodecContext *avctx)
702 {
703  H264Context *h = avctx->priv_data;
704  int ret;
705 
706  if (!avctx->internal->is_copy)
707  return 0;
708 
709  memset(h, 0, sizeof(*h));
710 
711  ret = h264_init_context(avctx, h);
712  if (ret < 0)
713  return ret;
714 
715  h->context_initialized = 0;
716 
717  return 0;
718 }
719 
720 /**
721  * Run setup operations that must be run after slice header decoding.
722  * This includes finding the next displayed frame.
723  *
724  * @param h h264 master context
725  * @param setup_finished enough NALs have been read that we can call
726  * ff_thread_finish_setup()
727  */
728 static void decode_postinit(H264Context *h, int setup_finished)
729 {
731  H264Picture *cur = h->cur_pic_ptr;
732  int i, pics, out_of_order, out_idx;
733 
734  h->cur_pic_ptr->f->pict_type = h->pict_type;
735 
736  if (h->next_output_pic)
737  return;
738 
739  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
740  /* FIXME: if we have two PAFF fields in one packet, we can't start
741  * the next thread here. If we have one field per packet, we can.
742  * The check in decode_nal_units() is not good enough to find this
743  * yet, so we assume the worst for now. */
744  // if (setup_finished)
745  // ff_thread_finish_setup(h->avctx);
746  if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
747  return;
748  if (h->avctx->hwaccel || h->missing_fields <=1)
749  return;
750  }
751 
752  cur->f->interlaced_frame = 0;
753  cur->f->repeat_pict = 0;
754 
755  /* Signal interlacing information externally. */
756  /* Prioritize picture timing SEI information over used
757  * decoding process if it exists. */
758 
759  if (h->sps.pic_struct_present_flag) {
760  switch (h->sei_pic_struct) {
762  break;
765  cur->f->interlaced_frame = 1;
766  break;
769  if (FIELD_OR_MBAFF_PICTURE(h))
770  cur->f->interlaced_frame = 1;
771  else
772  // try to flag soft telecine progressive
774  break;
777  /* Signal the possibility of telecined film externally
778  * (pic_struct 5,6). From these hints, let the applications
779  * decide if they apply deinterlacing. */
780  cur->f->repeat_pict = 1;
781  break;
783  cur->f->repeat_pict = 2;
784  break;
786  cur->f->repeat_pict = 4;
787  break;
788  }
789 
790  if ((h->sei_ct_type & 3) &&
791  h->sei_pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
792  cur->f->interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
793  } else {
794  /* Derive interlacing flag from used decoding process. */
795  cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
796  }
797  h->prev_interlaced_frame = cur->f->interlaced_frame;
798 
799  if (cur->field_poc[0] != cur->field_poc[1]) {
800  /* Derive top_field_first from field pocs. */
801  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
802  } else {
803  if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
804  * Use picture timing SEI information. Even if it is
805  * information from a past frame, it is better than nothing. */
806  if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
807  h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
808  cur->f->top_field_first = 1;
809  else
810  cur->f->top_field_first = 0;
811  } else {
812  /* Most likely progressive */
813  cur->f->top_field_first = 0;
814  }
815  }
816 
817  if (h->sei_frame_packing_present &&
822  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
823  if (stereo) {
824  switch (h->frame_packing_arrangement_type) {
825  case 0:
826  stereo->type = AV_STEREO3D_CHECKERBOARD;
827  break;
828  case 1:
829  stereo->type = AV_STEREO3D_COLUMNS;
830  break;
831  case 2:
832  stereo->type = AV_STEREO3D_LINES;
833  break;
834  case 3:
835  if (h->quincunx_subsampling)
836  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
837  else
838  stereo->type = AV_STEREO3D_SIDEBYSIDE;
839  break;
840  case 4:
841  stereo->type = AV_STEREO3D_TOPBOTTOM;
842  break;
843  case 5:
844  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
845  break;
846  case 6:
847  stereo->type = AV_STEREO3D_2D;
848  break;
849  }
850 
851  if (h->content_interpretation_type == 2)
852  stereo->flags = AV_STEREO3D_FLAG_INVERT;
853  }
854  }
855 
858  double angle = h->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
859  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
860  AV_FRAME_DATA_DISPLAYMATRIX,
861  sizeof(int32_t) * 9);
862  if (rotation) {
863  av_display_rotation_set((int32_t *)rotation->data, angle);
864  av_display_matrix_flip((int32_t *)rotation->data,
865  h->sei_hflip, h->sei_vflip);
866  }
867  }
868 
869  cur->mmco_reset = h->mmco_reset;
870  h->mmco_reset = 0;
871 
872  // FIXME do something with unavailable reference frames
873 
874  /* Sort B-frames into display order */
875 
876  if (h->sps.bitstream_restriction_flag &&
877  h->avctx->has_b_frames < h->sps.num_reorder_frames) {
878  h->avctx->has_b_frames = h->sps.num_reorder_frames;
879  h->low_delay = 0;
880  }
881 
882  if (h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT &&
883  !h->sps.bitstream_restriction_flag) {
884  h->avctx->has_b_frames = MAX_DELAYED_PIC_COUNT - 1;
885  h->low_delay = 0;
886  }
887 
888  for (i = 0; 1; i++) {
889  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
890  if(i)
891  h->last_pocs[i-1] = cur->poc;
892  break;
893  } else if(i) {
894  h->last_pocs[i-1]= h->last_pocs[i];
895  }
896  }
897  out_of_order = MAX_DELAYED_PIC_COUNT - i;
898  if( cur->f->pict_type == AV_PICTURE_TYPE_B
900  out_of_order = FFMAX(out_of_order, 1);
901  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
902  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
903  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
904  h->last_pocs[i] = INT_MIN;
905  h->last_pocs[0] = cur->poc;
906  cur->mmco_reset = 1;
907  } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
908  av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
909  h->avctx->has_b_frames = out_of_order;
910  h->low_delay = 0;
911  }
912 
913  pics = 0;
914  while (h->delayed_pic[pics])
915  pics++;
916 
917  av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
918 
919  h->delayed_pic[pics++] = cur;
920  if (cur->reference == 0)
921  cur->reference = DELAYED_PIC_REF;
922 
923  out = h->delayed_pic[0];
924  out_idx = 0;
925  for (i = 1; h->delayed_pic[i] &&
926  !h->delayed_pic[i]->f->key_frame &&
927  !h->delayed_pic[i]->mmco_reset;
928  i++)
929  if (h->delayed_pic[i]->poc < out->poc) {
930  out = h->delayed_pic[i];
931  out_idx = i;
932  }
933  if (h->avctx->has_b_frames == 0 &&
934  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
935  h->next_outputed_poc = INT_MIN;
936  out_of_order = out->poc < h->next_outputed_poc;
937 
938  if (out_of_order || pics > h->avctx->has_b_frames) {
939  out->reference &= ~DELAYED_PIC_REF;
940  // for frame threading, the owner must be the second field's thread or
941  // else the first thread can release the picture and reuse it unsafely
942  for (i = out_idx; h->delayed_pic[i]; i++)
943  h->delayed_pic[i] = h->delayed_pic[i + 1];
944  }
945  if (!out_of_order && pics > h->avctx->has_b_frames) {
946  h->next_output_pic = out;
947  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
948  h->next_outputed_poc = INT_MIN;
949  } else
950  h->next_outputed_poc = out->poc;
951  } else {
952  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
953  }
954 
955  if (h->next_output_pic) {
956  if (h->next_output_pic->recovered) {
957  // We have reached a recovery point and all frames after it in
958  // display order are "recovered".
959  h->frame_recovered |= FRAME_RECOVERED_SEI;
960  }
961  h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
962  }
963 
964  if (setup_finished && !h->avctx->hwaccel)
965  ff_thread_finish_setup(h->avctx);
966 }
967 
968 int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
969 {
970  int list, i;
971  int luma_def, chroma_def;
972 
973  sl->use_weight = 0;
974  sl->use_weight_chroma = 0;
975  sl->luma_log2_weight_denom = get_ue_golomb(&sl->gb);
976  if (h->sps.chroma_format_idc)
977  sl->chroma_log2_weight_denom = get_ue_golomb(&sl->gb);
978 
979  if (sl->luma_log2_weight_denom > 7U) {
980  av_log(h->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is out of range\n", sl->luma_log2_weight_denom);
981  sl->luma_log2_weight_denom = 0;
982  }
983  if (sl->chroma_log2_weight_denom > 7U) {
984  av_log(h->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %d is out of range\n", sl->chroma_log2_weight_denom);
985  sl->chroma_log2_weight_denom = 0;
986  }
987 
988  luma_def = 1 << sl->luma_log2_weight_denom;
989  chroma_def = 1 << sl->chroma_log2_weight_denom;
990 
991  for (list = 0; list < 2; list++) {
992  sl->luma_weight_flag[list] = 0;
993  sl->chroma_weight_flag[list] = 0;
994  for (i = 0; i < sl->ref_count[list]; i++) {
995  int luma_weight_flag, chroma_weight_flag;
996 
997  luma_weight_flag = get_bits1(&sl->gb);
998  if (luma_weight_flag) {
999  sl->luma_weight[i][list][0] = get_se_golomb(&sl->gb);
1000  sl->luma_weight[i][list][1] = get_se_golomb(&sl->gb);
1001  if (sl->luma_weight[i][list][0] != luma_def ||
1002  sl->luma_weight[i][list][1] != 0) {
1003  sl->use_weight = 1;
1004  sl->luma_weight_flag[list] = 1;
1005  }
1006  } else {
1007  sl->luma_weight[i][list][0] = luma_def;
1008  sl->luma_weight[i][list][1] = 0;
1009  }
1010 
1011  if (h->sps.chroma_format_idc) {
1012  chroma_weight_flag = get_bits1(&sl->gb);
1013  if (chroma_weight_flag) {
1014  int j;
1015  for (j = 0; j < 2; j++) {
1016  sl->chroma_weight[i][list][j][0] = get_se_golomb(&sl->gb);
1017  sl->chroma_weight[i][list][j][1] = get_se_golomb(&sl->gb);
1018  if (sl->chroma_weight[i][list][j][0] != chroma_def ||
1019  sl->chroma_weight[i][list][j][1] != 0) {
1020  sl->use_weight_chroma = 1;
1021  sl->chroma_weight_flag[list] = 1;
1022  }
1023  }
1024  } else {
1025  int j;
1026  for (j = 0; j < 2; j++) {
1027  sl->chroma_weight[i][list][j][0] = chroma_def;
1028  sl->chroma_weight[i][list][j][1] = 0;
1029  }
1030  }
1031  }
1032  }
1033  if (sl->slice_type_nos != AV_PICTURE_TYPE_B)
1034  break;
1035  }
1036  sl->use_weight = sl->use_weight || sl->use_weight_chroma;
1037  return 0;
1038 }
1039 
1040 /**
1041  * instantaneous decoder refresh.
1042  */
1043 static void idr(H264Context *h)
1044 {
1045  int i;
1046  ff_h264_remove_all_refs(h);
1047  h->prev_frame_num =
1048  h->prev_frame_num_offset = 0;
1049  h->prev_poc_msb = 1<<16;
1050  h->prev_poc_lsb = 0;
1051  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1052  h->last_pocs[i] = INT_MIN;
1053 }
1054 
1055 /* forget old pics after a seek */
1056 void ff_h264_flush_change(H264Context *h)
1057 {
1058  int i, j;
1059 
1060  h->next_outputed_poc = INT_MIN;
1061  h->prev_interlaced_frame = 1;
1062  idr(h);
1063 
1064  h->prev_frame_num = -1;
1065  if (h->cur_pic_ptr) {
1066  h->cur_pic_ptr->reference = 0;
1067  for (j=i=0; h->delayed_pic[i]; i++)
1068  if (h->delayed_pic[i] != h->cur_pic_ptr)
1069  h->delayed_pic[j++] = h->delayed_pic[i];
1070  h->delayed_pic[j] = NULL;
1071  }
1072  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1073 
1074  h->first_field = 0;
1075  ff_h264_reset_sei(h);
1076  h->recovery_frame = -1;
1077  h->frame_recovered = 0;
1078  h->current_slice = 0;
1079  h->mmco_reset = 1;
1080  for (i = 0; i < h->nb_slice_ctx; i++)
1081  h->slice_ctx[i].list_count = 0;
1082 }
1083 
1084 /* forget old pics after a seek */
1085 static void flush_dpb(AVCodecContext *avctx)
1086 {
1087  H264Context *h = avctx->priv_data;
1088  int i;
1089 
1090  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1091 
1092  ff_h264_flush_change(h);
1093 
1094  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
1095  ff_h264_unref_picture(h, &h->DPB[i]);
1096  h->cur_pic_ptr = NULL;
1097  ff_h264_unref_picture(h, &h->cur_pic);
1098 
1099  h->mb_y = 0;
1100 
1101  ff_h264_free_tables(h);
1102  h->context_initialized = 0;
1103 }
1104 
1105 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
1106 {
1107  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
1108  int field_poc[2];
1109 
1111  if (h->frame_num < h->prev_frame_num)
1112  h->frame_num_offset += max_frame_num;
1113 
1114  if (h->sps.poc_type == 0) {
1115  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
1116 
1117  if (h->poc_lsb < h->prev_poc_lsb &&
1118  h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
1119  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
1120  else if (h->poc_lsb > h->prev_poc_lsb &&
1121  h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
1122  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
1123  else
1124  h->poc_msb = h->prev_poc_msb;
1125  field_poc[0] =
1126  field_poc[1] = h->poc_msb + h->poc_lsb;
1127  if (h->picture_structure == PICT_FRAME)
1128  field_poc[1] += h->delta_poc_bottom;
1129  } else if (h->sps.poc_type == 1) {
1130  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
1131  int i;
1132 
1133  if (h->sps.poc_cycle_length != 0)
1134  abs_frame_num = h->frame_num_offset + h->frame_num;
1135  else
1136  abs_frame_num = 0;
1137 
1138  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
1139  abs_frame_num--;
1140 
1141  expected_delta_per_poc_cycle = 0;
1142  for (i = 0; i < h->sps.poc_cycle_length; i++)
1143  // FIXME integrate during sps parse
1144  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
1145 
1146  if (abs_frame_num > 0) {
1147  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
1148  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
1149 
1150  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
1151  for (i = 0; i <= frame_num_in_poc_cycle; i++)
1152  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
1153  } else
1154  expectedpoc = 0;
1155 
1156  if (h->nal_ref_idc == 0)
1157  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
1158 
1159  field_poc[0] = expectedpoc + h->delta_poc[0];
1160  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
1161 
1162  if (h->picture_structure == PICT_FRAME)
1163  field_poc[1] += h->delta_poc[1];
1164  } else {
1165  int poc = 2 * (h->frame_num_offset + h->frame_num);
1166 
1167  if (!h->nal_ref_idc)
1168  poc--;
1169 
1170  field_poc[0] = poc;
1171  field_poc[1] = poc;
1172  }
1173 
1174  if (h->picture_structure != PICT_BOTTOM_FIELD)
1175  pic_field_poc[0] = field_poc[0];
1176  if (h->picture_structure != PICT_TOP_FIELD)
1177  pic_field_poc[1] = field_poc[1];
1178  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
1179 
1180  return 0;
1181 }
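/* Worked example of the poc_type 0 wraparound handling above, assuming
 * log2_max_poc_lsb = 4 (max_poc_lsb = 16): with prev_poc_msb = 16 and
 * prev_poc_lsb = 14, a new poc_lsb of 1 satisfies poc_lsb < prev_poc_lsb and
 * prev_poc_lsb - poc_lsb >= 8, so poc_msb becomes 16 + 16 = 32 and the frame
 * POC is 32 + 1 = 33, i.e. the lsb counter wrapped but the POC keeps rising. */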
1182 
1183 /**
1184  * Compute profile from profile_idc and constraint_set?_flags.
1185  *
1186  * @param sps SPS
1187  *
1188  * @return profile as defined by FF_PROFILE_H264_*
1189  */
1190 int ff_h264_get_profile(SPS *sps)
1191 {
1192  int profile = sps->profile_idc;
1193 
1194  switch (sps->profile_idc) {
1195  case FF_PROFILE_H264_BASELINE:
1196  // constraint_set1_flag set to 1
1197  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
1198  break;
1199  case FF_PROFILE_H264_HIGH_10:
1200  case FF_PROFILE_H264_HIGH_422:
1201  case FF_PROFILE_H264_HIGH_444:
1202  // constraint_set3_flag set to 1
1203  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
1204  break;
1205  }
1206 
1207  return profile;
1208 }
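/* Example of the mapping above, per H.264 Annex A: profile_idc 66 (Baseline)
 * with constraint_set1_flag set is reported as Constrained Baseline, and
 * profile_idc 110 (High 10) with constraint_set3_flag set is reported as the
 * corresponding Intra-only profile. */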
1209 
1210 int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
1211 {
1212  int ref_count[2], list_count;
1213  int num_ref_idx_active_override_flag;
1214 
1215  // set defaults, might be overridden a few lines later
1216  ref_count[0] = h->pps.ref_count[0];
1217  ref_count[1] = h->pps.ref_count[1];
1218 
1219  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1220  unsigned max[2];
1221  max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
1222 
1223  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1224  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1225  num_ref_idx_active_override_flag = get_bits1(&sl->gb);
1226 
1227  if (num_ref_idx_active_override_flag) {
1228  ref_count[0] = get_ue_golomb(&sl->gb) + 1;
1229  if (sl->slice_type_nos == AV_PICTURE_TYPE_B) {
1230  ref_count[1] = get_ue_golomb(&sl->gb) + 1;
1231  } else
1232  // full range is spec-ok in this case, even for frames
1233  ref_count[1] = 1;
1234  }
1235 
1236  if (ref_count[0]-1 > max[0] || ref_count[1]-1 > max[1]){
1237  av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", ref_count[0]-1, max[0], ref_count[1]-1, max[1]);
1238  sl->ref_count[0] = sl->ref_count[1] = 0;
1239  sl->list_count = 0;
1240  return AVERROR_INVALIDDATA;
1241  }
1242 
1243  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1244  list_count = 2;
1245  else
1246  list_count = 1;
1247  } else {
1248  list_count = 0;
1249  ref_count[0] = ref_count[1] = 0;
1250  }
1251 
1252  if (list_count != sl->list_count ||
1253  ref_count[0] != sl->ref_count[0] ||
1254  ref_count[1] != sl->ref_count[1]) {
1255  sl->ref_count[0] = ref_count[0];
1256  sl->ref_count[1] = ref_count[1];
1257  sl->list_count = list_count;
1258  return 1;
1259  }
1260 
1261  return 0;
1262 }
1263 
1264 static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
1265 
1266 static int get_bit_length(H264Context *h, const uint8_t *buf,
1267  const uint8_t *ptr, int dst_length,
1268  int i, int next_avc)
1269 {
1270  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
1271  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
1272  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
1273  h->workaround_bugs |= FF_BUG_TRUNCATED;
1274 
1275  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
1276  while (dst_length > 0 && ptr[dst_length - 1] == 0)
1277  dst_length--;
1278 
1279  if (!dst_length)
1280  return 0;
1281 
1282  return 8 * dst_length - decode_rbsp_trailing(h, ptr + dst_length - 1);
1283 }
1284 
1285 static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
1286 {
1287  int next_avc = h->is_avc ? 0 : buf_size;
1288  int nal_index = 0;
1289  int buf_index = 0;
1290  int nals_needed = 0;
1291  int first_slice = 0;
1292 
1293  while(1) {
1294  GetBitContext gb;
1295  int nalsize = 0;
1296  int dst_length, bit_length, consumed;
1297  const uint8_t *ptr;
1298 
1299  if (buf_index >= next_avc) {
1300  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1301  if (nalsize < 0)
1302  break;
1303  next_avc = buf_index + nalsize;
1304  } else {
1305  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1306  if (buf_index >= buf_size)
1307  break;
1308  if (buf_index >= next_avc)
1309  continue;
1310  }
1311 
1312  ptr = ff_h264_decode_nal(h, &h->slice_ctx[0], buf + buf_index, &dst_length, &consumed,
1313  next_avc - buf_index);
1314 
1315  if (!ptr || dst_length < 0)
1316  return AVERROR_INVALIDDATA;
1317 
1318  buf_index += consumed;
1319 
1320  bit_length = get_bit_length(h, buf, ptr, dst_length,
1321  buf_index, next_avc);
1322  nal_index++;
1323 
1324  /* Packets can sometimes contain multiple PPS/SPS,
1325  * e.g. two PAFF field pictures in one packet, or a demuxer
1326  * which splits NALs strangely; if so, when frame threading we
1327  * can't start the next thread until we've read all of them. */
1328  switch (h->nal_unit_type) {
1329  case NAL_SPS:
1330  case NAL_PPS:
1331  nals_needed = nal_index;
1332  break;
1333  case NAL_DPA:
1334  case NAL_IDR_SLICE:
1335  case NAL_SLICE:
1336  init_get_bits(&gb, ptr, bit_length);
1337  if (!get_ue_golomb(&gb) ||
1338  !first_slice ||
1339  first_slice != h->nal_unit_type)
1340  nals_needed = nal_index;
1341  if (!first_slice)
1342  first_slice = h->nal_unit_type;
1343  }
1344  }
1345 
1346  return nals_needed;
1347 }
1348 
1349 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1350  int parse_extradata)
1351 {
1352  AVCodecContext *const avctx = h->avctx;
1353  H264SliceContext *sl;
1354  int buf_index;
1355  unsigned context_count;
1356  int next_avc;
1357  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
1358  int nal_index;
1359  int idr_cleared=0;
1360  int ret = 0;
1361 
1362  h->nal_unit_type= 0;
1363 
1364  if(!h->slice_context_count)
1365  h->slice_context_count= 1;
1367  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
1368  h->current_slice = 0;
1369  if (!h->first_field)
1370  h->cur_pic_ptr = NULL;
1371  ff_h264_reset_sei(h);
1372  }
1373 
1374  if (h->nal_length_size == 4) {
1375  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
1376  h->is_avc = 0;
1377  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
1378  h->is_avc = 1;
1379  }
1380 
1381  if (avctx->active_thread_type & FF_THREAD_FRAME)
1382  nals_needed = get_last_needed_nal(h, buf, buf_size);
1383 
1384  {
1385  buf_index = 0;
1386  context_count = 0;
1387  next_avc = h->is_avc ? 0 : buf_size;
1388  nal_index = 0;
1389  for (;;) {
1390  int consumed;
1391  int dst_length;
1392  int bit_length;
1393  const uint8_t *ptr;
1394  int nalsize = 0;
1395  int err;
1396 
1397  if (buf_index >= next_avc) {
1398  nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
1399  if (nalsize < 0)
1400  break;
1401  next_avc = buf_index + nalsize;
1402  } else {
1403  buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
1404  if (buf_index >= buf_size)
1405  break;
1406  if (buf_index >= next_avc)
1407  continue;
1408  }
1409 
1410  sl = &h->slice_ctx[context_count];
1411 
1412  ptr = ff_h264_decode_nal(h, sl, buf + buf_index, &dst_length,
1413  &consumed, next_avc - buf_index);
1414  if (!ptr || dst_length < 0) {
1415  ret = -1;
1416  goto end;
1417  }
1418 
1419  bit_length = get_bit_length(h, buf, ptr, dst_length,
1420  buf_index + consumed, next_avc);
1421 
1422  if (h->avctx->debug & FF_DEBUG_STARTCODE)
1423  av_log(h->avctx, AV_LOG_DEBUG,
1424  "NAL %d/%d at %d/%d length %d\n",
1425  h->nal_unit_type, h->nal_ref_idc, buf_index, buf_size, dst_length);
1426 
1427  if (h->is_avc && (nalsize != consumed) && nalsize)
1428  av_log(h->avctx, AV_LOG_DEBUG,
1429  "AVC: Consumed only %d bytes instead of %d\n",
1430  consumed, nalsize);
1431 
1432  buf_index += consumed;
1433  nal_index++;
1434 
1435  if (avctx->skip_frame >= AVDISCARD_NONREF &&
1436  h->nal_ref_idc == 0 &&
1437  h->nal_unit_type != NAL_SEI)
1438  continue;
1439 
1440 again:
1441  /* Ignore per frame NAL unit type during extradata
1442  * parsing. Decoding slices is not possible in codec init
1443  * with frame-mt */
1444  if (parse_extradata) {
1445  switch (h->nal_unit_type) {
1446  case NAL_IDR_SLICE:
1447  case NAL_SLICE:
1448  case NAL_DPA:
1449  case NAL_DPB:
1450  case NAL_DPC:
1451  av_log(h->avctx, AV_LOG_WARNING,
1452  "Ignoring NAL %d in global header/extradata\n",
1453  h->nal_unit_type);
1454  // fall through to next case
1455  case NAL_AUXILIARY_SLICE:
1456  h->nal_unit_type = NAL_FF_IGNORE;
1457  }
1458  }
1459 
1460  err = 0;
1461 
1462  switch (h->nal_unit_type) {
1463  case NAL_IDR_SLICE:
1464  if ((ptr[0] & 0xFC) == 0x98) {
1465  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
1466  h->next_outputed_poc = INT_MIN;
1467  ret = -1;
1468  goto end;
1469  }
1470  if (h->nal_unit_type != NAL_IDR_SLICE) {
1471  av_log(h->avctx, AV_LOG_ERROR,
1472  "Invalid mix of idr and non-idr slices\n");
1473  ret = -1;
1474  goto end;
1475  }
1476  if(!idr_cleared) {
1477  if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
1478  av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
1479  ret = AVERROR_INVALIDDATA;
1480  goto end;
1481  }
1482  idr(h); // FIXME ensure we don't lose some frames if there is reordering
1483  }
1484  idr_cleared = 1;
1485  h->has_recovery_point = 1;
1486  case NAL_SLICE:
1487  init_get_bits(&sl->gb, ptr, bit_length);
1488 
1489  if ( nals_needed >= nal_index
1490  || (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count))
1491  h->au_pps_id = -1;
1492 
1493  if ((err = ff_h264_decode_slice_header(h, sl)))
1494  break;
1495 
1496  if (h->sei_recovery_frame_cnt >= 0) {
1497  if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1498  h->valid_recovery_point = 1;
1499 
1500  if ( h->recovery_frame < 0
1501  || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
1502  h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
1503 
1504  if (!h->valid_recovery_point)
1505  h->recovery_frame = h->frame_num;
1506  }
1507  }
1508 
1509  h->cur_pic_ptr->f->key_frame |=
1510  (h->nal_unit_type == NAL_IDR_SLICE);
1511 
1512  if (h->nal_unit_type == NAL_IDR_SLICE ||
1513  h->recovery_frame == h->frame_num) {
1514  h->recovery_frame = -1;
1515  h->cur_pic_ptr->recovered = 1;
1516  }
1517  // If we have an IDR, all frames after it in decoded order are
1518  // "recovered".
1519  if (h->nal_unit_type == NAL_IDR_SLICE)
1520  h->frame_recovered |= FRAME_RECOVERED_IDR;
1521  h->frame_recovered |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
1522  h->frame_recovered |= 3*!!(avctx->flags & CODEC_FLAG_OUTPUT_CORRUPT);
1523 #if 1
1524  h->cur_pic_ptr->recovered |= h->frame_recovered;
1525 #else
1526  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1527 #endif
1528 
1529  if (h->current_slice == 1) {
1530  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
1531  decode_postinit(h, nal_index >= nals_needed);
1532 
1533  if (h->avctx->hwaccel &&
1534  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
1535  goto end;
1536  if (CONFIG_H264_VDPAU_DECODER &&
1539  }
1540 
1541  if (sl->redundant_pic_count == 0) {
1542  if (avctx->hwaccel) {
1543  ret = avctx->hwaccel->decode_slice(avctx,
1544  &buf[buf_index - consumed],
1545  consumed);
1546  if (ret < 0)
1547  goto end;
1548  } else if (CONFIG_H264_VDPAU_DECODER &&
1551  start_code,
1552  sizeof(start_code));
1554  &buf[buf_index - consumed],
1555  consumed);
1556  } else
1557  context_count++;
1558  }
1559  break;
1560  case NAL_DPA:
1561  case NAL_DPB:
1562  case NAL_DPC:
1563  avpriv_request_sample(avctx, "data partitioning");
1564  break;
1565  case NAL_SEI:
1566  init_get_bits(&h->gb, ptr, bit_length);
1567  ret = ff_h264_decode_sei(h);
1568  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1569  goto end;
1570  break;
1571  case NAL_SPS:
1572  init_get_bits(&h->gb, ptr, bit_length);
1573  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1574  break;
1575  if (h->is_avc ? nalsize : 1) {
1576  av_log(h->avctx, AV_LOG_DEBUG,
1577  "SPS decoding failure, trying again with the complete NAL\n");
1578  if (h->is_avc)
1579  av_assert0(next_avc - buf_index + consumed == nalsize);
1580  if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
1581  break;
1582  init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
1583  8*(next_avc - buf_index + consumed - 1));
1584  if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
1585  break;
1586  }
1587  init_get_bits(&h->gb, ptr, bit_length);
1588  ff_h264_decode_seq_parameter_set(h, 1);
1589 
1590  break;
1591  case NAL_PPS:
1592  init_get_bits(&h->gb, ptr, bit_length);
1593  ret = ff_h264_decode_picture_parameter_set(h, bit_length);
1594  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1595  goto end;
1596  break;
1597  case NAL_AUD:
1598  case NAL_END_SEQUENCE:
1599  case NAL_END_STREAM:
1600  case NAL_FILLER_DATA:
1601  case NAL_SPS_EXT:
1602  case NAL_AUXILIARY_SLICE:
1603  break;
1604  case NAL_FF_IGNORE:
1605  break;
1606  default:
1607  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1608  h->nal_unit_type, bit_length);
1609  }
1610 
1611  if (context_count == h->max_contexts) {
1612  ret = ff_h264_execute_decode_slices(h, context_count);
1613  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1614  goto end;
1615  context_count = 0;
1616  }
1617 
1618  if (err < 0 || err == SLICE_SKIPED) {
1619  if (err < 0)
1620  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1621  sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
1622  } else if (err == SLICE_SINGLETHREAD) {
1623  if (context_count > 1) {
1624  ret = ff_h264_execute_decode_slices(h, context_count - 1);
1625  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1626  goto end;
1627  context_count = 0;
1628  }
1629  /* Slice could not be decoded in parallel mode, restart. Note
1630  * that rbsp_buffer is not transferred, but since we no longer
1631  * run in parallel mode this should not be an issue. */
1632  sl = &h->slice_ctx[0];
1633  goto again;
1634  }
1635  }
1636  }
1637  if (context_count) {
1638  ret = ff_h264_execute_decode_slices(h, context_count);
1639  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1640  goto end;
1641  }
1642 
1643  ret = 0;
1644 end:
1645  /* clean up */
1646  if (h->cur_pic_ptr && !h->droppable) {
1647  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1648  h->picture_structure == PICT_BOTTOM_FIELD);
1649  }
1650 
1651  return (ret < 0) ? ret : buf_index;
1652 }
1653 
1654 /**
1655  * Return the number of bytes consumed for building the current frame.
1656  */
1657 static int get_consumed_bytes(int pos, int buf_size)
1658 {
1659  if (pos == 0)
1660  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1661  if (pos + 10 > buf_size)
1662  pos = buf_size; // oops ;)
1663 
1664  return pos;
1665 }
1666 
1667 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
1668 {
1669  AVFrame *src = srcp->f;
1670  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1671  int i;
1672  int ret = av_frame_ref(dst, src);
1673  if (ret < 0)
1674  return ret;
1675 
1676  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(h), 0);
1677 
1678  if (srcp->sei_recovery_frame_cnt == 0)
1679  dst->key_frame = 1;
1680  if (!srcp->crop)
1681  return 0;
1682 
1683  for (i = 0; i < desc->nb_components; i++) {
1684  int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1685  int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1686  int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1687  (srcp->crop_top >> vshift) * dst->linesize[i];
1688  dst->data[i] += off;
1689  }
1690  return 0;
1691 }
1692 
1693 static int is_extra(const uint8_t *buf, int buf_size)
1694 {
1695  int cnt= buf[5]&0x1f;
1696  const uint8_t *p= buf+6;
1697  while(cnt--){
1698  int nalsize= AV_RB16(p) + 2;
1699  if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
1700  return 0;
1701  p += nalsize;
1702  }
1703  cnt = *(p++);
1704  if(!cnt)
1705  return 0;
1706  while(cnt--){
1707  int nalsize= AV_RB16(p) + 2;
1708  if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
1709  return 0;
1710  p += nalsize;
1711  }
1712  return 1;
1713 }
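/* is_extra() above is a sanity check for in-band extradata: it only accepts a
 * buffer that looks like an avcC block in which every SPS entry starts with
 * NAL header byte 0x67 (nal_ref_idc 3, type 7) and every PPS entry with 0x68
 * (type 8), before the data is handed to ff_h264_decode_extradata(). */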
1714 
1715 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1716  int *got_frame, AVPacket *avpkt)
1717 {
1718  const uint8_t *buf = avpkt->data;
1719  int buf_size = avpkt->size;
1720  H264Context *h = avctx->priv_data;
1721  AVFrame *pict = data;
1722  int buf_index = 0;
1723  H264Picture *out;
1724  int i, out_idx;
1725  int ret;
1726 
1727  h->flags = avctx->flags;
1728 
1729  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1730 
1731  /* end of stream, output what is still in the buffers */
1732  if (buf_size == 0) {
1733  out:
1734 
1735  h->cur_pic_ptr = NULL;
1736  h->first_field = 0;
1737 
1738  // FIXME factorize this with the output code below
1739  out = h->delayed_pic[0];
1740  out_idx = 0;
1741  for (i = 1;
1742  h->delayed_pic[i] &&
1743  !h->delayed_pic[i]->f->key_frame &&
1744  !h->delayed_pic[i]->mmco_reset;
1745  i++)
1746  if (h->delayed_pic[i]->poc < out->poc) {
1747  out = h->delayed_pic[i];
1748  out_idx = i;
1749  }
1750 
1751  for (i = out_idx; h->delayed_pic[i]; i++)
1752  h->delayed_pic[i] = h->delayed_pic[i + 1];
1753 
1754  if (out) {
1755  out->reference &= ~DELAYED_PIC_REF;
1756  ret = output_frame(h, pict, out);
1757  if (ret < 0)
1758  return ret;
1759  *got_frame = 1;
1760  }
1761 
1762  return buf_index;
1763  }
1764  if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1765  int side_size;
1766  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1767  if (is_extra(side, side_size))
1768  ff_h264_decode_extradata(h, side, side_size);
1769  }
1770  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1771  if (is_extra(buf, buf_size))
1772  return ff_h264_decode_extradata(h, buf, buf_size);
1773  }
1774 
1775  buf_index = decode_nal_units(h, buf, buf_size, 0);
1776  if (buf_index < 0)
1777  return AVERROR_INVALIDDATA;
1778 
1779  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1780  av_assert0(buf_index <= buf_size);
1781  goto out;
1782  }
1783 
1784  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1785  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1786  buf_size >= 4 && !memcmp("Q264", buf, 4))
1787  return buf_size;
1788  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1789  return AVERROR_INVALIDDATA;
1790  }
1791 
1792  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
1793  (h->mb_y >= h->mb_height && h->mb_height)) {
1794  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
1795  decode_postinit(h, 1);
1796 
1797  ff_h264_field_end(h, &h->slice_ctx[0], 0);
1798 
1799  /* Wait for second field. */
1800  *got_frame = 0;
1801  if (h->next_output_pic && (
1802  h->next_output_pic->recovered)) {
1803  if (!h->next_output_pic->recovered)
1805 
1806  if (!h->avctx->hwaccel &&
1807  (h->next_output_pic->field_poc[0] == INT_MAX ||
1808  h->next_output_pic->field_poc[1] == INT_MAX)
1809  ) {
1810  int p;
1811  AVFrame *f = h->next_output_pic->f;
1812  int field = h->next_output_pic->field_poc[0] == INT_MAX;
1813  uint8_t *dst_data[4];
1814  int linesizes[4];
1815  const uint8_t *src_data[4];
1816 
1817  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1818 
1819  for (p = 0; p<4; p++) {
1820  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1821  src_data[p] = f->data[p] + field *f->linesize[p];
1822  linesizes[p] = 2*f->linesize[p];
1823  }
1824 
1825  av_image_copy(dst_data, linesizes, src_data, linesizes,
1826  f->format, f->width, f->height>>1);
1827  }
1828 
1829  ret = output_frame(h, pict, h->next_output_pic);
1830  if (ret < 0)
1831  return ret;
1832  *got_frame = 1;
1833  if (CONFIG_MPEGVIDEO) {
1834  ff_print_debug_info2(h->avctx, pict, NULL,
1835  h->next_output_pic->mb_type,
1836  h->next_output_pic->qscale_table,
1837  h->next_output_pic->motion_val,
1838  &h->low_delay,
1839  h->mb_width, h->mb_height, h->mb_stride, 1);
1840  }
1841  }
1842  }
1843 
1844  av_assert0(pict->buf[0] || !*got_frame);
1845 
1847 
1848  return get_consumed_bytes(buf_index, buf_size);
1849 }
1850 
1851 av_cold void ff_h264_free_context(H264Context *h)
1852 {
1853  int i;
1854 
1855  ff_h264_free_tables(h);
1856 
1857  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
1858  ff_h264_unref_picture(h, &h->DPB[i]);
1859  av_frame_free(&h->DPB[i].f);
1860  }
1861  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
1862 
1863  h->cur_pic_ptr = NULL;
1864 
1865  for (i = 0; i < h->nb_slice_ctx; i++)
1866  av_freep(&h->slice_ctx[i].rbsp_buffer);
1867  av_freep(&h->slice_ctx);
1868  h->nb_slice_ctx = 0;
1869 
1870  for (i = 0; i < MAX_SPS_COUNT; i++)
1871  av_freep(h->sps_buffers + i);
1872 
1873  for (i = 0; i < MAX_PPS_COUNT; i++)
1874  av_freep(h->pps_buffers + i);
1875 }
1876 
1877 static av_cold int h264_decode_end(AVCodecContext *avctx)
1878 {
1879  H264Context *h = avctx->priv_data;
1880 
1881  ff_h264_remove_all_refs(h);
1882  ff_h264_free_context(h);
1883 
1884  ff_h264_unref_picture(h, &h->cur_pic);
1885  av_frame_free(&h->cur_pic.f);
1886  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1887  av_frame_free(&h->last_pic_for_ec.f);
1888 
1889  return 0;
1890 }
1891 
1892 #define OFFSET(x) offsetof(H264Context, x)
1893 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1894 static const AVOption h264_options[] = {
1895  {"is_avc", "is avc", offsetof(H264Context, is_avc), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
1896  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1897  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD },
1898  { NULL },
1899 };
1900 
1901 static const AVClass h264_class = {
1902  .class_name = "H264 Decoder",
1903  .item_name = av_default_item_name,
1904  .option = h264_options,
1905  .version = LIBAVUTIL_VERSION_INT,
1906 };
1907 
1908 static const AVProfile profiles[] = {
1909  { FF_PROFILE_H264_BASELINE, "Baseline" },
1910  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
1911  { FF_PROFILE_H264_MAIN, "Main" },
1912  { FF_PROFILE_H264_EXTENDED, "Extended" },
1913  { FF_PROFILE_H264_HIGH, "High" },
1914  { FF_PROFILE_H264_HIGH_10, "High 10" },
1915  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
1916  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
1917  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
1918  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
1919  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
1920  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
1921  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
1922  { FF_PROFILE_UNKNOWN },
1923 };
1924 
1925 AVCodec ff_h264_decoder = {
1926  .name = "h264",
1927  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1928  .type = AVMEDIA_TYPE_VIDEO,
1929  .id = AV_CODEC_ID_H264,
1930  .priv_data_size = sizeof(H264Context),
1931  .init = ff_h264_decode_init,
1932  .close = h264_decode_end,
1933  .decode = h264_decode_frame,
1934  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
1937  .flush = flush_dpb,
1939  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1940  .profiles = NULL_IF_CONFIG_SMALL(profiles),
1941  .priv_class = &h264_class,
1942 };
1943 
1944 #if CONFIG_H264_VDPAU_DECODER
1945 static const AVClass h264_vdpau_class = {
1946  .class_name = "H264 VDPAU Decoder",
1947  .item_name = av_default_item_name,
1948  .option = h264_options,
1949  .version = LIBAVUTIL_VERSION_INT,
1950 };
1951 
1952 AVCodec ff_h264_vdpau_decoder = {
1953  .name = "h264_vdpau",
1954  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
1955  .type = AVMEDIA_TYPE_VIDEO,
1956  .id = AV_CODEC_ID_H264,
1957  .priv_data_size = sizeof(H264Context),
1958  .init = ff_h264_decode_init,
1959  .close = h264_decode_end,
1962  .flush = flush_dpb,
1963  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
1964  AV_PIX_FMT_NONE},
1965  .profiles = NULL_IF_CONFIG_SMALL(profiles),
1966  .priv_class = &h264_vdpau_class,
1967 };
1968 #endif
int chroma_format_idc
Definition: h264.h:177
struct H264Context * h264
Definition: h264.h:346
#define ff_tlog(ctx,...)
Definition: internal.h:60
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:47
#define FF_PROFILE_H264_HIGH_10
Definition: avcodec.h:2872
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1250
int ff_h264_check_intra_pred_mode(const H264Context *h, H264SliceContext *sl, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:184
void ff_h264_flush_change(H264Context *h)
Definition: h264.c:1056
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3351
int workaround_bugs
Definition: h264.h:528
float v
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size, int parse_extradata)
Definition: h264.c:1349
#define DC_128_PRED8x8
Definition: h264pred.h:76
GetBitContext gb
Definition: h264.h:506
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:765
int sei_recovery_frame_cnt
Definition: h264.h:327
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
#define AV_NUM_DATA_POINTERS
Definition: frame.h:172
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264_slice.c:2479
5: top field, bottom field, top field repeated, in that order
Definition: h264.h:151
int low_delay
Definition: h264.h:524
int mb_num
Definition: h264.h:595
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2090
mpeg2/4 4:2:0, h264 default for 4:2:0
Definition: pixfmt.h:544
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:156
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:468
AVOption.
Definition: opt.h:255
static const AVClass h264_class
Definition: h264.c:1901
int delta_poc[2]
Definition: h264.h:623
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
Views are alternated temporally.
Definition: stereo3d.h:66
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:183
int luma_weight[48][2][2]
Definition: h264.h:373
int quincunx_subsampling
Definition: h264.h:709
int edge_emu_buffer_allocated
Definition: h264.h:456
3: top field, bottom field, in that order
Definition: h264.h:149
#define FF_PROFILE_H264_HIGH_444
Definition: avcodec.h:2876
#define H264_MAX_PICTURE_COUNT
Definition: h264.h:46
int first_field
Definition: h264.h:565
misc image utilities
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:441
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
uint16_t * cbp_table
Definition: h264.h:570
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:644
const uint8_t * ff_h264_decode_nal(H264Context *h, H264SliceContext *sl, const uint8_t *src, int *dst_length, int *consumed, int length)
Decode a network abstraction layer unit.
Definition: h264.c:226
7: frame doubling
Definition: h264.h:153
#define MAX_PPS_COUNT
Definition: h264.h:50
Sequence parameter set.
Definition: h264.h:173
int mb_y
Definition: h264.h:592
int bitstream_restriction_flag
Definition: h264.h:213
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:242
#define FMO
Definition: h264.h:62
int num
numerator
Definition: rational.h:44
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:362
int bipred_scratchpad_allocated
Definition: h264.h:455
static int get_last_needed_nal(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264.c:1285
int size
Definition: avcodec.h:1163
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:74
AVBufferPool * mb_type_pool
Definition: h264.h:792
int crop
Definition: h264.h:329
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264.c:98
int16_t(*[2] motion_val)[2]
Definition: h264.h:300
int flags
Definition: h264.h:527
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1444
int mb_height
Definition: h264.h:593
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:644
int is_avc
Used to parse AVC variant of h264.
Definition: h264.h:606
AVBufferPool * ref_index_pool
Definition: h264.h:794
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour as av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:139
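A hedged usage pattern for growing a parse buffer with the required input padding (the helper name is illustrative, not taken from this file):

    #include "libavutil/error.h"
    #include "internal.h"   /* libavcodec-internal header, assumed to declare av_fast_padded_malloc() */

    /* Ensure 'buf' holds at least 'needed' bytes plus padding; previous
     * contents are not preserved. */
    static int ensure_parse_buffer(uint8_t **buf, unsigned int *buf_size, size_t needed)
    {
        av_fast_padded_malloc(buf, buf_size, needed);
        if (!*buf)
            return AVERROR(ENOMEM);
        return 0;
    }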
void ff_h264_free_tables(H264Context *h)
Definition: h264.c:355
int ff_h264_get_profile(SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264.c:1190
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:327
H264Context.
Definition: h264.h:499
AVFrame * f
Definition: h264.h:293
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264.h:625
AV_RB16
Definition: bytestream.h:85
#define FF_PROFILE_H264_MAIN
Definition: avcodec.h:2869
4: bottom field, top field, in that order
Definition: h264.h:150
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:763
AVCodec.
Definition: avcodec.h:3181
int picture_structure
Definition: h264.h:564
Definition: h264.h:117
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264.h:370
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:80
int profile_idc
Definition: h264.h:175
unsigned current_sps_id
id of the current SPS
Definition: h264.h:549
#define CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:714
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:442
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264.c:56
int ff_set_ref_count(H264Context *h, H264SliceContext *sl)
Definition: h264.c:1210
Definition: h264.h:118
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1369
uint8_t * chroma_pred_mode_table
Definition: h264.h:573
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2947
#define AV_RN32A(p)
Definition: intreadwrite.h:526
srcp
Definition: avisynth_c.h:676
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2644
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
static int decode_init_thread_copy(AVCodecContext *avctx)
Definition: h264.c:701
#define av_assert0(cond)
assert() equivalent that is always enabled.
Definition: avassert.h:37
Definition: h264.h:119
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
uint32_t(*[6] dequant4_coeff)[16]
Definition: h264.h:557
#define CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:771
if()
Definition: avfilter.c:975
uint8_t
#define av_cold
Definition: attributes.h:74
int prev_frame_num_offset
for POC type 2
Definition: h264.h:628
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int offset_for_non_ref_pic
Definition: h264.h:183
mode
Definition: f_perms.c:27
AVOptions.
void ff_h264_reset_sei(H264Context *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:37
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
#define FF_PROFILE_H264_EXTENDED
Definition: avcodec.h:2870
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264.c:589
int poc
frame POC
Definition: h264.h:312
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
AVCodec ff_h264_decoder
Definition: h264.c:1925
Multithreading support functions.
#define CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:834
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
static int find_start_code(const uint8_t *buf, int buf_size, int buf_index, int next_avc)
Definition: h264.h:1136
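For orientation, a plain reference scan for an Annex B start code (a hedged sketch; the real helper is optimized and also stops at the next_avc boundary):

    #include <stdint.h>

    /* Return the index just past the next 00 00 01 sequence at or after 'pos',
     * or 'end' when no start code is found. */
    static int find_startcode_ref(const uint8_t *buf, int pos, int end)
    {
        for (int i = pos; i + 2 < end; i++)
            if (!buf[i] && !buf[i + 1] && buf[i + 2] == 1)
                return i + 3;
        return end;
    }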
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:363
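A hedged sketch of the usual pattern for handing a reference-counted picture to the caller (an illustrative helper, not the code of output_frame()):

    #include "libavutil/frame.h"

    /* Give 'dst' its own reference to the decoded picture; 'dst' must be
     * unreferenced first, and 'decoded' keeps its own reference for the DPB. */
    static int give_output_frame(AVFrame *dst, const AVFrame *decoded)
    {
        av_frame_unref(dst);
        return av_frame_ref(dst, decoded);
    }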
#define FF_PROFILE_UNKNOWN
Definition: avcodec.h:2836
#define FF_PROFILE_H264_CONSTRAINED
Definition: avcodec.h:2864
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1355
AV_RB32
Definition: bytestream.h:85
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:454
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:789
#define FF_PROFILE_H264_HIGH_444_INTRA
Definition: avcodec.h:2878
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:770
Structure to hold side data for an AVFrame.
Definition: frame.h:134
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:34
uint8_t * data
Definition: avcodec.h:1162
static int decode_rbsp_trailing(H264Context *h, const uint8_t *src)
Identify the exact end of the bitstream.
Definition: h264.c:340
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
AVDictionary * metadata
metadata.
Definition: frame.h:543
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:54
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:780
ptrdiff_t size
Definition: opengl_enc.c:101
#define FF_PROFILE_H264_HIGH_422_INTRA
Definition: avcodec.h:2875
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
high precision timer, useful to profile code
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:325
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1967
#define av_log(a,...)
int sei_vflip
Definition: h264.h:716
unsigned int rbsp_buffer_size
Definition: h264.h:493
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:645
H.264 / AVC / MPEG4 part10 codec.
#define U(x)
Definition: vp56_arith.h:37
int frame_num
Definition: h264.h:624
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:818
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264.h:508
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1533
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
static int get_ue_golomb(GetBitContext *gb)
read unsigned exp golomb code.
Definition: golomb.h:53
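For orientation, ue(v) codes are n leading zero bits, a marker 1, then n info bits; a hedged standalone sketch of the arithmetic (plain MSB-first bit access, not the GetBitContext implementation):

    #include <stdint.h>

    /* value = 2^n - 1 + info, where n is the count of leading zero bits. */
    static unsigned ue_decode(const uint8_t *buf, unsigned *bitpos)
    {
        unsigned n = 0, info = 0;

        while (!((buf[*bitpos >> 3] >> (7 - (*bitpos & 7))) & 1)) {
            n++;
            (*bitpos)++;
        }
        (*bitpos)++;                                  /* skip the marker 1 bit */
        for (unsigned i = 0; i < n; i++) {
            info = (info << 1) | ((buf[*bitpos >> 3] >> (7 - (*bitpos & 7))) & 1);
            (*bitpos)++;
        }
        return (1u << n) - 1 + info;                  /* e.g. bits 00100 -> 3 */
    }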
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:89
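Together with log2_chroma_w this gives the chroma plane dimensions; a hedged helper showing the usual round-up computation (not part of h264.c):

    #include "libavutil/pixdesc.h"

    static void chroma_plane_size(enum AVPixelFormat fmt, int w, int h,
                                  int *cw, int *ch)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);

        if (!desc) {
            *cw = *ch = 0;
            return;
        }
        /* Round up so odd luma sizes still cover the full chroma plane. */
        *cw = (w + (1 << desc->log2_chroma_w) - 1) >> desc->log2_chroma_w;
        *ch = (h + (1 << desc->log2_chroma_h) - 1) >> desc->log2_chroma_h;
    }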
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1657
int16_t * dc_val_base
Definition: h264.h:450
int poc_type
pic_order_cnt_type
Definition: h264.h:180
int context_initialized
Definition: h264.h:526
int profile
Definition: mxfenc.c:1804
static const uint16_t mask[17]
Definition: lzw.c:38
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2623
ERContext er
Definition: h264.h:348
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:824
int nal_unit_type
Definition: h264.h:601
av_default_item_name
Definition: h264.h:115
int num_reorder_frames
Definition: h264.h:214
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:102
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
#define ALZHEIMER_DC_L0T_PRED8x8
Definition: h264pred.h:79
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:175
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2772
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:383
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
Definition: h264.c:1105
const char * r
Definition: vf_curves.c:107
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:1085
int capabilities
Codec capabilities.
Definition: avcodec.h:3200
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static const AVOption h264_options[]
Definition: h264.c:1894
int ff_pred_weight_table(H264Context *h, H264SliceContext *sl)
Definition: h264.c:968
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
PPS pps
current pps
Definition: h264.h:551
#define FF_BUG_TRUNCATED
Definition: avcodec.h:2529
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:574
int prev_interlaced_frame
Complement to sei_pic_struct; SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:701
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1335
#define FF_BUG_AUTODETECT
autodetection
Definition: avcodec.h:2510
ThreadFrame tf
Definition: h264.h:294
0: frame
Definition: h264.h:146
simple assert() macros that are a bit more flexible than ISO C assert().
#define PICT_TOP_FIELD
Definition: mpegutils.h:33
GLsizei GLsizei * length
Definition: opengl_enc.c:115
const char * name
Name of the codec implementation.
Definition: avcodec.h:3188
int direct_spatial_mv_pred
Definition: h264.h:426
void ff_init_cabac_states(void)
Definition: cabac.c:69
unsigned int top_samples_available
Definition: h264.h:400
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264.h:980
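What the packing buys: a 2D motion vector becomes a single 32-bit word that fill_rectangle() can replicate with one store per element. A little-endian-only sketch (the helper in h264.h picks the half order from the machine's endianness):

    #include <stdint.h>

    static inline uint32_t pack_mv_le(int16_t mx, int16_t my)
    {
        return (uint16_t)mx | ((uint32_t)(uint16_t)my << 16);
    }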
int valid_recovery_point
Are the SEI recovery points looking valid.
Definition: h264.h:747
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:567
#define FFMAX(a, b)
Definition: common.h:64
Libavcodec external API header.
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:288
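A hedged usage example: deep-copy a decoded picture's planes into a freshly allocated image (the helper name and alignment are illustrative):

    #include "libavutil/frame.h"
    #include "libavutil/imgutils.h"

    /* Returns the allocated buffer size in bytes, or a negative AVERROR code. */
    static int copy_picture_data(uint8_t *dst_data[4], int dst_linesize[4],
                                 const AVFrame *src)
    {
        int ret = av_image_alloc(dst_data, dst_linesize,
                                 src->width, src->height, src->format, 32);
        if (ret < 0)
            return ret;
        av_image_copy(dst_data, dst_linesize,
                      (const uint8_t **)src->data, src->linesize,
                      src->format, src->width, src->height);
        return ret;
    }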
int * mb_index2xy
int offset_for_top_to_bottom_field
Definition: h264.h:184
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:91
int slice_type_nos
S-free slice type (SI/SP are remapped to I/P)
Definition: h264.h:352
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
Decode a slice header.
Definition: h264_slice.c:1140
static const uint8_t scan8[16 *3+3]
Definition: h264.h:964
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:214
int crop_left
Definition: h264.h:330
uint8_t * error_status_table
int use_weight
Definition: h264.h:366
uint8_t * direct_table
Definition: h264.h:575
#define CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:772
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:630
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2579
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:607
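For orientation, a hedged sketch of reading one big-endian NAL length prefix in an is_avc stream (the general scheme, not the helper in h264.h):

    #include <limits.h>
    #include <stdint.h>

    /* nal_length_size is 1, 2 or 4; returns -1 if the prefix overruns the buffer
     * or the size does not fit in an int. */
    static int read_nal_size(const uint8_t *buf, int buf_size, int nal_length_size)
    {
        unsigned size = 0;

        if (nal_length_size > buf_size)
            return -1;
        for (int i = 0; i < nal_length_size; i++)
            size = (size << 8) | buf[i];
        return size > INT_MAX ? -1 : (int)size;
    }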
useful rectangle filling function
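As a mental model, a plain scalar equivalent of the cache fill used for the ref/mv caches (the real rectangle.h version uses fixed-width stores selected by 'size'; little-endian layout assumed when size < 4):

    #include <stdint.h>
    #include <string.h>

    /* Fill a w*h block of 'size'-byte elements, rows 'stride' elements apart. */
    static void fill_rectangle_scalar(void *vp, int w, int h, int stride,
                                      uint32_t val, int size)
    {
        uint8_t *p = vp;

        for (int y = 0; y < h; y++)
            for (int x = 0; x < w; x++)
                memcpy(p + (y * stride + x) * size, &val, size);
    }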
uint8_t * data[3]
Definition: h264.h:335
void ff_vdpau_h264_picture_start(H264Context *h)
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:71
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:62
int sei_anticlockwise_rotation
Definition: h264.h:715
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non-NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1478
Definition: h264.h:114
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
int frame_num_offset
for POC type 2
Definition: h264.h:627
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:482
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2612
FPA sei_fpa
Definition: h264.h:749
#define av_assert1(cond)
assert() equivalent that does not lie in speed-critical code.
Definition: avassert.h:53
int x264_build
Definition: h264.h:590
uint32_t * mb2br_xy
Definition: h264.h:545
uint8_t * er_temp_buffer
#define OFFSET(x)
Definition: h264.c:1892
#define FFMIN(a, b)
Definition: common.h:66
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:560
#define H264_MAX_THREADS
Definition: h264.h:47
float y
int poc_cycle_length
num_ref_frames_in_pic_order_cnt_cycle
Definition: h264.h:185
int reference
Definition: h264.h:324
ret
Definition: avfilter.c:974
int sei_frame_packing_present
frame_packing_arrangment SEI message
Definition: h264.h:706
int redundant_pic_count
Definition: h264.h:419
int nb_slice_ctx
Definition: h264.h:514
#define FF_PROFILE_H264_HIGH_10_INTRA
Definition: avcodec.h:2873
uint32_t * mb_type
Definition: h264.h:303
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:474
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
SPS sps
current sps
Definition: h264.h:550
int32_t
PPS * pps_buffers[MAX_PPS_COUNT]
Definition: h264.h:613
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
int sei_hflip
Definition: h264.h:716
#define MAX_SPS_COUNT
Definition: h264.h:49
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
Decode PPS.
Definition: h264_ps.c:589
Context Adaptive Binary Arithmetic Coder inline functions.
int mmco_reset
Definition: h264.h:654
H264SliceContext * slice_ctx
Definition: h264.h:513
int poc_lsb
Definition: h264.h:620
int reference
Definition: h264.h:338
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1715
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1378
int top_borders_allocated[2]
Definition: h264.h:457
int chroma_log2_weight_denom
Definition: h264.h:369
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264.c:1667
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
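A hedged sketch of how a rotation plus mirroring typically ends up on the output frame as display-matrix side data (the angle and flip arguments are illustrative):

    #include "libavutil/display.h"
    #include "libavutil/error.h"
    #include "libavutil/frame.h"

    static int attach_rotation(AVFrame *frame, double angle, int hflip, int vflip)
    {
        AVFrameSideData *sd = av_frame_new_side_data(frame,
                                                     AV_FRAME_DATA_DISPLAYMATRIX,
                                                     sizeof(int32_t) * 9);
        if (!sd)
            return AVERROR(ENOMEM);
        av_display_rotation_set((int32_t *)sd->data, angle);
        av_display_matrix_flip((int32_t *)sd->data, hflip, vflip);
        return 0;
    }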
#define PART_NOT_AVAILABLE
Definition: h264.h:536
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2753
static void flush(AVCodecContext *avctx)
Definition: aacdec.c:514
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG2 field pics)
Definition: avcodec.h:1759
uint8_t * edge_emu_buffer
Definition: h264.h:453
int dequant_coeff_pps
reinit tables when pps changes
Definition: h264.h:615
SPS * sps_buffers[MAX_SPS_COUNT]
Definition: h264.h:612
static const int8_t mv[256][2]
Definition: 4xm.c:77
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
short offset_for_ref_frame[256]
Definition: h264.h:212
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context. Allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:459
int mb_stride
Definition: h264.h:594
AVCodecContext * avctx
Definition: h264.h:501
AVS_Value src
Definition: avisynth_c.h:482
H264 / AVC / MPEG4 part10 codec data table
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2765
static int get_bit_length(H264Context *h, const uint8_t *buf, const uint8_t *ptr, int dst_length, int i, int next_avc)
Definition: h264.c:1266
1: top field
Definition: h264.h:147
enum AVCodecID codec_id
Definition: avcodec.h:1258
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:491
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:629
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
Definition: vdpau.c:429
int next_outputed_poc
Definition: h264.h:647
int ff_h264_decode_sei(H264Context *h)
Decode SEI.
Definition: h264_sei.c:282
int poc_msb
Definition: h264.h:621
int field_poc[2]
top/bottom POC
Definition: h264.h:311
int debug
debug
Definition: avcodec.h:2565
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE
Definition: avcodec.h:2877
int max_contexts
Max number of threads / contexts.
Definition: h264.h:674
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:757
main external API structure.
Definition: avcodec.h:1241
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:728
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:398
2: bottom field
Definition: h264.h:148
uint8_t * data
Definition: frame.h:136
int ff_h264_check_intra4x4_pred_mode(const H264Context *h, H264SliceContext *sl)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:137
void * buf
Definition: avisynth_c.h:553
int frame_packing_arrangement_type
Definition: h264.h:707
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1993
int8_t * qscale_table
Definition: h264.h:297
int extradata_size
Definition: avcodec.h:1356
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
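In this decoder it is used to tag frame metadata; a hedged example of setting one string entry (key and value are illustrative):

    #include "libavutil/dict.h"
    #include "libavutil/frame.h"

    /* flags == 0 copies both key and value, so short-lived strings are fine. */
    static int tag_frame(AVFrame *frame, const char *key, const char *value)
    {
        return av_dict_set(&frame->metadata, key, value, 0);
    }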
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:105
int constraint_set_flags
constraint_set[0-3]_flag
Definition: h264.h:229
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:304
SEI_PicStructType sei_pic_struct
pic_struct in picture timing SEI message
Definition: h264.h:693
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:434
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2764
int slice_flags
slice flags
Definition: avcodec.h:1757
static int get_avc_nalsize(H264Context *h, const uint8_t *buf, int buf_size, int *buf_index)
Definition: h264.h:1146
Describe the class of an AVClass context structure.
Definition: log.h:67
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:584
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:463
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:1877
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:250
Definition: h264.h:120
int8_t * ref_index[2]
Definition: h264.h:309
int use_weight_chroma
Definition: h264.h:367
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:410
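A hedged example of the GetBitContext pattern used throughout the parser (the two fields read here are illustrative):

    #include "get_bits.h"

    /* Read one flag bit followed by a 4-bit value from a raw byte buffer. */
    static int read_two_fields(const uint8_t *buf, int buf_size, int *flag, int *val)
    {
        GetBitContext gb;
        int ret = init_get_bits(&gb, buf, 8 * buf_size);

        if (ret < 0)
            return ret;
        *flag = get_bits1(&gb);
        *val  = get_bits(&gb, 4);
        return 0;
    }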
int pixel_shift
0 for 8-bit H264, 1 for high-bit-depth H264
Definition: h264.h:516
int mmco_reset
MMCO_RESET set this 1.
Definition: h264.h:314
H264Picture * cur_pic_ptr
Definition: h264.h:509
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:417
int enable_er
Definition: h264.h:789
int frame_packing_arrangement_cancel_flag
is previous arrangement canceled, -1 if never received
Definition: h264.h:263
#define FF_PROFILE_H264_CAVLC_444
Definition: avcodec.h:2879
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:117
int log2_max_poc_lsb
log2_max_pic_order_cnt_lsb_minus4
Definition: h264.h:181
6: bottom field, top field, bottom field repeated, in that order
Definition: h264.h:152
#define FF_PROFILE_H264_INTRA
Definition: avcodec.h:2865
static int is_extra(const uint8_t *buf, int buf_size)
Definition: h264.c:1693
AVCodecContext * avctx
static const uint8_t start_code[]
Definition: h264.c:1264
Views are on top of each other.
Definition: stereo3d.h:55
int pic_struct_present_flag
Definition: h264.h:220
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:32
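A hedged example of exporting frame-packing information through this helper (the packing type and flip flag are illustrative):

    #include "libavutil/error.h"
    #include "libavutil/frame.h"
    #include "libavutil/stereo3d.h"

    static int mark_side_by_side(AVFrame *frame, int invert)
    {
        AVStereo3D *stereo = av_stereo3d_create_side_data(frame);

        if (!stereo)
            return AVERROR(ENOMEM);
        stereo->type = AV_STEREO3D_SIDEBYSIDE;
        if (invert)
            stereo->flags |= AV_STEREO3D_FLAG_INVERT;
        return 0;
    }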
unsigned int list_count
Definition: h264.h:443
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1851
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int has_recovery_point
Definition: h264.h:772
Views are next to each other.
Definition: stereo3d.h:45
#define MAX_MBPAIR_SIZE
Definition: h264.h:56
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:522
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:1043
int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:303
discard all non reference
Definition: avcodec.h:665
AVBufferPool * qscale_table_pool
Definition: h264.h:791
H264Picture * next_output_pic
Definition: h264.h:646
int slice_context_count
Definition: h264.h:676
AVBufferPool * motion_val_pool
Definition: h264.h:793
#define FF_PROFILE_H264_HIGH
Definition: avcodec.h:2871
#define SLICE_SINGLETHREAD
Definition: h264.h:1177
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:870
common internal API header.
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:866
int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size)
Definition: h264.c:527
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:768
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:129
uint16_t * slice_table_base
Definition: h264.h:617
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:179
int missing_fields
Definition: h264.h:774
int16_t * dc_val[3]
H.264 / AVC / MPEG4 part10 motion vector prediction.
Bi-dir predicted.
Definition: avutil.h:269
const char * ff_h264_sei_stereo_mode(H264Context *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:359
AVProfile.
Definition: avcodec.h:3169
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2509
int cur_chroma_format_idc
Definition: h264.h:781
int8_t * intra4x4_pred_mode
Definition: h264.h:384
int den
denominator
Definition: rational.h:45
uint8_t * rbsp_buffer
Definition: h264.h:492
int sei_ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264.h:723
#define FF_PROFILE_H264_CONSTRAINED_BASELINE
Definition: avcodec.h:2868
void * priv_data
Definition: avcodec.h:1283
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:154
#define PICT_FRAME
Definition: mpegutils.h:35
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264.h:626
int8_t ref_cache[2][5 *8]
Definition: h264.h:469
Definition: h264.h:113
#define SLICE_SKIPED
Definition: h264.h:1178
#define VD
Definition: h264.c:1893
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:372
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:54
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1291
#define FF_PROFILE_H264_BASELINE
Definition: avcodec.h:2867
int luma_log2_weight_denom
Definition: h264.h:368
int chroma_weight[48][2][2][2]
Definition: h264.h:374
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:2545
H264Picture cur_pic
Definition: h264.h:510
int sei_display_orientation_present
display orientation SEI message
Definition: h264.h:714
int content_interpretation_type
Definition: h264.h:708
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:237
Views are packed per column.
Definition: stereo3d.h:107
int mb_width
Definition: h264.h:593
enum AVPictureType pict_type
Definition: h264.h:684
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:666
#define FF_PROFILE_H264_HIGH_422
Definition: avcodec.h:2874
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1342
uint32_t * mb2b_xy
Definition: h264.h:544
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:444
int delta_poc_bottom
Definition: h264.h:622
H264Picture last_pic_for_ec
Definition: h264.h:511
int au_pps_id
pps_id of current access unit
Definition: h264.h:553
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:228
int height
Definition: frame.h:220
int crop_top
Definition: h264.h:331
unsigned int left_samples_available
Definition: h264.h:402
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:482
#define av_freep(p)
static int init_thread_copy(AVCodecContext *avctx)
Definition: alac.c:640
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
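A hedged sketch of querying packet side data, here for replacement extradata (the side-data type is an example):

    #include "avcodec.h"

    /* Returns a pointer owned by the packet, or NULL; *size receives its length. */
    static uint8_t *packet_new_extradata(AVPacket *pkt, int *size)
    {
        return av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, size);
    }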
int8_t * intra4x4_pred_mode
Definition: h264.h:530
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3365
8: frame tripling
Definition: h264.h:154
#define AV_RN64A(p)
Definition: intreadwrite.h:530
int mb_field_decoding_flag
Definition: h264.h:416
uint8_t(* non_zero_count)[48]
Definition: h264.h:533
exp golomb vlc stuff
uint8_t * bipred_scratchpad
Definition: h264.h:452
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
This structure stores compressed data.
Definition: avcodec.h:1139
int sei_recovery_frame_cnt
recovery_frame_cnt from SEI message
Definition: h264.h:742
int droppable
Definition: h264.h:522
int strict_std_compliance
strictly follow the standard (MPEG4, ...).
Definition: avcodec.h:2543
#define STARTCODE_TEST
int nal_ref_idc
Definition: h264.h:600
GetBitContext gb
Definition: h264.h:347
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:138
int b_stride
Definition: h264.h:546
Context Adaptive Binary Arithmetic Coder.
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264.h:371
static const AVProfile profiles[]
Definition: h264.c:1908
void ff_h264_init_dequant_tables(H264Context *h)
Definition: h264_slice.c:367