FFmpeg
h264.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/timer.h"
36 #include "internal.h"
37 #include "bytestream.h"
38 #include "cabac.h"
39 #include "cabac_functions.h"
40 #include "error_resilience.h"
41 #include "avcodec.h"
42 #include "h264.h"
43 #include "h2645_parse.h"
44 #include "h264data.h"
45 #include "h264chroma.h"
46 #include "h264_mvpred.h"
47 #include "golomb.h"
48 #include "mathops.h"
49 #include "me_cmp.h"
50 #include "mpegutils.h"
51 #include "profiles.h"
52 #include "rectangle.h"
53 #include "thread.h"
54 #include "vdpau_compat.h"
55 
56 static int h264_decode_end(AVCodecContext *avctx);
57 
58 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
59 
60 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
61 {
62  H264Context *h = avctx->priv_data;
63  return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
64 }
65 
66 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
67  int (*mv)[2][4][2],
68  int mb_x, int mb_y, int mb_intra, int mb_skipped)
69 {
70  H264Context *h = opaque;
71  H264SliceContext *sl = &h->slice_ctx[0];
72 
73  sl->mb_x = mb_x;
74  sl->mb_y = mb_y;
75  sl->mb_xy = mb_x + mb_y * h->mb_stride;
76  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
77  av_assert1(ref >= 0);
78  /* FIXME: It is possible albeit uncommon that slice references
79  * differ between slices. We take the easy approach and ignore
80  * it for now. If this turns out to have any relevance in
81  * practice then correct remapping should be added. */
82  if (ref >= sl->ref_count[0])
83  ref = 0;
84  if (!sl->ref_list[0][ref].data[0]) {
85  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
86  ref = 0;
87  }
88  if ((sl->ref_list[0][ref].reference&3) != 3) {
89  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
90  return;
91  }
92  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
93  2, 2, 2, ref, 1);
94  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
95  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
96  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
97  sl->mb_mbaff =
98  sl->mb_field_decoding_flag = 0;
99  ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
100 }
101 
102 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
103  int y, int height)
104 {
105  AVCodecContext *avctx = h->avctx;
106  const AVFrame *src = h->cur_pic.f;
107  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
108  int vshift = desc->log2_chroma_h;
109  const int field_pic = h->picture_structure != PICT_FRAME;
110  if (field_pic) {
111  height <<= 1;
112  y <<= 1;
113  }
114 
115  height = FFMIN(height, avctx->height - y);
116 
117  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
118  return;
119 
120  if (avctx->draw_horiz_band) {
121  int offset[AV_NUM_DATA_POINTERS];
122  int i;
123 
124  offset[0] = y * src->linesize[0];
125  offset[1] =
126  offset[2] = (y >> vshift) * src->linesize[1];
127  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
128  offset[i] = 0;
129 
130  emms_c();
131 
132  avctx->draw_horiz_band(avctx, src, offset,
133  y, h->picture_structure, height);
134  }
135 }
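/* [Editor's note] ff_h264_draw_horiz_band() above forwards finished rows to the user's
 * draw_horiz_band callback. A minimal, hedged sketch of such a callback on the API-consumer
 * side is given here; the function name, the cast of s->opaque and the assumption that the
 * destination buffer shares the source luma stride are illustrative, not part of this file:
 *
 *     static void my_draw_horiz_band(AVCodecContext *s, const AVFrame *src,
 *                                    int offset[AV_NUM_DATA_POINTERS],
 *                                    int y, int type, int height)
 *     {
 *         // src->data[0] + offset[0] points at the first luma pixel of the band,
 *         // y is the top row of the band and height the number of rows ready.
 *         uint8_t *dst = (uint8_t *)s->opaque;
 *         memcpy(dst + y * src->linesize[0],
 *                src->data[0] + offset[0], (size_t)height * src->linesize[0]);
 *     }
 *
 * It would be installed with avctx->draw_horiz_band = my_draw_horiz_band; before
 * avcodec_open2(); for H.264 the 'type' argument carries h->picture_structure, and the
 * callback is only invoked when the decoder advertises AV_CODEC_CAP_DRAW_HORIZ_BAND.
 */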
136 
137 void ff_h264_free_tables(H264Context *h)
138 {
139  int i;
140 
141  av_freep(&h->intra4x4_pred_mode);
142  av_freep(&h->chroma_pred_mode_table);
143  av_freep(&h->cbp_table);
144  av_freep(&h->mvd_table[0]);
145  av_freep(&h->mvd_table[1]);
146  av_freep(&h->direct_table);
147  av_freep(&h->non_zero_count);
148  av_freep(&h->slice_table_base);
149  h->slice_table = NULL;
150  av_freep(&h->list_counts);
151 
152  av_freep(&h->mb2b_xy);
153  av_freep(&h->mb2br_xy);
154 
155  av_buffer_pool_uninit(&h->qscale_table_pool);
156  av_buffer_pool_uninit(&h->mb_type_pool);
157  av_buffer_pool_uninit(&h->motion_val_pool);
158  av_buffer_pool_uninit(&h->ref_index_pool);
159 
160  for (i = 0; i < h->nb_slice_ctx; i++) {
161  H264SliceContext *sl = &h->slice_ctx[i];
162 
163  av_freep(&sl->dc_val_base);
164  av_freep(&sl->er.mb_index2xy);
165  av_freep(&sl->er.error_status_table);
166  av_freep(&sl->er.er_temp_buffer);
167 
168  av_freep(&sl->bipred_scratchpad);
169  av_freep(&sl->edge_emu_buffer);
170  av_freep(&sl->top_borders[0]);
171  av_freep(&sl->top_borders[1]);
172 
173  sl->bipred_scratchpad_allocated = 0;
174  sl->edge_emu_buffer_allocated = 0;
175  sl->top_borders_allocated[0] = 0;
176  sl->top_borders_allocated[1] = 0;
177  }
178 }
179 
180 int ff_h264_alloc_tables(H264Context *h)
181 {
182  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
183  const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
184  int x, y;
185 
186  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
187  row_mb_num, 8 * sizeof(uint8_t), fail)
188  h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
189 
190  FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
191  big_mb_num * 48 * sizeof(uint8_t), fail)
192  FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
193  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
194  FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
195  big_mb_num * sizeof(uint16_t), fail)
196  FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
197  big_mb_num * sizeof(uint8_t), fail)
198  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
199  row_mb_num, 16 * sizeof(uint8_t), fail);
200  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
201  row_mb_num, 16 * sizeof(uint8_t), fail);
202  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
203  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
204 
205  FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
206  4 * big_mb_num * sizeof(uint8_t), fail);
207  FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
208  big_mb_num * sizeof(uint8_t), fail)
209 
210  memset(h->slice_table_base, -1,
211  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
212  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
213 
214  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
215  big_mb_num * sizeof(uint32_t), fail);
216  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
217  big_mb_num * sizeof(uint32_t), fail);
218  for (y = 0; y < h->mb_height; y++)
219  for (x = 0; x < h->mb_width; x++) {
220  const int mb_xy = x + y * h->mb_stride;
221  const int b_xy = 4 * x + 4 * y * h->b_stride;
222 
223  h->mb2b_xy[mb_xy] = b_xy;
224  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
225  }
226 
227  return 0;
228 
229 fail:
230  ff_h264_free_tables(h);
231  return AVERROR(ENOMEM);
232 }
233 
234 /**
235  * Init context
236  * Allocate buffers which are not shared amongst multiple threads.
237  */
238 int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
239 {
240  ERContext *er = &sl->er;
241  int mb_array_size = h->mb_height * h->mb_stride;
242  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
243  int c_size = h->mb_stride * (h->mb_height + 1);
244  int yc_size = y_size + 2 * c_size;
245  int x, y, i;
246 
247  sl->ref_cache[0][scan8[5] + 1] =
248  sl->ref_cache[0][scan8[7] + 1] =
249  sl->ref_cache[0][scan8[13] + 1] =
250  sl->ref_cache[1][scan8[5] + 1] =
251  sl->ref_cache[1][scan8[7] + 1] =
252  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
253 
254  if (sl != h->slice_ctx) {
255  memset(er, 0, sizeof(*er));
256  } else
257  if (CONFIG_ERROR_RESILIENCE) {
258 
259  /* init ER */
260  er->avctx = h->avctx;
261  er->decode_mb = h264_er_decode_mb;
262  er->opaque = h;
263  er->quarter_sample = 1;
264 
265  er->mb_num = h->mb_num;
266  er->mb_width = h->mb_width;
267  er->mb_height = h->mb_height;
268  er->mb_stride = h->mb_stride;
269  er->b8_stride = h->mb_width * 2 + 1;
270 
271  // error resilience code looks cleaner with this
272  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
273  (h->mb_num + 1) * sizeof(int), fail);
274 
275  for (y = 0; y < h->mb_height; y++)
276  for (x = 0; x < h->mb_width; x++)
277  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
278 
279  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
280  h->mb_stride + h->mb_width;
281 
282  FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
283  mb_array_size * sizeof(uint8_t), fail);
284 
285  FF_ALLOCZ_OR_GOTO(h->avctx, er->er_temp_buffer,
286  h->mb_height * h->mb_stride, fail);
287 
288  FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
289  yc_size * sizeof(int16_t), fail);
290  er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
291  er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
292  er->dc_val[2] = er->dc_val[1] + c_size;
293  for (i = 0; i < yc_size; i++)
294  sl->dc_val_base[i] = 1024;
295  }
296 
297  return 0;
298 
299 fail:
300  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
301 }
302 
303 static int h264_init_context(AVCodecContext *avctx, H264Context *h)
304 {
305  int i;
306 
307  h->avctx = avctx;
308  h->backup_width = -1;
309  h->backup_height = -1;
310  h->backup_pix_fmt = AV_PIX_FMT_NONE;
311  h->current_sps_id = -1;
312  h->cur_chroma_format_idc = -1;
313 
314  h->picture_structure = PICT_FRAME;
315  h->workaround_bugs = avctx->workaround_bugs;
316  h->flags = avctx->flags;
317  h->poc.prev_poc_msb = 1 << 16;
318  h->recovery_frame = -1;
319  h->frame_recovered = 0;
320  h->poc.prev_frame_num = -1;
321  h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
322  h->sei.unregistered.x264_build = -1;
323 
324  h->next_outputed_poc = INT_MIN;
325  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
326  h->last_pocs[i] = INT_MIN;
327 
328  ff_h264_sei_uninit(&h->sei);
329 
330  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
331 
332  h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
333  h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
334  if (!h->slice_ctx) {
335  h->nb_slice_ctx = 0;
336  return AVERROR(ENOMEM);
337  }
338 
339  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
340  h->DPB[i].f = av_frame_alloc();
341  if (!h->DPB[i].f)
342  return AVERROR(ENOMEM);
343  }
344 
345  h->cur_pic.f = av_frame_alloc();
346  if (!h->cur_pic.f)
347  return AVERROR(ENOMEM);
348 
349  h->last_pic_for_ec.f = av_frame_alloc();
350  if (!h->last_pic_for_ec.f)
351  return AVERROR(ENOMEM);
352 
353  for (i = 0; i < h->nb_slice_ctx; i++)
354  h->slice_ctx[i].h264 = h;
355 
356  return 0;
357 }
358 
359 static av_cold int h264_decode_end(AVCodecContext *avctx)
360 {
361  H264Context *h = avctx->priv_data;
362  int i;
363 
364  ff_h264_remove_all_refs(h);
365  ff_h264_free_tables(h);
366 
367  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
368  ff_h264_unref_picture(h, &h->DPB[i]);
369  av_frame_free(&h->DPB[i].f);
370  }
371  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
372 
373  h->cur_pic_ptr = NULL;
374 
375  av_freep(&h->slice_ctx);
376  h->nb_slice_ctx = 0;
377 
378  ff_h264_sei_uninit(&h->sei);
379  ff_h264_ps_uninit(&h->ps);
380 
381  ff_h2645_packet_uninit(&h->pkt);
382 
383  ff_h264_unref_picture(h, &h->cur_pic);
384  av_frame_free(&h->cur_pic.f);
385  ff_h264_unref_picture(h, &h->last_pic_for_ec);
386  av_frame_free(&h->last_pic_for_ec.f);
387 
388  return 0;
389 }
390 
391 static AVOnce h264_vlc_init = AV_ONCE_INIT;
392 
393 av_cold int ff_h264_decode_init(AVCodecContext *avctx)
394 {
395  H264Context *h = avctx->priv_data;
396  int ret;
397 
398  ret = h264_init_context(avctx, h);
399  if (ret < 0)
400  return ret;
401 
402  ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
403  if (ret != 0) {
404  av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
405  return AVERROR_UNKNOWN;
406  }
407 
408  if (avctx->codec_id == AV_CODEC_ID_H264) {
409  if (avctx->ticks_per_frame == 1) {
410  if(h->avctx->time_base.den < INT_MAX/2) {
411  h->avctx->time_base.den *= 2;
412  } else
413  h->avctx->time_base.num /= 2;
414  }
415  avctx->ticks_per_frame = 2;
416  }
417 
418  if (avctx->extradata_size > 0 && avctx->extradata) {
419  ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
420  &h->ps, &h->is_avc, &h->nal_length_size,
421  avctx->err_recognition, avctx);
422  if (ret < 0) {
423  h264_decode_end(avctx);
424  return ret;
425  }
426  }
427 
428  if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
429  h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
430  h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
431  }
432 
433  avctx->internal->allocate_progress = 1;
434 
435  ff_h264_flush_change(h);
436 
437  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
438  h->enable_er = 0;
439 
440  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
441  av_log(avctx, AV_LOG_WARNING,
442  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
443  "Use it at your own risk\n");
444  }
445 
446  return 0;
447 }
448 
449 #if HAVE_THREADS
450 static int decode_init_thread_copy(AVCodecContext *avctx)
451 {
452  H264Context *h = avctx->priv_data;
453  int ret;
454 
455  if (!avctx->internal->is_copy)
456  return 0;
457 
458  memset(h, 0, sizeof(*h));
459 
460  ret = h264_init_context(avctx, h);
461  if (ret < 0)
462  return ret;
463 
464  h->context_initialized = 0;
465 
466  return 0;
467 }
468 #endif
469 
470 /**
471  * Run setup operations that must be run after slice header decoding.
472  * This includes finding the next displayed frame.
473  *
474  * @param h h264 master context
475  * @param setup_finished enough NALs have been read that we can call
476  * ff_thread_finish_setup()
477  */
478 static void decode_postinit(H264Context *h, int setup_finished)
479 {
480  const SPS *sps = h->ps.sps;
481  H264Picture *out = h->cur_pic_ptr;
482  H264Picture *cur = h->cur_pic_ptr;
483  int i, pics, out_of_order, out_idx;
484 
485  if (h->next_output_pic)
486  return;
487 
488  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
489  /* FIXME: if we have two PAFF fields in one packet, we can't start
490  * the next thread here. If we have one field per packet, we can.
491  * The check in decode_nal_units() is not good enough to find this
492  * yet, so we assume the worst for now. */
493  // if (setup_finished)
494  // ff_thread_finish_setup(h->avctx);
495  if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
496  return;
497  if (h->avctx->hwaccel || h->missing_fields <=1)
498  return;
499  }
500 
501  cur->f->interlaced_frame = 0;
502  cur->f->repeat_pict = 0;
503 
504  /* Signal interlacing information externally. */
505  /* Prefer picture timing SEI information, when present, over what the
506  * decoding process itself would imply. */
507 
508  if (sps->pic_struct_present_flag) {
509  H264SEIPictureTiming *pt = &h->sei.picture_timing;
510  switch (pt->pic_struct) {
511  case SEI_PIC_STRUCT_FRAME:
512  break;
513  case SEI_PIC_STRUCT_TOP_FIELD:
514  case SEI_PIC_STRUCT_BOTTOM_FIELD:
515  cur->f->interlaced_frame = 1;
516  break;
517  case SEI_PIC_STRUCT_TOP_BOTTOM:
518  case SEI_PIC_STRUCT_BOTTOM_TOP:
519  if (FIELD_OR_MBAFF_PICTURE(h))
520  cur->f->interlaced_frame = 1;
521  else
522  // try to flag soft telecine progressive
523  cur->f->interlaced_frame = h->prev_interlaced_frame;
524  break;
525  case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
526  case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
527  /* Signal the possibility of telecined film externally
528  * (pic_struct 5,6). From these hints, let the applications
529  * decide if they apply deinterlacing. */
530  cur->f->repeat_pict = 1;
531  break;
532  case SEI_PIC_STRUCT_FRAME_DOUBLING:
533  cur->f->repeat_pict = 2;
534  break;
535  case SEI_PIC_STRUCT_FRAME_TRIPLING:
536  cur->f->repeat_pict = 4;
537  break;
538  }
539 
540  if ((pt->ct_type & 3) &&
541  pt->pic_struct <= SEI_PIC_STRUCT_BOTTOM_TOP)
542  cur->f->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
543  } else {
544  /* Derive the interlacing flag from the decoding process used. */
545  cur->f->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
546  }
547  h->prev_interlaced_frame = cur->f->interlaced_frame;
548 
549  if (cur->field_poc[0] != cur->field_poc[1]) {
550  /* Derive top_field_first from field pocs. */
551  cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
552  } else {
553  if (sps->pic_struct_present_flag) {
554  /* Use picture timing SEI information. Even if it is
555  * information from a past frame, it is better than nothing. */
556  if (h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
557  h->sei.picture_timing.pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
558  cur->f->top_field_first = 1;
559  else
560  cur->f->top_field_first = 0;
561  } else if (cur->f->interlaced_frame) {
562  /* Default to top field first when pic_struct_present_flag
563  * is not set but interlaced frame detected */
564  cur->f->top_field_first = 1;
565  } else {
566  /* Most likely progressive */
567  cur->f->top_field_first = 0;
568  }
569  }
570 
571  if (h->sei.frame_packing.present &&
572  h->sei.frame_packing.frame_packing_arrangement_type <= 6 &&
573  h->sei.frame_packing.content_interpretation_type > 0 &&
574  h->sei.frame_packing.content_interpretation_type < 3) {
575  H264SEIFramePacking *fp = &h->sei.frame_packing;
576  AVStereo3D *stereo = av_stereo3d_create_side_data(cur->f);
577  if (stereo) {
578  switch (fp->frame_packing_arrangement_type) {
579  case 0:
580  stereo->type = AV_STEREO3D_CHECKERBOARD;
581  break;
582  case 1:
583  stereo->type = AV_STEREO3D_COLUMNS;
584  break;
585  case 2:
586  stereo->type = AV_STEREO3D_LINES;
587  break;
588  case 3:
589  if (fp->quincunx_sampling_flag)
590  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
591  else
592  stereo->type = AV_STEREO3D_SIDEBYSIDE;
593  break;
594  case 4:
595  stereo->type = AV_STEREO3D_TOPBOTTOM;
596  break;
597  case 5:
598  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
599  break;
600  case 6:
601  stereo->type = AV_STEREO3D_2D;
602  break;
603  }
604 
605  if (fp->content_interpretation_type == 2)
606  stereo->flags = AV_STEREO3D_FLAG_INVERT;
607  }
608  }
609 
610  if (h->sei.display_orientation.present &&
611  (h->sei.display_orientation.anticlockwise_rotation ||
612  h->sei.display_orientation.hflip ||
613  h->sei.display_orientation.vflip)) {
614  H264SEIDisplayOrientation *o = &h->sei.display_orientation;
615  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
616  AVFrameSideData *rotation = av_frame_new_side_data(cur->f,
617  AV_FRAME_DATA_DISPLAYMATRIX,
618  sizeof(int32_t) * 9);
619  if (rotation) {
620  av_display_rotation_set((int32_t *)rotation->data, angle);
621  av_display_matrix_flip((int32_t *)rotation->data,
622  o->hflip, o->vflip);
623  }
624  }
625 
626  if (h->sei.afd.present) {
627  AVFrameSideData *sd = av_frame_new_side_data(cur->f, AV_FRAME_DATA_AFD,
628  sizeof(uint8_t));
629 
630  if (sd) {
631  *sd->data = h->sei.afd.active_format_description;
632  h->sei.afd.present = 0;
633  }
634  }
635 
636  if (h->sei.a53_caption.a53_caption) {
637  H264SEIA53Caption *a53 = &h->sei.a53_caption;
638  AVFrameSideData *sd = av_frame_new_side_data(cur->f,
639  AV_FRAME_DATA_A53_CC,
640  a53->a53_caption_size);
641  if (sd)
642  memcpy(sd->data, a53->a53_caption, a53->a53_caption_size);
643  av_freep(&a53->a53_caption);
644  a53->a53_caption_size = 0;
646  }
647 
648  cur->mmco_reset = h->mmco_reset;
649  h->mmco_reset = 0;
650 
651  // FIXME do something with unavailable reference frames
652 
653  /* Sort B-frames into display order */
654  if (sps->bitstream_restriction_flag ||
655  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
656  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
657  }
658 
659  for (i = 0; 1; i++) {
660  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
661  if(i)
662  h->last_pocs[i-1] = cur->poc;
663  break;
664  } else if(i) {
665  h->last_pocs[i-1]= h->last_pocs[i];
666  }
667  }
668  out_of_order = MAX_DELAYED_PIC_COUNT - i;
669  if( cur->f->pict_type == AV_PICTURE_TYPE_B
671  out_of_order = FFMAX(out_of_order, 1);
672  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
673  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
674  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
675  h->last_pocs[i] = INT_MIN;
676  h->last_pocs[0] = cur->poc;
677  cur->mmco_reset = 1;
678  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
679  av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
680  h->avctx->has_b_frames = out_of_order;
681  }
682 
683  pics = 0;
684  while (h->delayed_pic[pics])
685  pics++;
686 
687  av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
688 
689  h->delayed_pic[pics++] = cur;
690  if (cur->reference == 0)
691  cur->reference = DELAYED_PIC_REF;
692 
693  out = h->delayed_pic[0];
694  out_idx = 0;
695  for (i = 1; h->delayed_pic[i] &&
696  !h->delayed_pic[i]->f->key_frame &&
697  !h->delayed_pic[i]->mmco_reset;
698  i++)
699  if (h->delayed_pic[i]->poc < out->poc) {
700  out = h->delayed_pic[i];
701  out_idx = i;
702  }
703  if (h->avctx->has_b_frames == 0 &&
704  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
705  h->next_outputed_poc = INT_MIN;
706  out_of_order = out->poc < h->next_outputed_poc;
707 
708  if (out_of_order || pics > h->avctx->has_b_frames) {
709  out->reference &= ~DELAYED_PIC_REF;
710  for (i = out_idx; h->delayed_pic[i]; i++)
711  h->delayed_pic[i] = h->delayed_pic[i + 1];
712  }
713  if (!out_of_order && pics > h->avctx->has_b_frames) {
714  h->next_output_pic = out;
715  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
716  h->next_outputed_poc = INT_MIN;
717  } else
718  h->next_outputed_poc = out->poc;
719  } else {
720  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
721  }
722 
723  if (h->next_output_pic) {
724  if (h->next_output_pic->recovered) {
725  // We have reached a recovery point and all frames after it in
726  // display order are "recovered".
727  h->frame_recovered |= FRAME_RECOVERED_SEI;
728  }
729  h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
730  }
731 
732  if (setup_finished && !h->avctx->hwaccel) {
733  ff_thread_finish_setup(h->avctx);
734 
735  if (!h->avctx->has_b_frames)
736  h->setup_finished = 1;
737  }
738 }
739 
740 /**
741  * instantaneous decoder refresh.
742  */
743 static void idr(H264Context *h)
744 {
745  int i;
746  ff_h264_remove_all_refs(h);
747  h->poc.prev_frame_num =
748  h->poc.prev_frame_num_offset = 0;
749  h->poc.prev_poc_msb = 1<<16;
750  h->poc.prev_poc_lsb = 0;
751  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
752  h->last_pocs[i] = INT_MIN;
753 }
754 
755 /* forget old pics after a seek */
756 void ff_h264_flush_change(H264Context *h)
757 {
758  int i, j;
759 
760  h->next_outputed_poc = INT_MIN;
761  h->prev_interlaced_frame = 1;
762  idr(h);
763 
764  h->poc.prev_frame_num = -1;
765  if (h->cur_pic_ptr) {
766  h->cur_pic_ptr->reference = 0;
767  for (j=i=0; h->delayed_pic[i]; i++)
768  if (h->delayed_pic[i] != h->cur_pic_ptr)
769  h->delayed_pic[j++] = h->delayed_pic[i];
770  h->delayed_pic[j] = NULL;
771  }
772  ff_h264_unref_picture(h, &h->last_pic_for_ec);
773 
774  h->first_field = 0;
775  ff_h264_sei_uninit(&h->sei);
776  h->recovery_frame = -1;
777  h->frame_recovered = 0;
778  h->current_slice = 0;
779  h->mmco_reset = 1;
780  for (i = 0; i < h->nb_slice_ctx; i++)
781  h->slice_ctx[i].list_count = 0;
782 }
783 
784 /* forget old pics after a seek */
785 static void flush_dpb(AVCodecContext *avctx)
786 {
787  H264Context *h = avctx->priv_data;
788  int i;
789 
790  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
791 
792  ff_h264_flush_change(h);
793 
794  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
795  ff_h264_unref_picture(h, &h->DPB[i]);
796  h->cur_pic_ptr = NULL;
797  ff_h264_unref_picture(h, &h->cur_pic);
798 
799  h->mb_y = 0;
800 
801  ff_h264_free_tables(h);
802  h->context_initialized = 0;
803 }
804 
805 #if FF_API_CAP_VDPAU
806 static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
807 #endif
808 
809 static int get_last_needed_nal(H264Context *h)
810 {
811  int nals_needed = 0;
812  int first_slice = 0;
813  int i;
814  int ret;
815 
816  for (i = 0; i < h->pkt.nb_nals; i++) {
817  H2645NAL *nal = &h->pkt.nals[i];
818  GetBitContext gb;
819 
820  /* Packets can sometimes contain multiple PPS/SPS,
821  * e.g. two PAFF field pictures in one packet, or a demuxer
822  * which splits NALs strangely. If so, when frame threading we
823  * can't start the next thread until we've read all of them. */
824  switch (nal->type) {
825  case NAL_SPS:
826  case NAL_PPS:
827  nals_needed = i;
828  break;
829  case NAL_DPA:
830  case NAL_IDR_SLICE:
831  case NAL_SLICE:
832  ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
833  if (ret < 0)
834  return ret;
835  if (!get_ue_golomb_long(&gb) || // first_mb_in_slice
836  !first_slice ||
837  first_slice != nal->type)
838  nals_needed = i;
839  if (!first_slice)
840  first_slice = nal->type;
841  }
842  }
843 
844  return nals_needed;
845 }
846 
847 static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
848 {
849  av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
850  av_log(logctx, AV_LOG_DEBUG, " green_metadata_type: %d\n", gm->green_metadata_type);
851 
852  if (gm->green_metadata_type == 0) {
853  av_log(logctx, AV_LOG_DEBUG, " green_metadata_period_type: %d\n", gm->period_type);
854 
855  if (gm->period_type == 2)
856  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_seconds: %d\n", gm->num_seconds);
857  else if (gm->period_type == 3)
858  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_pictures: %d\n", gm->num_pictures);
859 
860  av_log(logctx, AV_LOG_DEBUG, " SEI GREEN Complexity Metrics: %f %f %f %f\n",
861  (float)gm->percent_non_zero_macroblocks/255,
862  (float)gm->percent_intra_coded_macroblocks/255,
863  (float)gm->percent_six_tap_filtering/255,
864  (float)gm->percent_alpha_point_deblocking_instance/255);
865 
866  } else if (gm->green_metadata_type == 1) {
867  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_type: %d\n", gm->xsd_metric_type);
868 
869  if (gm->xsd_metric_type == 0)
870  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_value: %f\n",
871  (float)gm->xsd_metric_value/100);
872  }
873 }
874 
875 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
876 {
877  AVCodecContext *const avctx = h->avctx;
878  unsigned context_count = 0;
879  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
880  int idr_cleared=0;
881  int i, ret = 0;
882 
883  h->nal_unit_type= 0;
884 
885  h->max_contexts = h->nb_slice_ctx;
886  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
887  h->current_slice = 0;
888  if (!h->first_field)
889  h->cur_pic_ptr = NULL;
890  ff_h264_sei_uninit(&h->sei);
891  }
892 
893  if (h->nal_length_size == 4) {
894  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
895  h->is_avc = 0;
896  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
897  h->is_avc = 1;
898  }
899 
900  ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
901  h->nal_length_size, avctx->codec_id);
902  if (ret < 0) {
903  av_log(avctx, AV_LOG_ERROR,
904  "Error splitting the input into NAL units.\n");
905  return ret;
906  }
907 
908  if (avctx->active_thread_type & FF_THREAD_FRAME)
909  nals_needed = get_last_needed_nal(h);
910  if (nals_needed < 0)
911  return nals_needed;
912 
913  for (i = 0; i < h->pkt.nb_nals; i++) {
914  H2645NAL *nal = &h->pkt.nals[i];
915  H264SliceContext *sl = &h->slice_ctx[context_count];
916  int err;
917 
918  if (avctx->skip_frame >= AVDISCARD_NONREF &&
919  nal->ref_idc == 0 && nal->type != NAL_SEI)
920  continue;
921 
922 again:
923  // FIXME these should stop being context-global variables
924  h->nal_ref_idc = nal->ref_idc;
925  h->nal_unit_type = nal->type;
926 
927  err = 0;
928  switch (nal->type) {
929  case NAL_IDR_SLICE:
930  if ((nal->data[1] & 0xFC) == 0x98) {
931  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
932  h->next_outputed_poc = INT_MIN;
933  ret = -1;
934  goto end;
935  }
936  if (nal->type != NAL_IDR_SLICE) {
938  "Invalid mix of idr and non-idr slices\n");
939  ret = -1;
940  goto end;
941  }
942  if(!idr_cleared) {
943  if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
944  av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
945  ret = AVERROR_INVALIDDATA;
946  goto end;
947  }
948  idr(h); // FIXME ensure we don't lose some frames if there is reordering
949  }
950  idr_cleared = 1;
951  h->has_recovery_point = 1;
952  case NAL_SLICE:
953  sl->gb = nal->gb;
954  if ( nals_needed >= i
955  || (!(avctx->active_thread_type & FF_THREAD_FRAME) && !context_count))
956  h->au_pps_id = -1;
957 
958  if ((err = ff_h264_decode_slice_header(h, sl)))
959  break;
960 
961  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
962  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
963 
964  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
965  h->valid_recovery_point = 1;
966 
967  if ( h->recovery_frame < 0
968  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
969  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
970 
971  if (!h->valid_recovery_point)
972  h->recovery_frame = h->poc.frame_num;
973  }
974  }
975 
976  h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
977 
978  if (nal->type == NAL_IDR_SLICE ||
979  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
980  h->recovery_frame = -1;
981  h->cur_pic_ptr->recovered = 1;
982  }
983  // If we have an IDR, all frames after it in decoded order are
984  // "recovered".
985  if (nal->type == NAL_IDR_SLICE)
986  h->frame_recovered |= FRAME_RECOVERED_IDR;
987 #if 1
988  h->cur_pic_ptr->recovered |= h->frame_recovered;
989 #else
990  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
991 #endif
992 
993  if (h->current_slice == 1) {
994  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
995  decode_postinit(h, i >= nals_needed);
996 
997  if (h->avctx->hwaccel &&
998  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
999  goto end;
1000 #if FF_API_CAP_VDPAU
1001  if (CONFIG_H264_VDPAU_DECODER &&
1002  h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
1003  ff_vdpau_h264_picture_start(h);
1004 #endif
1005  }
1006 
1007  if (sl->redundant_pic_count == 0) {
1008  if (avctx->hwaccel) {
1009  ret = avctx->hwaccel->decode_slice(avctx,
1010  nal->raw_data,
1011  nal->raw_size);
1012  if (ret < 0)
1013  goto end;
1014 #if FF_API_CAP_VDPAU
1015  } else if (CONFIG_H264_VDPAU_DECODER &&
1016  h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
1017  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
1018  start_code,
1019  sizeof(start_code));
1020  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
1021  nal->raw_data,
1022  nal->raw_size);
1023 #endif
1024  } else
1025  context_count++;
1026  }
1027  break;
1028  case NAL_DPA:
1029  case NAL_DPB:
1030  case NAL_DPC:
1031  avpriv_request_sample(avctx, "data partitioning");
1032  break;
1033  case NAL_SEI:
1034  ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
1035  h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
1036  if (avctx->debug & FF_DEBUG_GREEN_MD)
1037  debug_green_metadata(&h->sei.green_metadata, h->avctx);
1038 #if FF_API_AFD
1039 FF_DISABLE_DEPRECATION_WARNINGS
1040  h->avctx->dtg_active_format = h->sei.afd.active_format_description;
1041 FF_ENABLE_DEPRECATION_WARNINGS
1042 #endif /* FF_API_AFD */
1043  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1044  goto end;
1045  break;
1046  case NAL_SPS: {
1047  GetBitContext tmp_gb = nal->gb;
1048  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
1049  break;
1051  "SPS decoding failure, trying again with the complete NAL\n");
1052  init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
1053  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
1054  break;
1055  ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
1056  break;
1057  }
1058  case NAL_PPS:
1059  ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
1060  nal->size_bits);
1061  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1062  goto end;
1063  break;
1064  case NAL_AUD:
1065  case NAL_END_SEQUENCE:
1066  case NAL_END_STREAM:
1067  case NAL_FILLER_DATA:
1068  case NAL_SPS_EXT:
1069  case NAL_AUXILIARY_SLICE:
1070  break;
1071  case NAL_FF_IGNORE:
1072  break;
1073  default:
1074  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
1075  nal->type, nal->size_bits);
1076  }
1077 
1078  if (context_count == h->max_contexts) {
1079  ret = ff_h264_execute_decode_slices(h, context_count);
1080  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1081  goto end;
1082  context_count = 0;
1083  }
1084 
1085  if (err < 0 || err == SLICE_SKIPED) {
1086  if (err < 0)
1087  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
1088  sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
1089  } else if (err == SLICE_SINGLETHREAD) {
1090  if (context_count > 0) {
1091  ret = ff_h264_execute_decode_slices(h, context_count);
1092  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1093  goto end;
1094  context_count = 0;
1095  }
1096  /* Slice could not be decoded in parallel mode, restart. */
1097  sl = &h->slice_ctx[0];
1098  goto again;
1099  }
1100  }
1101  if (context_count) {
1102  ret = ff_h264_execute_decode_slices(h, context_count);
1103  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1104  goto end;
1105  }
1106 
1107  ret = 0;
1108 end:
1109 
1110 #if CONFIG_ERROR_RESILIENCE
1111  /*
1112  * FIXME: The error handling code does not seem to support interlaced
1113  * content when slices span multiple rows.
1114  * The ff_er_add_slice calls don't work right for bottom
1115  * fields; they cause massive erroneous error concealing.
1116  * Error marking covers both fields (top and bottom).
1117  * This causes a mismatched s->error_count
1118  * and a bad error table. Further, the error count goes to
1119  * INT_MAX when called for the bottom field, because mb_y is
1120  * past the end by one (caller's fault) and resync_mb_y != 0
1121  * causes problems for the first MB line, too.
1122  */
1123  if (!FIELD_PICTURE(h) && h->current_slice &&
1124  h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
1125  h->enable_er) {
1126 
1127  H264SliceContext *sl = h->slice_ctx;
1128  int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
1129 
1131 
1132  if (use_last_pic) {
1133  ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
1134  sl->ref_list[0][0].parent = &h->last_pic_for_ec;
1135  memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
1136  memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
1137  sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
1138  } else if (sl->ref_count[0]) {
1139  ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
1140  } else
1141  ff_h264_set_erpic(&sl->er.last_pic, NULL);
1142 
1143  if (sl->ref_count[1])
1144  ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
1145 
1146  sl->er.ref_count = sl->ref_count[0];
1147 
1148  ff_er_frame_end(&sl->er);
1149  if (use_last_pic)
1150  memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
1151  }
1152 #endif /* CONFIG_ERROR_RESILIENCE */
1153  /* clean up */
1154  if (h->cur_pic_ptr && !h->droppable) {
1155  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1156  h->picture_structure == PICT_BOTTOM_FIELD);
1157  }
1158 
1159  return (ret < 0) ? ret : buf_size;
1160 }
1161 
1162 /**
1163  * Return the number of bytes consumed for building the current frame.
1164  */
1165 static int get_consumed_bytes(int pos, int buf_size)
1166 {
1167  if (pos == 0)
1168  pos = 1; // avoid infinite loops (I doubt that is needed but...)
1169  if (pos + 10 > buf_size)
1170  pos = buf_size; // oops ;)
1171 
1172  return pos;
1173 }
1174 
1175 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
1176 {
1177  AVFrame *src = srcp->f;
1178  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
1179  int i;
1180  int ret = av_frame_ref(dst, src);
1181  if (ret < 0)
1182  return ret;
1183 
1184  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
1185 
1186  h->backup_width = h->avctx->width;
1187  h->backup_height = h->avctx->height;
1188  h->backup_pix_fmt = h->avctx->pix_fmt;
1189 
1190  h->avctx->width = dst->width;
1191  h->avctx->height = dst->height;
1192  h->avctx->pix_fmt = dst->format;
1193 
1194  if (srcp->sei_recovery_frame_cnt == 0)
1195  dst->key_frame = 1;
1196  if (!srcp->crop)
1197  return 0;
1198 
1199  for (i = 0; i < desc->nb_components; i++) {
1200  int hshift = (i > 0) ? desc->log2_chroma_w : 0;
1201  int vshift = (i > 0) ? desc->log2_chroma_h : 0;
1202  int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
1203  (srcp->crop_top >> vshift) * dst->linesize[i];
1204  dst->data[i] += off;
1205  }
1206  return 0;
1207 }
1208 
1209 static int is_extra(const uint8_t *buf, int buf_size)
1210 {
1211  int cnt= buf[5]&0x1f;
1212  const uint8_t *p= buf+6;
1213  while(cnt--){
1214  int nalsize= AV_RB16(p) + 2;
1215  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
1216  return 0;
1217  p += nalsize;
1218  }
1219  cnt = *(p++);
1220  if(!cnt)
1221  return 0;
1222  while(cnt--){
1223  int nalsize= AV_RB16(p) + 2;
1224  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
1225  return 0;
1226  p += nalsize;
1227  }
1228  return 1;
1229 }
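/* [Editor's note] is_extra() above sanity-checks a buffer that looks like avcC
 * ("AVCDecoderConfigurationRecord") extradata: byte 5 carries the SPS count in its
 * low 5 bits, followed by 16-bit big-endian length-prefixed NAL units whose
 * nal_unit_type (p[2] & 0x1F after the length field) must be 7 (SPS); a PPS count
 * byte follows, then length-prefixed NAL units of type 8 (PPS). The "+ 2" in
 * nalsize accounts for the 2-byte length field itself. This note only describes
 * the check performed here, not every field of the avcC box.
 */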
1230 
1231 static int h264_decode_frame(AVCodecContext *avctx, void *data,
1232  int *got_frame, AVPacket *avpkt)
1233 {
1234  const uint8_t *buf = avpkt->data;
1235  int buf_size = avpkt->size;
1236  H264Context *h = avctx->priv_data;
1237  AVFrame *pict = data;
1238  int buf_index = 0;
1239  H264Picture *out;
1240  int i, out_idx;
1241  int ret;
1242 
1243  h->flags = avctx->flags;
1244  h->setup_finished = 0;
1245 
1246  if (h->backup_width != -1) {
1247  avctx->width = h->backup_width;
1248  h->backup_width = -1;
1249  }
1250  if (h->backup_height != -1) {
1251  avctx->height = h->backup_height;
1252  h->backup_height = -1;
1253  }
1254  if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
1255  avctx->pix_fmt = h->backup_pix_fmt;
1256  h->backup_pix_fmt = AV_PIX_FMT_NONE;
1257  }
1258 
1259  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1260 
1261  /* end of stream, output what is still in the buffers */
1262  if (buf_size == 0) {
1263  out:
1264 
1265  h->cur_pic_ptr = NULL;
1266  h->first_field = 0;
1267 
1268  // FIXME factorize this with the output code below
1269  out = h->delayed_pic[0];
1270  out_idx = 0;
1271  for (i = 1;
1272  h->delayed_pic[i] &&
1273  !h->delayed_pic[i]->f->key_frame &&
1274  !h->delayed_pic[i]->mmco_reset;
1275  i++)
1276  if (h->delayed_pic[i]->poc < out->poc) {
1277  out = h->delayed_pic[i];
1278  out_idx = i;
1279  }
1280 
1281  for (i = out_idx; h->delayed_pic[i]; i++)
1282  h->delayed_pic[i] = h->delayed_pic[i + 1];
1283 
1284  if (out) {
1285  out->reference &= ~DELAYED_PIC_REF;
1286  ret = output_frame(h, pict, out);
1287  if (ret < 0)
1288  return ret;
1289  *got_frame = 1;
1290  }
1291 
1292  return buf_index;
1293  }
1294  if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1295  int side_size;
1296  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1297  if (is_extra(side, side_size))
1298  ff_h264_decode_extradata(side, side_size,
1299  &h->ps, &h->is_avc, &h->nal_length_size,
1300  avctx->err_recognition, avctx);
1301  }
1302  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
1303  if (is_extra(buf, buf_size))
1304  return ff_h264_decode_extradata(buf, buf_size,
1305  &h->ps, &h->is_avc, &h->nal_length_size,
1306  avctx->err_recognition, avctx);
1307  }
1308 
1309  buf_index = decode_nal_units(h, buf, buf_size);
1310  if (buf_index < 0)
1311  return AVERROR_INVALIDDATA;
1312 
1313  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
1314  av_assert0(buf_index <= buf_size);
1315  goto out;
1316  }
1317 
1318  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
1319  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1320  buf_size >= 4 && !memcmp("Q264", buf, 4))
1321  return buf_size;
1322  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1323  return AVERROR_INVALIDDATA;
1324  }
1325 
1326  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1327  (h->mb_y >= h->mb_height && h->mb_height)) {
1328  if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
1329  decode_postinit(h, 1);
1330 
1331  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1332  return ret;
1333 
1334  /* Wait for second field. */
1335  *got_frame = 0;
1336  if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
1337  (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
1338  h->next_output_pic->recovered)) {
1339  if (!h->next_output_pic->recovered)
1340  h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
1341 
1342  if (!h->avctx->hwaccel &&
1343  (h->next_output_pic->field_poc[0] == INT_MAX ||
1344  h->next_output_pic->field_poc[1] == INT_MAX)
1345  ) {
1346  int p;
1347  AVFrame *f = h->next_output_pic->f;
1348  int field = h->next_output_pic->field_poc[0] == INT_MAX;
1349  uint8_t *dst_data[4];
1350  int linesizes[4];
1351  const uint8_t *src_data[4];
1352 
1353  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
1354 
1355  for (p = 0; p<4; p++) {
1356  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
1357  src_data[p] = f->data[p] + field *f->linesize[p];
1358  linesizes[p] = 2*f->linesize[p];
1359  }
1360 
1361  av_image_copy(dst_data, linesizes, src_data, linesizes,
1362  f->format, f->width, f->height>>1);
1363  }
1364 
1365  ret = output_frame(h, pict, h->next_output_pic);
1366  if (ret < 0)
1367  return ret;
1368  *got_frame = 1;
1369  if (CONFIG_MPEGVIDEO) {
1370  ff_print_debug_info2(h->avctx, pict, NULL,
1371  h->next_output_pic->mb_type,
1372  h->next_output_pic->qscale_table,
1373  h->next_output_pic->motion_val,
1374  NULL,
1375  h->mb_width, h->mb_height, h->mb_stride, 1);
1376  }
1377  }
1378  }
1379 
1380  av_assert0(pict->buf[0] || !*got_frame);
1381 
1382  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1383 
1384  return get_consumed_bytes(buf_index, buf_size);
1385 }
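/* [Editor's note] A minimal, hedged sketch of how a caller of this era's API would
 * reach h264_decode_frame() above, via avcodec_decode_video2(); variable names are
 * illustrative and error handling is omitted:
 *
 *     AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     avcodec_open2(avctx, codec, NULL);
 *
 *     AVFrame *frame = av_frame_alloc();
 *     int got_frame = 0;
 *     // pkt is an AVPacket holding one access unit from a demuxer or parser
 *     avcodec_decode_video2(avctx, frame, &got_frame, &pkt);
 *     if (got_frame) {
 *         // frame now holds a decoded picture; got_frame mirrors the *got_frame
 *         // output parameter filled in by h264_decode_frame().
 *     }
 */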
1386 
1387 #define OFFSET(x) offsetof(H264Context, x)
1388 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1389 static const AVOption h264_options[] = {
1390  {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
1391  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
1392  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1393  { NULL },
1394 };
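/* [Editor's note] These private options can be set by API users through the options
 * dictionary passed to avcodec_open2(). A small hedged example (the local variable
 * names are illustrative):
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "enable_er", "0", 0);   // force error resilience off
 *     avcodec_open2(avctx, avcodec_find_decoder(AV_CODEC_ID_H264), &opts);
 *     av_dict_free(&opts);
 */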
1395 
1396 static const AVClass h264_class = {
1397  .class_name = "H264 Decoder",
1398  .item_name = av_default_item_name,
1399  .option = h264_options,
1400  .version = LIBAVUTIL_VERSION_INT,
1401 };
1402 
1404  .name = "h264",
1405  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1406  .type = AVMEDIA_TYPE_VIDEO,
1407  .id = AV_CODEC_ID_H264,
1408  .priv_data_size = sizeof(H264Context),
1409  .init = ff_h264_decode_init,
1410  .close = h264_decode_end,
1411  .decode = h264_decode_frame,
1412  .capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1413  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1414  AV_CODEC_CAP_FRAME_THREADS,
1415  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1416  .flush = flush_dpb,
1417  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1418  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1419  .profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1420  .priv_class = &h264_class,
1421 };
1422 
1423 #if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
1424 static const AVClass h264_vdpau_class = {
1425  .class_name = "H264 VDPAU Decoder",
1426  .item_name = av_default_item_name,
1427  .option = h264_options,
1428  .version = LIBAVUTIL_VERSION_INT,
1429 };
1430 
1431 AVCodec ff_h264_vdpau_decoder = {
1432  .name = "h264_vdpau",
1433  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
1434  .type = AVMEDIA_TYPE_VIDEO,
1435  .id = AV_CODEC_ID_H264,
1436  .priv_data_size = sizeof(H264Context),
1437  .init = ff_h264_decode_init,
1438  .close = h264_decode_end,
1439  .decode = h264_decode_frame,
1440  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
1441  .flush = flush_dpb,
1442  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
1443  AV_PIX_FMT_NONE},
1444  .profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1445  .priv_class = &h264_vdpau_class,
1446 };
1447 #endif
struct H264Context * h264
Definition: h264.h:317
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:47
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1658
void ff_h264_flush_change(H264Context *h)
Definition: h264.c:756
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3725
int workaround_bugs
Definition: h264.h:494
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int sei_recovery_frame_cnt
Definition: h264.h:298
#define AV_NUM_DATA_POINTERS
Definition: frame.h:185
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG-2 field pics)
Definition: avcodec.h:2193
enum AVPixelFormat backup_pix_fmt
Definition: h264.h:487
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count)
Call decode_slice() for each context.
Definition: h264_slice.c:2400
H264POCContext poc
Definition: h264.h:578
int mb_num
Definition: h264.h:557
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2222
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:478
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:157
static int get_last_needed_nal(H264Context *h)
Definition: h264.c:809
int recovery_frame_cnt
recovery_frame_cnt
Definition: h264_sei.h:109
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:429
AVOption.
Definition: opt.h:245
static const AVClass h264_class
Definition: h264.c:1396
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
int size
Definition: h2645_parse.h:33
int edge_emu_buffer_allocated
Definition: h264.h:417
Definition: h264.h:123
uint16_t num_pictures
Definition: h264_sei.h:137
#define H264_MAX_PICTURE_COUNT
Definition: h264.h:51
int first_field
Definition: h264.h:529
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:390
#define LIBAVUTIL_VERSION_INT
Definition: version.h:70
AVBufferRef * sps_list[MAX_SPS_COUNT]
Definition: h264.h:230
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:363
const char * desc
Definition: nvenc.c:89
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
uint16_t * cbp_table
Definition: h264.h:534
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:393
int frame_packing_arrangement_cancel_flag
is previous arrangement canceled, -1 if never received
Definition: h264_sei.h:120
void ff_er_frame_end(ERContext *s)
Sequence parameter set.
Definition: h264.h:136
int mb_y
Definition: h264.h:554
int bitstream_restriction_flag
Definition: h264.h:176
#define FMO
Definition: h264.h:66
int num
numerator
Definition: rational.h:44
int repeat_pict
When decoding, this signals how much the picture must be delayed.
Definition: frame.h:313
int bipred_scratchpad_allocated
Definition: h264.h:416
int size
Definition: avcodec.h:1581
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output...
Definition: diracdec.c:64
AVBufferPool * mb_type_pool
Definition: h264.h:686
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id)
Split an input packet into NAL units.
Definition: h2645_parse.c:248
int crop
Definition: h264.h:300
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264.c:102
int16_t(*[2] motion_val)[2]
Definition: h264.h:271
int flags
Definition: h264.h:493
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1877
int mb_height
Definition: h264.h:555
H264Picture * delayed_pic[MAX_DELAYED_PIC_COUNT+2]
Definition: h264.h:593
int is_avc
Used to parse AVC variant of H.264.
Definition: h264.h:568
3: top field, bottom field, in that order
Definition: h264_sei.h:45
AVBufferPool * ref_index_pool
Definition: h264.h:688
void ff_h264_free_tables(H264Context *h)
Definition: h264.c:137
void ff_h264_sei_uninit(H264SEIContext *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:41
ERPicture last_pic
H264SEIDisplayOrientation display_orientation
Definition: h264_sei.h:154
void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:327
H264Context.
Definition: h264.h:456
AVFrame * f
Definition: h264.h:264
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries...
Definition: avcodec.h:917
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_WB32 unsigned int_TMPL AV_WB24 unsigned int_TMPL AV_RB16
Definition: bytestream.h:87
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264.h:658
8: frame tripling
Definition: h264_sei.h:50
Views are next to each other.
Definition: stereo3d.h:45
AVCodec.
Definition: avcodec.h:3542
int picture_structure
Definition: h264.h:528
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
Definition: h264.c:847
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
unsigned current_sps_id
id of the current SPS
Definition: h264.h:520
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264.h:404
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264.c:60
int size_bits
Size, in bits, of just the data, excluding the stop bit and any trailing padding. ...
Definition: h2645_parse.h:40
H264SEIGreenMetaData green_metadata
Definition: h264_sei.h:155
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1786
uint8_t * chroma_pred_mode_table
Definition: h264.h:537
int setup_finished
Definition: h264.h:675
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3264
H264SEIContext sei
Definition: h264.h:683
const char * ff_h264_sei_stereo_mode(const H264SEIFramePacking *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:454
BYTE int const BYTE * srcp
Definition: avisynth_c.h:676
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2968
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:981
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
const PPS * pps
Definition: h264.h:236
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
uint8_t
#define av_cold
Definition: attributes.h:82
uint8_t green_metadata_type
Definition: h264_sei.h:134
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:140
AVOptions.
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:123
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264.c:303
int poc
frame POC
Definition: h264.h:283
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
AVCodec ff_h264_decoder
Definition: h264.c:1403
Multithreading support functions.
Definition: h264.h:117
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:374
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1764
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
uint8_t(*[2] top_borders)[(16 *3)*2]
Definition: h264.h:415
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:367
ERPicture cur_pic
int frame_recovered
Initial frame has been completely recovered.
Definition: h264.h:665
Structure to hold side data for an AVFrame.
Definition: frame.h:143
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps, int *is_avc, int *nal_length_size, int err_recognition, void *logctx)
Definition: h264_parse.c:412
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int pt
Definition: rtp.c:35
uint8_t * data
Definition: avcodec.h:1580
#define AV_CODEC_CAP_HWACCEL_VDPAU
Codec can export data for HW decoding (VDPAU).
Definition: avcodec.h:992
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
AVDictionary * metadata
metadata.
Definition: frame.h:471
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:318
#define MAX_DELAYED_PIC_COUNT
Definition: h264.h:58
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264.c:875
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:801
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
H264Picture * parent
Definition: h264.h:313
H264SEIAFD afd
Definition: h264_sei.h:148
high precision timer, useful to profile code
Views are alternated temporally.
Definition: stereo3d.h:66
int recovered
picture at IDR or recovery point + recovery count
Definition: h264.h:296
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:88
#define AVOnce
Definition: thread.h:158
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2392
7: frame doubling
Definition: h264_sei.h:49
#define av_log(a,...)
int last_pocs[MAX_DELAYED_PIC_COUNT]
Definition: h264.h:594
uint16_t num_seconds
Definition: h264_sei.h:136
uint8_t percent_six_tap_filtering
Definition: h264_sei.h:140
SEI_FpaType frame_packing_arrangement_type
Definition: h264_sei.h:121
int a53_caption_size
Definition: h264_sei.h:93
H.264 / AVC / MPEG-4 part10 codec.
Definition: h264.h:124
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:798
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264.h:465
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1971
int flags
Additional information about the frame packing.
Definition: stereo3d.h:132
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264.c:1165
int16_t * dc_val_base
Definition: h264.h:411
int context_initialized
Definition: h264.h:492
ERContext er
Definition: h264.h:319
int nal_unit_type
Definition: h264.h:563
av_default_item_name
int num_reorder_frames
Definition: h264.h:177
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:111
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
int backup_height
Definition: h264.h:486
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3098
int backup_width
Backup frame properties: needed, because they can be different between returned frame and last decode...
Definition: h264.h:485
H2645Packet pkt
Definition: h264.h:473
static void flush_dpb(AVCodecContext *avctx)
Definition: h264.c:785
int capabilities
Codec capabilities.
Definition: avcodec.h:3561
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:57
static const AVOption h264_options[]
Definition: h264.c:1389
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
uint8_t(*[2] mvd_table)[2]
Definition: h264.h:538
int prev_interlaced_frame
Complement sei_pic_struct SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced...
Definition: h264.h:639
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1744
ThreadFrame tf
Definition: h264.h:265
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3549
H264SEIUnregistered unregistered
Definition: h264_sei.h:150
static AVOnce h264_vlc_init
Definition: h264.c:391
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264.h:817
int valid_recovery_point
Are the SEI recovery points looking valid.
Definition: h264.h:644
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
uint8_t * list_counts
Array of list_count per MB specifying the slice type.
Definition: h264.h:531
#define FFMAX(a, b)
Definition: common.h:94
#define fail()
Definition: checkasm.h:81
uint8_t active_format_description
Definition: h264_sei.h:89
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1019
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:302
int * mb_index2xy
int raw_size
Definition: h2645_parse.h:42
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264.h:95
uint8_t percent_non_zero_macroblocks
Definition: h264_sei.h:138
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264.h:323
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
Decode a slice header.
Definition: h264_slice.c:1031
static const uint8_t scan8[16 *3+3]
Definition: h264.h:801
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:215
int crop_left
Definition: h264.h:301
uint8_t * error_status_table
uint8_t * direct_table
Definition: h264.h:539
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264.h:569
useful rectangle filling function
uint8_t * data[3]
Definition: h264.h:306
int prev_poc_msb
poc_msb of the last reference pic for POC type 0
Definition: h264_parse.h:48
void ff_vdpau_h264_picture_start(H264Context *h)
uint8_t nb_components
The number of components each pixel has, (1-4)
Definition: pixdesc.h:83
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264.c:66
Views are packed per line, as if interlaced.
Definition: stereo3d.h:97
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
Definition: avcodec.h:1910
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:404
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2936
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3090
int ct_type
Bit set of clock types for fields/frames in picture timing SEI message.
Definition: h264_sei.h:74
uint32_t * mb2br_xy
Definition: h264.h:516
uint8_t * er_temp_buffer
#define OFFSET(x)
Definition: h264.c:1387
#define FFMIN(a, b)
Definition: common.h:96
uint16_t * slice_table
slice_table_base + 2*mb_stride + 1
Definition: h264.h:524
int reference
Definition: h264.h:295
#define FIELD_PICTURE(h)
Definition: h264.h:78
int width
picture width / height.
Definition: avcodec.h:1836
int redundant_pic_count
Definition: h264.h:381
int nb_slice_ctx
Definition: h264.h:471
uint32_t * mb_type
Definition: h264.h:274
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:396
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
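A hedged sketch of how the frame-threading progress protocol is used; it relies on libavcodec-internal headers (thread.h, h264.h), so it only builds inside the FFmpeg tree, and the wrapper function is hypothetical.
/* Decoding thread: announce that rows [0, y] of the current picture are
 * reconstructed, so other frame threads may read them as reference data. */
static void report_rows_done_sketch(H264Context *h, int y)
{
    if (h->cur_pic_ptr)
        ff_thread_report_progress(&h->cur_pic_ptr->tf, y, 0 /* field */);
}
A consumer thread blocks on the matching ff_thread_await_progress() call until the rows it needs are available.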
int32_t
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3091
Context Adaptive Binary Arithmetic Coder inline functions.
int mmco_reset
Definition: h264.h:603
H264SliceContext * slice_ctx
Definition: h264.h:470
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2947
int reference
Definition: h264.h:309
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264.c:1231
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1795
int top_borders_allocated[2]
Definition: h264.h:418
uint8_t * a53_caption
Definition: h264_sei.h:94
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:63
#define src
Definition: vp9dsp.c:530
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264.c:1175
Definition: h264.h:122
int quincunx_sampling_flag
Definition: h264_sei.h:124
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:114
int type
NAL unit type.
Definition: h2645_parse.h:50
2: bottom field
Definition: h264_sei.h:44
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define PART_NOT_AVAILABLE
Definition: h264.h:507
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3079
uint8_t * edge_emu_buffer
Definition: h264.h:414
Views are packed per column.
Definition: stereo3d.h:107
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:85
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1023
static const int8_t mv[256][2]
Definition: 4xm.c:77
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:127
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context; allocate buffers which are not shared amongst multiple threads.
Definition: h264.c:238
int mb_stride
Definition: h264.h:556
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVCodecContext * avctx
Definition: h264.h:458
#define AV_ONCE_INIT
Definition: thread.h:159
Libavcodec external API header.
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:83
enum AVCodecID codec_id
Definition: avcodec.h:1666
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:543
ERPicture next_pic
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
int next_outputed_poc
Definition: h264.h:596
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:437
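A small sketch combining init_get_bits8() and get_ue_golomb_long(); both come from libavcodec-internal headers (get_bits.h, golomb.h), so this only compiles inside the FFmpeg tree. The one-byte sample buffer is an illustration: its leading bits 0 1 0 encode the Exp-Golomb value 1.
#include "get_bits.h"
#include "golomb.h"

static int read_first_ue_sketch(void)
{
    static const uint8_t buf[] = { 0x40 }; /* 0100 0000 */
    GetBitContext gb;
    int ret = init_get_bits8(&gb, buf, sizeof(buf));
    if (ret < 0)
        return ret;
    return get_ue_golomb_long(&gb); /* one leading zero -> 2^1 - 1 + 0 = 1 */
}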
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:87
int field_poc[2]
top/bottom POC
Definition: h264.h:282
SEI_PicStructType pic_struct
Definition: h264_sei.h:67
int debug
debug
Definition: avcodec.h:2888
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
int max_contexts
Max number of threads / contexts.
Definition: h264.h:623
int recovery_frame
recovery_frame is the frame_num at which the next frame should be fully constructed.
Definition: h264.h:652
main external API structure.
Definition: avcodec.h:1649
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
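A brief sketch showing how av_display_rotation_set() and av_display_matrix_flip() are combined to build the 3x3 display matrix that can be attached as AV_FRAME_DATA_DISPLAYMATRIX side data; the 90-degree angle and the hflip choice are arbitrary example values.
#include <stdint.h>
#include <libavutil/display.h>

static void fill_display_matrix_sketch(int32_t matrix[9])
{
    av_display_rotation_set(matrix, 90.0); /* pure counterclockwise rotation */
    av_display_matrix_flip(matrix, 1, 0);  /* then hflip = 1, vflip = 0 */
}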
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void decode_postinit(H264Context *h, int setup_finished)
Run setup operations that must be run after slice header decoding.
Definition: h264.c:478
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:180
const uint8_t * data
Definition: h2645_parse.h:34
#define fp
Definition: regdef.h:44
uint8_t * data
Definition: frame.h:145
H264SEIA53Caption a53_caption
Definition: h264_sei.h:149
void * buf
Definition: avisynth_c.h:553
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegvideo.c:1579
int8_t * qscale_table
Definition: h264.h:268
int extradata_size
Definition: avcodec.h:1765
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
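A minimal, self-contained sketch of the AVDictionary API around av_dict_set(); the key/value pair is arbitrary and unrelated to h264.c.
#include <libavutil/dict.h>

static int dict_sketch(void)
{
    AVDictionary *dict = NULL;
    AVDictionaryEntry *e;
    int ret;

    if (av_dict_set(&dict, "threads", "auto", 0) < 0)
        return -1;
    e   = av_dict_get(dict, "threads", NULL, 0);
    ret = e ? 0 : -1;  /* e->value is "auto" while the dict is alive */
    av_dict_free(&dict);
    return ret;
}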
H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstre...
Definition: pixfmt.h:104
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:297
int slice_flags
slice flags
Definition: avcodec.h:2191
Describe the class of an AVClass context structure.
Definition: log.h:67
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264_parse.h:52
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:3473
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:616
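A hedged sketch of attaching A53 closed-caption bytes to an output frame as side data, in the spirit of the SEI caption handling; the function name and the cc/cc_size parameters are placeholders.
#include <string.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int attach_a53_sketch(AVFrame *frame, const uint8_t *cc, int cc_size)
{
    AVFrameSideData *sd =
        av_frame_new_side_data(frame, AV_FRAME_DATA_A53_CC, cc_size);
    if (!sd)
        return AVERROR(ENOMEM);
    memcpy(sd->data, cc, cc_size); /* sd->data is cc_size bytes long */
    return 0;
}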
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:424
Definition: h264.h:119
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:47
static int h264_decode_end(AVCodecContext *avctx)
Definition: h264.c:359
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:274
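A short sketch of the AVBufferPool lifecycle that av_buffer_pool_uninit() closes; the 1024-byte element size is arbitrary, and NULL selects the default allocator.
#include <libavutil/buffer.h>

static void pool_sketch(void)
{
    AVBufferPool *pool = av_buffer_pool_init(1024, NULL);
    AVBufferRef  *buf;

    if (!pool)
        return;
    buf = av_buffer_pool_get(pool); /* reuses a returned buffer when possible */
    av_buffer_unref(&buf);          /* hands the buffer back to the pool */
    av_buffer_pool_uninit(&pool);   /* pool is freed once all buffers return */
}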
int8_t * ref_index[2]
Definition: h264.h:280
int pixel_shift
0 for 8-bit H.264, 1 for high-bit-depth H.264
Definition: h264.h:475
int mmco_reset
An MMCO_RESET sets this to 1.
Definition: h264.h:285
int content_interpretation_type
Definition: h264_sei.h:123
H264Picture * cur_pic_ptr
Definition: h264.h:466
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264.h:379
#define FF_DEBUG_GREEN_MD
Definition: avcodec.h:2915
int enable_er
Definition: h264.h:681
uint8_t percent_intra_coded_macroblocks
Definition: h264_sei.h:139
#define FF_COMPLIANCE_STRICT
Strictly conform to everything in the spec, regardless of the consequences.
Definition: avcodec.h:2868
unsigned int sps_id
Definition: h264.h:201
attribute_deprecated int dtg_active_format
DTG active format information (additional aspect ratio information only used in DVB MPEG-2 transport ...
Definition: avcodec.h:2154
H264SEIPictureTiming picture_timing
Definition: h264_sei.h:147
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:126
H264SEIRecoveryPoint recovery_point
Definition: h264_sei.h:151
static int is_extra(const uint8_t *buf, int buf_size)
Definition: h264.c:1209
AVCodecContext * avctx
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
static const uint8_t start_code[]
Definition: h264.c:806
int linesize[3]
Definition: h264.h:307
int pic_struct_present_flag
Definition: h264.h:183
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
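A hedged sketch of tagging a decoded frame with its stereo packing via av_stereo3d_create_side_data(); the top-bottom type and the inverted flag are example values of the kind a frame-packing SEI could produce.
#include <libavutil/error.h>
#include <libavutil/stereo3d.h>

static int tag_stereo_sketch(AVFrame *frame)
{
    AVStereo3D *stereo = av_stereo3d_create_side_data(frame);
    if (!stereo)
        return AVERROR(ENOMEM);
    stereo->type  = AV_STEREO3D_TOPBOTTOM;   /* views are on top of each other */
    stereo->flags = AV_STEREO3D_FLAG_INVERT; /* right/bottom holds the left view */
    return 0;
}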
unsigned int list_count
Definition: h264.h:405
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
int prev_poc_lsb
poc_lsb of the last reference pic for POC type 0
Definition: h264_parse.h:49
uint8_t xsd_metric_type
Definition: h264_sei.h:142
int has_recovery_point
Definition: h264.h:667
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length)
Decode PPS.
Definition: h264_ps.c:706
uint16_t xsd_metric_value
Definition: h264_sei.h:143
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:722
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264.c:743
discard all non-reference frames
Definition: avcodec.h:780
AVBufferPool * qscale_table_pool
Definition: h264.h:685
H264Picture * next_output_pic
Definition: h264.h:595
AVBufferPool * motion_val_pool
Definition: h264.h:687
#define SLICE_SINGLETHREAD
Definition: h264.h:993
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
common internal api header.
if(ret< 0)
Definition: vf_mcdeint.c:282
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264.h:663
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:35
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:130
uint16_t * slice_table_base
Definition: h264.h:576
int log2_max_frame_num
log2_max_frame_num_minus4 + 4
Definition: h264.h:142
int missing_fields
Definition: h264.h:669
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:76
H264ParamSets ps
Definition: h264.h:574
int16_t * dc_val[3]
H264SEIFramePacking frame_packing
Definition: h264_sei.h:153
H.264 / AVC / MPEG-4 part10 motion vector prediction.
Bi-dir predicted.
Definition: avutil.h:268
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:2832
const AVProfile ff_h264_profiles[]
Definition: profiles.c:49
int cur_chroma_format_idc
Definition: h264.h:677
int8_t * intra4x4_pred_mode
Definition: h264.h:345
unsigned properties
Definition: avcodec.h:3471
int den
denominator
Definition: rational.h:45
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
Definition: h264.h:118
GetBitContext gb
Definition: h2645_parse.h:45
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:318
int present
Definition: h264_sei.h:88
void * priv_data
Definition: avcodec.h:1691
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:154
const uint8_t * raw_data
Definition: h2645_parse.h:43
#define PICT_FRAME
Definition: mpegutils.h:39
4: bottom field, top field, in that order
Definition: h264_sei.h:46
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:131
int8_t ref_cache[2][5 *8]
Definition: h264.h:430
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:838
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
#define SLICE_SKIPED
Definition: h264.h:994
#define VD
Definition: h264.c:1388
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:323
const uint16_t ff_h264_mb_sizes[4]
Definition: h264.c:58
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1699
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:161
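A sketch of the one-time initialization pattern built on AV_ONCE_INIT and ff_thread_once() from the FFmpeg-internal thread.h wrapper, mirroring how a static VLC table initializer is guarded; init_static_tables_sketch() is a hypothetical routine, not one from h264.c.
static AVOnce once_control_sketch = AV_ONCE_INIT;

static void init_static_tables_sketch(void)
{
    /* build static lookup tables exactly once, even with frame threads */
}

static int ensure_tables_sketch(void)
{
    return ff_thread_once(&once_control_sketch, init_static_tables_sketch);
}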
H2645NAL * nals
Definition: h2645_parse.h:68
H264Picture cur_pic
Definition: h264.h:467
Views are on top of each other.
Definition: stereo3d.h:55
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:253
int mb_width
Definition: h264.h:555
int current_slice
current slice number, used to initialize slice_num of each thread/context
Definition: h264.h:615
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1751
uint32_t * mb2b_xy
Definition: h264.h:515
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264.h:406
H264Picture last_pic_for_ec
Definition: h264.h:468
int au_pps_id
pps_id of current access unit
Definition: h264.h:522
SPS * sps
Definition: h264.h:238
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:229
int height
Definition: frame.h:236
int crop_top
Definition: h264.h:302
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:926
1: top field
Definition: h264_sei.h:43
FILE * out
Definition: movenc.c:54
uint8_t(* mvd_table[2])[2]
Definition: h264.h:443
Definition: h264.h:121
#define av_freep(p)
void ff_h264_ps_uninit(H264ParamSets *ps)
Uninit H264 param sets structure.
Definition: h264_ps.c:301
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb, const H264ParamSets *ps, void *logctx)
Definition: h264_sei.c:382
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:334
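A minimal sketch of probing a packet for updated extradata with av_packet_get_side_data(), similar in spirit to how a decoder's frame entry point can pick up AV_PKT_DATA_NEW_EXTRADATA; the wrapper function is illustrative only.
#include <libavcodec/avcodec.h>

static void check_new_extradata_sketch(AVPacket *pkt)
{
    int size = 0;
    uint8_t *data =
        av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &size);
    if (data && size > 0) {
        /* reparse parameter sets from 'data' before decoding this packet */
    }
}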
int prev_frame_num_offset
for POC type 2
Definition: h264_parse.h:51
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
int8_t * intra4x4_pred_mode
Definition: h264.h:501
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3739
int mb_field_decoding_flag
Definition: h264.h:378
uint8_t(* non_zero_count)[48]
Definition: h264.h:504
Exp-Golomb VLC helpers.
uint8_t * bipred_scratchpad
Definition: h264.h:413
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1557
int droppable
Definition: h264.h:489
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:956
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2866
int nal_ref_idc
Definition: h264.h:562
GetBitContext gb
Definition: h264.h:318
for(j=16;j >0;--j)
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:139
int b_stride
Definition: h264.h:517
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:48
Context Adaptive Binary Arithmetic Coder.
uint8_t percent_alpha_point_deblocking_instance
Definition: h264_sei.h:141