FFmpeg
h264dec.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "config_components.h"
31 
32 #include "libavutil/avassert.h"
33 #include "libavutil/imgutils.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/thread.h"
36 #include "libavutil/video_enc_params.h"
37 
38 #include "codec_internal.h"
39 #include "internal.h"
40 #include "error_resilience.h"
41 #include "avcodec.h"
42 #include "h264.h"
43 #include "h264dec.h"
44 #include "h2645_parse.h"
45 #include "h264data.h"
46 #include "h264_ps.h"
47 #include "golomb.h"
48 #include "hwconfig.h"
49 #include "mpegutils.h"
50 #include "profiles.h"
51 #include "rectangle.h"
52 #include "thread.h"
53 #include "threadframe.h"
54 
55 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
56 
57 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
58 {
59  H264Context *h = avctx->priv_data;
60  return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
61 }
62 
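/* h264_er_decode_mb() below is the macroblock reconstruction callback used by
 * the shared error-resilience code; it is installed as er->decode_mb in
 * ff_h264_alloc_tables(). It fills the per-macroblock reference and
 * motion-vector caches from the concealment guess and then reconstructs the
 * block with ff_h264_hl_decode_mb(). */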
63 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
64  int (*mv)[2][4][2],
65  int mb_x, int mb_y, int mb_intra, int mb_skipped)
66 {
67  H264Context *h = opaque;
68  H264SliceContext *sl = &h->slice_ctx[0];
69 
70  sl->mb_x = mb_x;
71  sl->mb_y = mb_y;
72  sl->mb_xy = mb_x + mb_y * h->mb_stride;
73  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
74  av_assert1(ref >= 0);
75  /* FIXME: It is possible albeit uncommon that slice references
76  * differ between slices. We take the easy approach and ignore
77  * it for now. If this turns out to have any relevance in
78  * practice then correct remapping should be added. */
79  if (ref >= sl->ref_count[0])
80  ref = 0;
81  if (!sl->ref_list[0][ref].data[0]) {
82  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
83  ref = 0;
84  }
85  if ((sl->ref_list[0][ref].reference&3) != 3) {
86  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
87  return;
88  }
89  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
90  2, 2, 2, ref, 1);
91  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
92  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
93  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
94  sl->mb_mbaff =
95  sl->mb_field_decoding_flag = 0;
96  ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
97 }
98 
99 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
100  int y, int height)
101 {
102  AVCodecContext *avctx = h->avctx;
103  const AVFrame *src = h->cur_pic.f;
104  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
105  int vshift = desc->log2_chroma_h;
106  const int field_pic = h->picture_structure != PICT_FRAME;
107  if (field_pic) {
108  height <<= 1;
109  y <<= 1;
110  }
111 
112  height = FFMIN(height, avctx->height - y);
113 
114  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
115  return;
116 
117  if (avctx->draw_horiz_band) {
118  int offset[AV_NUM_DATA_POINTERS];
119  int i;
120 
121  offset[0] = y * src->linesize[0];
122  offset[1] =
123  offset[2] = (y >> vshift) * src->linesize[1];
124  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
125  offset[i] = 0;
126 
127  emms_c();
128 
129  avctx->draw_horiz_band(avctx, src, offset,
130  y, h->picture_structure, height);
131  }
132 }
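/* ff_h264_draw_horiz_band() hands each finished band to a caller-installed
 * AVCodecContext.draw_horiz_band callback; this decoder passes
 * h->picture_structure as the 'type' argument. A minimal sketch of such a
 * callback (the name my_draw_band is illustrative, not part of FFmpeg):
 *
 *     static void my_draw_band(AVCodecContext *s, const AVFrame *src,
 *                              int offset[AV_NUM_DATA_POINTERS],
 *                              int y, int type, int height)
 *     {
 *         // offset[p] is the byte offset of the band within src->data[p];
 *         // y and height select the rows that are now complete.
 *     }
 */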
133 
134 void ff_h264_free_tables(H264Context *h)
135 {
136  int i;
137 
138  av_freep(&h->intra4x4_pred_mode);
139  av_freep(&h->chroma_pred_mode_table);
140  av_freep(&h->cbp_table);
141  av_freep(&h->mvd_table[0]);
142  av_freep(&h->mvd_table[1]);
143  av_freep(&h->direct_table);
144  av_freep(&h->non_zero_count);
145  av_freep(&h->slice_table_base);
146  h->slice_table = NULL;
147  av_freep(&h->list_counts);
148 
149  av_freep(&h->mb2b_xy);
150  av_freep(&h->mb2br_xy);
151 
152  av_buffer_pool_uninit(&h->qscale_table_pool);
153  av_buffer_pool_uninit(&h->mb_type_pool);
154  av_buffer_pool_uninit(&h->motion_val_pool);
155  av_buffer_pool_uninit(&h->ref_index_pool);
156 
157 #if CONFIG_ERROR_RESILIENCE
158  av_freep(&h->er.mb_index2xy);
159  av_freep(&h->er.error_status_table);
160  av_freep(&h->er.er_temp_buffer);
161  av_freep(&h->dc_val_base);
162 #endif
163 
164  for (i = 0; i < h->nb_slice_ctx; i++) {
165  H264SliceContext *sl = &h->slice_ctx[i];
166 
167  av_freep(&sl->bipred_scratchpad);
168  av_freep(&sl->edge_emu_buffer);
169  av_freep(&sl->top_borders[0]);
170  av_freep(&sl->top_borders[1]);
171 
172  sl->bipred_scratchpad_allocated = 0;
173  sl->edge_emu_buffer_allocated = 0;
174  sl->top_borders_allocated[0] = 0;
175  sl->top_borders_allocated[1] = 0;
176  }
177 }
178 
179 int ff_h264_alloc_tables(H264Context *h)
180 {
181  ERContext *const er = &h->er;
182  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
183  const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
184  const int st_size = big_mb_num + h->mb_stride;
185  int x, y;
186 
187  if (!FF_ALLOCZ_TYPED_ARRAY(h->intra4x4_pred_mode, row_mb_num * 8) ||
188  !FF_ALLOCZ_TYPED_ARRAY(h->non_zero_count, big_mb_num) ||
189  !FF_ALLOCZ_TYPED_ARRAY(h->slice_table_base, st_size) ||
190  !FF_ALLOCZ_TYPED_ARRAY(h->cbp_table, big_mb_num) ||
191  !FF_ALLOCZ_TYPED_ARRAY(h->chroma_pred_mode_table, big_mb_num) ||
192  !FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[0], row_mb_num * 8) ||
193  !FF_ALLOCZ_TYPED_ARRAY(h->mvd_table[1], row_mb_num * 8) ||
194  !FF_ALLOCZ_TYPED_ARRAY(h->direct_table, big_mb_num * 4) ||
195  !FF_ALLOCZ_TYPED_ARRAY(h->list_counts, big_mb_num) ||
196  !FF_ALLOCZ_TYPED_ARRAY(h->mb2b_xy, big_mb_num) ||
197  !FF_ALLOCZ_TYPED_ARRAY(h->mb2br_xy, big_mb_num))
198  return AVERROR(ENOMEM);
199  h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
200  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
201  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
202  memset(h->slice_table_base, -1,
203  st_size * sizeof(*h->slice_table_base));
204  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
205  for (y = 0; y < h->mb_height; y++)
206  for (x = 0; x < h->mb_width; x++) {
207  const int mb_xy = x + y * h->mb_stride;
208  const int b_xy = 4 * x + 4 * y * h->b_stride;
209 
210  h->mb2b_xy[mb_xy] = b_xy;
211  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
212  }
213 
214  if (CONFIG_ERROR_RESILIENCE) {
215  const int er_size = h->mb_height * h->mb_stride * (4*sizeof(int) + 1);
216  int mb_array_size = h->mb_height * h->mb_stride;
217  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
218  int yc_size = y_size + 2 * big_mb_num;
219 
220  /* init ER */
221  er->avctx = h->avctx;
222  er->decode_mb = h264_er_decode_mb;
223  er->opaque = h;
224  er->quarter_sample = 1;
225 
226  er->mb_num = h->mb_num;
227  er->mb_width = h->mb_width;
228  er->mb_height = h->mb_height;
229  er->mb_stride = h->mb_stride;
230  er->b8_stride = h->mb_width * 2 + 1;
231 
232  // error resilience code looks cleaner with this
233  if (!FF_ALLOCZ_TYPED_ARRAY(er->mb_index2xy, h->mb_num + 1) ||
234  !FF_ALLOCZ_TYPED_ARRAY(er->error_status_table, mb_array_size) ||
235  !FF_ALLOCZ_TYPED_ARRAY(er->er_temp_buffer, er_size) ||
236  !FF_ALLOCZ_TYPED_ARRAY(h->dc_val_base, yc_size))
237  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
238 
239  for (y = 0; y < h->mb_height; y++)
240  for (x = 0; x < h->mb_width; x++)
241  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
242 
243  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
244  h->mb_stride + h->mb_width;
245  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
246  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
247  er->dc_val[2] = er->dc_val[1] + big_mb_num;
248  for (int i = 0; i < yc_size; i++)
249  h->dc_val_base[i] = 1024;
250  }
251 
252  return 0;
253 }
254 
255 /**
256  * Init slice context
257  */
258 void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
259 {
260  sl->ref_cache[0][scan8[5] + 1] =
261  sl->ref_cache[0][scan8[7] + 1] =
262  sl->ref_cache[0][scan8[13] + 1] =
263  sl->ref_cache[1][scan8[5] + 1] =
264  sl->ref_cache[1][scan8[7] + 1] =
265  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
266 
267  sl->er = &h->er;
268 }
269 
270 static int h264_init_pic(H264Picture *pic)
271 {
272  pic->f = av_frame_alloc();
273  if (!pic->f)
274  return AVERROR(ENOMEM);
275 
276  pic->f_grain = av_frame_alloc();
277  if (!pic->f_grain)
278  return AVERROR(ENOMEM);
279 
280  return 0;
281 }
282 
283 static int h264_init_context(AVCodecContext *avctx, H264Context *h)
284 {
285  int i, ret;
286 
287  h->avctx = avctx;
288  h->cur_chroma_format_idc = -1;
289 
290  h->width_from_caller = avctx->width;
291  h->height_from_caller = avctx->height;
292 
293  h->workaround_bugs = avctx->workaround_bugs;
294  h->flags = avctx->flags;
295  h->poc.prev_poc_msb = 1 << 16;
296  h->recovery_frame = -1;
297  h->frame_recovered = 0;
298  h->poc.prev_frame_num = -1;
299  h->sei.frame_packing.arrangement_cancel_flag = -1;
300  h->sei.unregistered.x264_build = -1;
301 
302  h->next_outputed_poc = INT_MIN;
303  for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
304  h->last_pocs[i] = INT_MIN;
305 
306  ff_h264_sei_uninit(&h->sei);
307 
308  h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
309  h->slice_ctx = av_calloc(h->nb_slice_ctx, sizeof(*h->slice_ctx));
310  if (!h->slice_ctx) {
311  h->nb_slice_ctx = 0;
312  return AVERROR(ENOMEM);
313  }
314 
315  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
316  if ((ret = h264_init_pic(&h->DPB[i])) < 0)
317  return ret;
318  }
319 
320  if ((ret = h264_init_pic(&h->cur_pic)) < 0)
321  return ret;
322 
323  if ((ret = h264_init_pic(&h->last_pic_for_ec)) < 0)
324  return ret;
325 
326  for (i = 0; i < h->nb_slice_ctx; i++)
327  h->slice_ctx[i].h264 = h;
328 
329  return 0;
330 }
331 
332 static void h264_free_pic(H264Context *h, H264Picture *pic)
333 {
334  ff_h264_unref_picture(h, pic);
335  av_frame_free(&pic->f);
336  av_frame_free(&pic->f_grain);
337 }
338 
339 static av_cold int h264_decode_end(AVCodecContext *avctx)
340 {
341  H264Context *h = avctx->priv_data;
342  int i;
343 
344  ff_h264_remove_all_refs(h);
345  ff_h264_free_tables(h);
346 
347  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
348  h264_free_pic(h, &h->DPB[i]);
349  }
350  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
351 
352  h->cur_pic_ptr = NULL;
353 
354  av_freep(&h->slice_ctx);
355  h->nb_slice_ctx = 0;
356 
357  ff_h264_sei_uninit(&h->sei);
358  ff_h264_ps_uninit(&h->ps);
359 
360  ff_h2645_packet_uninit(&h->pkt);
361 
362  h264_free_pic(h, &h->cur_pic);
363  h264_free_pic(h, &h->last_pic_for_ec);
364 
365  return 0;
366 }
367 
368 static AVOnce h264_vlc_init = AV_ONCE_INIT;
369 
370 static av_cold int h264_decode_init(AVCodecContext *avctx)
371 {
372  H264Context *h = avctx->priv_data;
373  int ret;
374 
375  ret = h264_init_context(avctx, h);
376  if (ret < 0)
377  return ret;
378 
379  ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
380  if (ret != 0) {
381  av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
382  return AVERROR_UNKNOWN;
383  }
384 
385  if (avctx->ticks_per_frame == 1) {
386  if(h->avctx->time_base.den < INT_MAX/2) {
387  h->avctx->time_base.den *= 2;
388  } else
389  h->avctx->time_base.num /= 2;
390  }
391  avctx->ticks_per_frame = 2;
392 
393  if (!avctx->internal->is_copy) {
394  if (avctx->extradata_size > 0 && avctx->extradata) {
395  ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
396  &h->ps, &h->is_avc, &h->nal_length_size,
397  avctx->err_recognition, avctx);
398  if (ret < 0) {
399  int explode = avctx->err_recognition & AV_EF_EXPLODE;
400  av_log(avctx, explode ? AV_LOG_ERROR: AV_LOG_WARNING,
401  "Error decoding the extradata\n");
402  if (explode) {
403  return ret;
404  }
405  ret = 0;
406  }
407  }
408  }
409 
410  if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
411  h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
412  h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
413  }
414 
415  ff_h264_flush_change(h);
416 
417  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
418  h->enable_er = 0;
419 
420  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
421  av_log(avctx, AV_LOG_WARNING,
422  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
423  "Use it at your own risk\n");
424  }
425 
426  return 0;
427 }
428 
429 /**
430  * instantaneous decoder refresh.
431  */
432 static void idr(H264Context *h)
433 {
434  int i;
435  ff_h264_remove_all_refs(h);
436  h->poc.prev_frame_num =
437  h->poc.prev_frame_num_offset = 0;
438  h->poc.prev_poc_msb = 1<<16;
439  h->poc.prev_poc_lsb = -1;
440  for (i = 0; i < FF_ARRAY_ELEMS(h->last_pocs); i++)
441  h->last_pocs[i] = INT_MIN;
442 }
443 
444 /* forget old pics after a seek */
445 void ff_h264_flush_change(H264Context *h)
446 {
447  int i, j;
448 
449  h->next_outputed_poc = INT_MIN;
450  h->prev_interlaced_frame = 1;
451  idr(h);
452 
453  h->poc.prev_frame_num = -1;
454  if (h->cur_pic_ptr) {
455  h->cur_pic_ptr->reference = 0;
456  for (j=i=0; h->delayed_pic[i]; i++)
457  if (h->delayed_pic[i] != h->cur_pic_ptr)
458  h->delayed_pic[j++] = h->delayed_pic[i];
459  h->delayed_pic[j] = NULL;
460  }
461  ff_h264_unref_picture(h, &h->last_pic_for_ec);
462 
463  h->first_field = 0;
464  h->recovery_frame = -1;
465  h->frame_recovered = 0;
466  h->current_slice = 0;
467  h->mmco_reset = 1;
468 }
469 
470 static void h264_decode_flush(AVCodecContext *avctx)
471 {
472  H264Context *h = avctx->priv_data;
473  int i;
474 
475  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
476 
477  ff_h264_flush_change(h);
478  ff_h264_sei_uninit(&h->sei);
479 
480  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
481  ff_h264_unref_picture(h, &h->DPB[i]);
482  h->cur_pic_ptr = NULL;
483  ff_h264_unref_picture(h, &h->cur_pic);
484 
485  h->mb_y = 0;
486 
487  ff_h264_free_tables(h);
488  h->context_initialized = 0;
489 }
490 
491 static int get_last_needed_nal(H264Context *h)
492 {
493  int nals_needed = 0;
494  int slice_type = 0;
495  int picture_intra_only = 1;
496  int first_slice = 0;
497  int i, ret;
498 
499  for (i = 0; i < h->pkt.nb_nals; i++) {
500  H2645NAL *nal = &h->pkt.nals[i];
501  GetBitContext gb;
502 
503  /* packets can sometimes contain multiple PPS/SPS,
504  * e.g. two PAFF field pictures in one packet, or a demuxer
505  * which splits NALs strangely; if so, when frame threading we
506  * can't start the next thread until we've read all of them */
507  switch (nal->type) {
508  case H264_NAL_SPS:
509  case H264_NAL_PPS:
510  nals_needed = i;
511  break;
512  case H264_NAL_DPA:
513  case H264_NAL_IDR_SLICE:
514  case H264_NAL_SLICE:
515  ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
516  if (ret < 0) {
517  av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
518  if (h->avctx->err_recognition & AV_EF_EXPLODE)
519  return ret;
520 
521  break;
522  }
523  if (!get_ue_golomb_long(&gb) || // first_mb_in_slice
524  !first_slice ||
525  first_slice != nal->type)
526  nals_needed = i;
527  slice_type = get_ue_golomb_31(&gb);
528  if (slice_type > 9)
529  slice_type = 0;
530  if (slice_type > 4)
531  slice_type -= 5;
532 
533  slice_type = ff_h264_golomb_to_pict_type[slice_type];
534  picture_intra_only &= (slice_type & 3) == AV_PICTURE_TYPE_I;
535  if (!first_slice)
536  first_slice = nal->type;
537  }
538  }
539 
540  h->picture_intra_only = picture_intra_only;
541 
542  return nals_needed;
543 }
544 
545 static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
546 {
547  av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
548  av_log(logctx, AV_LOG_DEBUG, " green_metadata_type: %d\n", gm->green_metadata_type);
549 
550  if (gm->green_metadata_type == 0) {
551  av_log(logctx, AV_LOG_DEBUG, " green_metadata_period_type: %d\n", gm->period_type);
552 
553  if (gm->period_type == 2)
554  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_seconds: %d\n", gm->num_seconds);
555  else if (gm->period_type == 3)
556  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_pictures: %d\n", gm->num_pictures);
557 
558  av_log(logctx, AV_LOG_DEBUG, " SEI GREEN Complexity Metrics: %f %f %f %f\n",
559  (float)gm->percent_non_zero_macroblocks/255,
560  (float)gm->percent_intra_coded_macroblocks/255,
561  (float)gm->percent_six_tap_filtering/255,
562  (float)gm->percent_alpha_point_deblocking_instance/255);
563 
564  } else if (gm->green_metadata_type == 1) {
565  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_type: %d\n", gm->xsd_metric_type);
566 
567  if (gm->xsd_metric_type == 0)
568  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_value: %f\n",
569  (float)gm->xsd_metric_value/100);
570  }
571 }
572 
573 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
574 {
575  AVCodecContext *const avctx = h->avctx;
576  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
577  int idr_cleared=0;
578  int i, ret = 0;
579 
580  h->has_slice = 0;
581  h->nal_unit_type= 0;
582 
583  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
584  h->current_slice = 0;
585  if (!h->first_field) {
586  h->cur_pic_ptr = NULL;
587  ff_h264_sei_uninit(&h->sei);
588  }
589  }
590 
591  if (h->nal_length_size == 4) {
592  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
593  h->is_avc = 0;
594  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
595  h->is_avc = 1;
596  }
597 
598  ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc, h->nal_length_size,
599  avctx->codec_id, 0, 0);
600  if (ret < 0) {
601  av_log(avctx, AV_LOG_ERROR,
602  "Error splitting the input into NAL units.\n");
603  return ret;
604  }
605 
606  if (avctx->active_thread_type & FF_THREAD_FRAME)
607  nals_needed = get_last_needed_nal(h);
608  if (nals_needed < 0)
609  return nals_needed;
610 
611  for (i = 0; i < h->pkt.nb_nals; i++) {
612  H2645NAL *nal = &h->pkt.nals[i];
613  int max_slice_ctx, err;
614 
615  if (avctx->skip_frame >= AVDISCARD_NONREF &&
616  nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
617  continue;
618 
619  // FIXME these should stop being context-global variables
620  h->nal_ref_idc = nal->ref_idc;
621  h->nal_unit_type = nal->type;
622 
623  err = 0;
624  switch (nal->type) {
625  case H264_NAL_IDR_SLICE:
626  if ((nal->data[1] & 0xFC) == 0x98) {
627  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
628  h->next_outputed_poc = INT_MIN;
629  ret = -1;
630  goto end;
631  }
632  if(!idr_cleared) {
633  idr(h); // FIXME ensure we don't lose some frames if there is reordering
634  }
635  idr_cleared = 1;
636  h->has_recovery_point = 1;
637  case H264_NAL_SLICE:
638  h->has_slice = 1;
639 
640  if ((err = ff_h264_queue_decode_slice(h, nal))) {
641  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
642  sl->ref_count[0] = sl->ref_count[1] = 0;
643  break;
644  }
645 
646  if (h->current_slice == 1) {
647  if (avctx->active_thread_type & FF_THREAD_FRAME &&
648  i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
649  ff_thread_finish_setup(avctx);
650  h->setup_finished = 1;
651  }
652 
653  if (h->avctx->hwaccel &&
654  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
655  goto end;
656  }
657 
658  max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
659  if (h->nb_slice_ctx_queued == max_slice_ctx) {
660  if (h->avctx->hwaccel) {
661  ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
662  h->nb_slice_ctx_queued = 0;
663  } else
664  ret = ff_h264_execute_decode_slices(h);
665  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
666  goto end;
667  }
668  break;
669  case H264_NAL_DPA:
670  case H264_NAL_DPB:
671  case H264_NAL_DPC:
672  avpriv_request_sample(avctx, "data partitioning");
673  break;
674  case H264_NAL_SEI:
675  if (h->setup_finished) {
676  avpriv_request_sample(avctx, "Late SEI");
677  break;
678  }
679  ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
680  h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
681  if (avctx->debug & FF_DEBUG_GREEN_MD)
682  debug_green_metadata(&h->sei.green_metadata, h->avctx);
683  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
684  goto end;
685  break;
686  case H264_NAL_SPS: {
687  GetBitContext tmp_gb = nal->gb;
688  if (avctx->hwaccel && avctx->hwaccel->decode_params) {
689  ret = avctx->hwaccel->decode_params(avctx,
690  nal->type,
691  nal->raw_data,
692  nal->raw_size);
693  if (ret < 0)
694  goto end;
695  }
696  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
697  break;
698  av_log(h->avctx, AV_LOG_DEBUG,
699  "SPS decoding failure, trying again with the complete NAL\n");
700  init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
701  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
702  break;
703  ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
704  break;
705  }
706  case H264_NAL_PPS:
707  if (avctx->hwaccel && avctx->hwaccel->decode_params) {
708  ret = avctx->hwaccel->decode_params(avctx,
709  nal->type,
710  nal->raw_data,
711  nal->raw_size);
712  if (ret < 0)
713  goto end;
714  }
715  ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
716  nal->size_bits);
717  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
718  goto end;
719  break;
720  case H264_NAL_AUD:
721  case H264_NAL_END_SEQUENCE:
722  case H264_NAL_END_STREAM:
723  case H264_NAL_FILLER_DATA:
724  case H264_NAL_SPS_EXT:
725  case H264_NAL_AUXILIARY_SLICE:
726  break;
727  default:
728  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
729  nal->type, nal->size_bits);
730  }
731 
732  if (err < 0) {
733  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
734  }
735  }
736 
737  ret = ff_h264_execute_decode_slices(h);
738  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
739  goto end;
740 
741  // set decode_error_flags to allow users to detect concealed decoding errors
742  if ((ret < 0 || h->er.error_occurred) && h->cur_pic_ptr) {
743  h->cur_pic_ptr->f->decode_error_flags |= FF_DECODE_ERROR_DECODE_SLICES;
744  }
745 
746  ret = 0;
747 end:
748 
749 #if CONFIG_ERROR_RESILIENCE
750  /*
751  * FIXME: Error handling code does not seem to support interlaced
752  * when slices span multiple rows.
753  * The ff_er_add_slice calls don't work right for bottom
754  * fields; they cause massive erroneous error concealing.
755  * Error marking covers both fields (top and bottom).
756  * This causes a mismatched s->error_count
757  * and a bad error table. Further, the error count goes to
758  * INT_MAX when called for bottom field, because mb_y is
759  * past end by one (caller's fault) and resync_mb_y != 0
760  * causes problems for the first MB line, too.
761  */
762  if (!FIELD_PICTURE(h) && h->current_slice && h->enable_er) {
763 
764  H264SliceContext *sl = h->slice_ctx;
765  int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
766 
767  ff_h264_set_erpic(&h->er.cur_pic, h->cur_pic_ptr);
768 
769  if (use_last_pic) {
770  ff_h264_set_erpic(&h->er.last_pic, &h->last_pic_for_ec);
771  sl->ref_list[0][0].parent = &h->last_pic_for_ec;
772  memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
773  memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
774  sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
775  } else if (sl->ref_count[0]) {
776  ff_h264_set_erpic(&h->er.last_pic, sl->ref_list[0][0].parent);
777  } else
778  ff_h264_set_erpic(&h->er.last_pic, NULL);
779 
780  if (sl->ref_count[1])
781  ff_h264_set_erpic(&h->er.next_pic, sl->ref_list[1][0].parent);
782 
783  ff_er_frame_end(&h->er);
784  if (use_last_pic)
785  memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
786  }
787 #endif /* CONFIG_ERROR_RESILIENCE */
788  /* clean up */
789  if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
790  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
791  h->picture_structure == PICT_BOTTOM_FIELD);
792  }
793 
794  return (ret < 0) ? ret : buf_size;
795 }
796 
797 /**
798  * Return the number of bytes consumed for building the current frame.
799  */
800 static int get_consumed_bytes(int pos, int buf_size)
801 {
802  if (pos == 0)
803  pos = 1; // avoid infinite loops (I doubt that is needed but...)
804  if (pos + 10 > buf_size)
805  pos = buf_size; // oops ;)
806 
807  return pos;
808 }
809 
810 static int h264_export_enc_params(AVFrame *f, H264Picture *p)
811 {
812  AVVideoEncParams *par;
813  unsigned int nb_mb = p->mb_height * p->mb_width;
814  unsigned int x, y;
815 
816  par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_H264, nb_mb);
817  if (!par)
818  return AVERROR(ENOMEM);
819 
820  par->qp = p->pps->init_qp;
821 
822  par->delta_qp[1][0] = p->pps->chroma_qp_index_offset[0];
823  par->delta_qp[1][1] = p->pps->chroma_qp_index_offset[0];
824  par->delta_qp[2][0] = p->pps->chroma_qp_index_offset[1];
825  par->delta_qp[2][1] = p->pps->chroma_qp_index_offset[1];
826 
827  for (y = 0; y < p->mb_height; y++)
828  for (x = 0; x < p->mb_width; x++) {
829  const unsigned int block_idx = y * p->mb_width + x;
830  const unsigned int mb_xy = y * p->mb_stride + x;
831  AVVideoBlockParams *const b = av_video_enc_params_block(par, block_idx);
832 
833  b->src_x = x * 16;
834  b->src_y = y * 16;
835  b->w = 16;
836  b->h = 16;
837 
838  b->delta_qp = p->qscale_table[mb_xy] - par->qp;
839  }
840 
841  return 0;
842 }
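/* The per-macroblock parameters filled in above are attached to the output
 * frame when the caller sets AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS in
 * AVCodecContext.export_side_data. A sketch of reading them back on the
 * application side (assumes <libavutil/video_enc_params.h>):
 *
 *     AVFrameSideData *sd =
 *         av_frame_get_side_data(frame, AV_FRAME_DATA_VIDEO_ENC_PARAMS);
 *     if (sd) {
 *         AVVideoEncParams   *par = (AVVideoEncParams *)sd->data;
 *         AVVideoBlockParams *blk = av_video_enc_params_block(par, 0);
 *         // blk->delta_qp is the first macroblock's offset from par->qp
 *     }
 */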
843 
844 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
845 {
846  int ret;
847 
848  ret = av_frame_ref(dst, srcp->needs_fg ? srcp->f_grain : srcp->f);
849  if (ret < 0)
850  return ret;
851 
852  if (srcp->needs_fg && (ret = av_frame_copy_props(dst, srcp->f)) < 0)
853  return ret;
854 
855  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
856 
857  if (srcp->sei_recovery_frame_cnt == 0)
858  dst->key_frame = 1;
859 
860  if (h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
861  ret = h264_export_enc_params(dst, srcp);
862  if (ret < 0)
863  goto fail;
864  }
865 
866  if (!(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN))
867  av_frame_remove_side_data(dst, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
868 
869  return 0;
870 fail:
871  av_frame_unref(dst);
872  return ret;
873 }
874 
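/* is_avcc_extradata() below probes for an AVCDecoderConfigurationRecord
 * ("avcC") that was sent as packet payload. Roughly, such a record is laid
 * out as:
 *
 *     byte 0     configurationVersion (1)
 *     bytes 1-3  profile, profile_compatibility, level
 *     byte 4     6 reserved bits + 2-bit lengthSizeMinusOne  -> (buf[4] & 0xFC) == 0xFC
 *     byte 5     3 reserved bits + 5-bit SPS count           -> buf[5] & 0x1f
 *     then       per SPS: 16-bit length + SPS NAL (type 7)
 *     then       8-bit PPS count; per PPS: 16-bit length + PPS NAL (type 8)
 *
 * which is what the length/NAL-type loop below checks. */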
875 static int is_avcc_extradata(const uint8_t *buf, int buf_size)
876 {
877  int cnt= buf[5]&0x1f;
878  const uint8_t *p= buf+6;
879  if (!cnt)
880  return 0;
881  while(cnt--){
882  int nalsize= AV_RB16(p) + 2;
883  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
884  return 0;
885  p += nalsize;
886  }
887  cnt = *(p++);
888  if(!cnt)
889  return 0;
890  while(cnt--){
891  int nalsize= AV_RB16(p) + 2;
892  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
893  return 0;
894  p += nalsize;
895  }
896  return 1;
897 }
898 
899 static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
900 {
901  int ret;
902 
903  if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
904  (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
905  out->recovered)) {
906 
907  if (!h->avctx->hwaccel &&
908  (out->field_poc[0] == INT_MAX ||
909  out->field_poc[1] == INT_MAX)
910  ) {
911  int p;
912  AVFrame *f = out->f;
913  int field = out->field_poc[0] == INT_MAX;
914  uint8_t *dst_data[4];
915  int linesizes[4];
916  const uint8_t *src_data[4];
917 
918  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
919 
920  for (p = 0; p<4; p++) {
921  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
922  src_data[p] = f->data[p] + field *f->linesize[p];
923  linesizes[p] = 2*f->linesize[p];
924  }
925 
926  av_image_copy(dst_data, linesizes, src_data, linesizes,
927  f->format, f->width, f->height>>1);
928  }
929 
930  ret = output_frame(h, dst, out);
931  if (ret < 0)
932  return ret;
933 
934  *got_frame = 1;
935 
936  if (CONFIG_MPEGVIDEODEC) {
937  ff_print_debug_info2(h->avctx, dst, NULL,
938  out->mb_type,
939  out->qscale_table,
940  out->motion_val,
941  out->mb_width, out->mb_height, out->mb_stride, 1);
942  }
943  }
944 
945  return 0;
946 }
947 
948 static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
949  int *got_frame, int buf_index)
950 {
951  int ret, i, out_idx;
952  H264Picture *out = h->delayed_pic[0];
953 
954  h->cur_pic_ptr = NULL;
955  h->first_field = 0;
956 
957  out_idx = 0;
958  for (i = 1;
959  h->delayed_pic[i] &&
960  !h->delayed_pic[i]->f->key_frame &&
961  !h->delayed_pic[i]->mmco_reset;
962  i++)
963  if (h->delayed_pic[i]->poc < out->poc) {
964  out = h->delayed_pic[i];
965  out_idx = i;
966  }
967 
968  for (i = out_idx; h->delayed_pic[i]; i++)
969  h->delayed_pic[i] = h->delayed_pic[i + 1];
970 
971  if (out) {
972  out->reference &= ~DELAYED_PIC_REF;
973  ret = finalize_frame(h, dst_frame, out, got_frame);
974  if (ret < 0)
975  return ret;
976  }
977 
978  return buf_index;
979 }
980 
981 static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
982  int *got_frame, AVPacket *avpkt)
983 {
984  const uint8_t *buf = avpkt->data;
985  int buf_size = avpkt->size;
986  H264Context *h = avctx->priv_data;
987  int buf_index;
988  int ret;
989 
990  h->flags = avctx->flags;
991  h->setup_finished = 0;
992  h->nb_slice_ctx_queued = 0;
993 
994  ff_h264_unref_picture(h, &h->last_pic_for_ec);
995 
996  /* end of stream, output what is still in the buffers */
997  if (buf_size == 0)
998  return send_next_delayed_frame(h, pict, got_frame, 0);
999 
1000  if (av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1001  size_t side_size;
1002  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1003  ff_h264_decode_extradata(side, side_size,
1004  &h->ps, &h->is_avc, &h->nal_length_size,
1005  avctx->err_recognition, avctx);
1006  }
1007  if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
1008  if (is_avcc_extradata(buf, buf_size))
1009  return ff_h264_decode_extradata(buf, buf_size,
1010  &h->ps, &h->is_avc, &h->nal_length_size,
1011  avctx->err_recognition, avctx);
1012  }
1013 
1014  buf_index = decode_nal_units(h, buf, buf_size);
1015  if (buf_index < 0)
1016  return AVERROR_INVALIDDATA;
1017 
1018  if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
1019  av_assert0(buf_index <= buf_size);
1020  return send_next_delayed_frame(h, pict, got_frame, buf_index);
1021  }
1022 
1023  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
1024  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1025  buf_size >= 4 && !memcmp("Q264", buf, 4))
1026  return buf_size;
1027  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1028  return AVERROR_INVALIDDATA;
1029  }
1030 
1031  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1032  (h->mb_y >= h->mb_height && h->mb_height)) {
1033  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1034  return ret;
1035 
1036  /* Wait for second field. */
1037  if (h->next_output_pic) {
1038  ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
1039  if (ret < 0)
1040  return ret;
1041  }
1042  }
1043 
1044  av_assert0(pict->buf[0] || !*got_frame);
1045 
1046  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1047 
1048  return get_consumed_bytes(buf_index, buf_size);
1049 }
1050 
1051 #define OFFSET(x) offsetof(H264Context, x)
1052 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1053 #define VDX VD | AV_OPT_FLAG_EXPORT
1054 static const AVOption h264_options[] = {
1055  { "is_avc", "is avc", OFFSET(is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VDX },
1056  { "nal_length_size", "nal_length_size", OFFSET(nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, VDX },
1057  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1058  { "x264_build", "Assume this x264 version if no x264 version found in any SEI", OFFSET(x264_build), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VD },
1059  { NULL },
1060 };
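/* These private options go through the regular AVOption machinery; a sketch of
 * setting one when opening the decoder (error handling omitted):
 *
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "enable_er", "0", 0);
 *     avcodec_open2(avctx, codec, &opts);
 *     av_dict_free(&opts);
 *
 * "is_avc" and "nal_length_size" carry AV_OPT_FLAG_EXPORT, so callers can read
 * back the values the decoder derived from AVCC-style extradata. */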
1061 
1062 static const AVClass h264_class = {
1063  .class_name = "H264 Decoder",
1064  .item_name = av_default_item_name,
1065  .option = h264_options,
1066  .version = LIBAVUTIL_VERSION_INT,
1067 };
1068 
1069 const FFCodec ff_h264_decoder = {
1070  .p.name = "h264",
1071  .p.long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1072  .p.type = AVMEDIA_TYPE_VIDEO,
1073  .p.id = AV_CODEC_ID_H264,
1074  .priv_data_size = sizeof(H264Context),
1075  .init = h264_decode_init,
1076  .close = h264_decode_end,
1077  FF_CODEC_DECODE_CB(h264_decode_frame),
1078  .p.capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1079  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1080  AV_CODEC_CAP_FRAME_THREADS,
1081  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1082 #if CONFIG_H264_DXVA2_HWACCEL
1083  HWACCEL_DXVA2(h264),
1084 #endif
1085 #if CONFIG_H264_D3D11VA_HWACCEL
1086  HWACCEL_D3D11VA(h264),
1087 #endif
1088 #if CONFIG_H264_D3D11VA2_HWACCEL
1089  HWACCEL_D3D11VA2(h264),
1090 #endif
1091 #if CONFIG_H264_NVDEC_HWACCEL
1092  HWACCEL_NVDEC(h264),
1093 #endif
1094 #if CONFIG_H264_VAAPI_HWACCEL
1095  HWACCEL_VAAPI(h264),
1096 #endif
1097 #if CONFIG_H264_VDPAU_HWACCEL
1098  HWACCEL_VDPAU(h264),
1099 #endif
1100 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
1101  HWACCEL_VIDEOTOOLBOX(h264),
1102 #endif
1103  NULL
1104  },
1105  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
1106  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_EXPORTS_CROPPING,
1107  .flush = h264_decode_flush,
1108  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1109  .update_thread_context_for_user = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context_for_user),
1110  .p.profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1111  .p.priv_class = &h264_class,
1112 };
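For reference, a minimal sketch of driving ff_h264_decoder through the public libavcodec API; the wrapper name decode_one_h264_packet is illustrative (not part of this file) and error handling is reduced to early returns:

#include <libavcodec/avcodec.h>

int decode_one_h264_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *ctx  = avcodec_alloc_context3(codec);
    int ret;

    if (!ctx)
        return AVERROR(ENOMEM);
    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0) {
        /* avcodec_send_packet() eventually reaches h264_decode_frame()
         * through FF_CODEC_DECODE_CB(h264_decode_frame) above. */
        ret = avcodec_send_packet(ctx, pkt);
        if (ret >= 0)
            ret = avcodec_receive_frame(ctx, frame); /* AVERROR(EAGAIN) while
                                                        output is still delayed */
    }
    avcodec_free_context(&ctx);
    return ret;
}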
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1379
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:224
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:36
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:292
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:134
ff_h264_sei_uninit
void ff_h264_sei_uninit(H264SEIContext *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:48
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:39
h2645_parse.h
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1271
opt.h
ff_h264_mb_sizes
const uint16_t ff_h264_mb_sizes[4]
Definition: h264dec.c:55
H264Picture::f
AVFrame * f
Definition: h264dec.h:108
ff_h264_ps_uninit
void ff_h264_ps_uninit(H264ParamSets *ps)
Uninit H264 param sets structure.
Definition: h264_ps.c:316
idr
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264dec.c:432
out
FILE * out
Definition: movenc.c:54
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2662
h264_decode_init
static av_cold int h264_decode_init(AVCodecContext *avctx)
Definition: h264dec.c:370
SLICE_FLAG_ALLOW_FIELD
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG-2 field pics)
Definition: avcodec.h:854
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1344
H264_NAL_AUXILIARY_SLICE
@ H264_NAL_AUXILIARY_SLICE
Definition: h264.h:53
mv
static const int8_t mv[256][2]
Definition: 4xm.c:80
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:196
AV_FRAME_DATA_FILM_GRAIN_PARAMS
@ AV_FRAME_DATA_FILM_GRAIN_PARAMS
Film grain parameters for a frame, described by AVFilmGrainParams.
Definition: frame.h:184
H264Picture::pps
const PPS * pps
Definition: h264dec.h:153
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:258
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:308
AVPacket::data
uint8_t * data
Definition: packet.h:374
AVOption
AVOption.
Definition: opt.h:251
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:528
b
#define b
Definition: input.c:34
H264_NAL_DPB
@ H264_NAL_DPB
Definition: h264.h:37
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:260
H264_NAL_FILLER_DATA
@ H264_NAL_FILLER_DATA
Definition: h264.h:46
H264SEIGreenMetaData::xsd_metric_value
uint16_t xsd_metric_value
Definition: h264_sei.h:160
H264SEIGreenMetaData::period_type
uint8_t period_type
Definition: h264_sei.h:152
PPS::chroma_qp_index_offset
int chroma_qp_index_offset[2]
Definition: h264_ps.h:122
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:114
FFCodec
Definition: codec_internal.h:112
ERContext
Definition: error_resilience.h:53
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:476
decode_nal_units
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264dec.c:573
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
h264_export_enc_params
static int h264_export_enc_params(AVFrame *f, H264Picture *p)
Definition: h264dec.c:810
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:73
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:525
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:223
H2645NAL::size_bits
int size_bits
Size, in bits, of just the data, excluding the stop bit and any trailing padding.
Definition: h2645_parse.h:42
ff_h264_decode_picture_parameter_set
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length)
Decode PPS.
Definition: h264_ps.c:747
FF_DECODE_ERROR_DECODE_SLICES
#define FF_DECODE_ERROR_DECODE_SLICES
Definition: frame.h:633
init
static int init
Definition: av_tx.c:47
H264SliceContext
Definition: h264dec.h:170
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:53
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:291
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:224
debug_green_metadata
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
Definition: h264dec.c:545
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
H264SEIGreenMetaData::percent_non_zero_macroblocks
uint8_t percent_non_zero_macroblocks
Definition: h264_sei.h:155
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1695
fail
#define fail()
Definition: checkasm.h:131
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1463
ff_h264_sei_decode
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb, const H264ParamSets *ps, void *logctx)
Definition: h264_sei.c:463
GetBitContext
Definition: get_bits.h:61
AV_VIDEO_ENC_PARAMS_H264
@ AV_VIDEO_ENC_PARAMS_H264
H.264 stores:
Definition: video_enc_params.h:57
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:469
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:156
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:445
finalize_frame
static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
Definition: h264dec.c:899
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:111
H264_NAL_DPA
@ H264_NAL_DPA
Definition: h264.h:36
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
H264Ref::data
uint8_t * data[3]
Definition: h264dec.h:160
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
H264_NAL_PPS
@ H264_NAL_PPS
Definition: h264.h:42
ERContext::mb_num
int mb_num
Definition: error_resilience.h:60
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
ERContext::avctx
AVCodecContext * avctx
Definition: error_resilience.h:54
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:179
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
h264_decode_frame
static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict, int *got_frame, AVPacket *avpkt)
Definition: h264dec.c:981
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
h264_free_pic
static void h264_free_pic(H264Context *h, H264Picture *pic)
Definition: h264dec.c:332
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:595
H2645NAL::size
int size
Definition: h2645_parse.h:36
get_last_needed_nal
static int get_last_needed_nal(H264Context *h)
Definition: h264dec.c:491
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2187
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:254
h264_init_pic
static int h264_init_pic(H264Picture *pic)
Definition: h264dec.c:270
h264_decode_end
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264dec.c:339
get_consumed_bytes
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264dec.c:800
ff_h264_decode_extradata
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps, int *is_avc, int *nal_length_size, int err_recognition, void *logctx)
Definition: h264_parse.c:464
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:68
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:521
avpriv_h264_has_num_reorder_frames
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264dec.c:57
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
h264data.h
ff_h264_remove_all_refs
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:564
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:167
ERContext::dc_val
int16_t * dc_val[3]
Definition: error_resilience.h:69
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:35
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_h264_decoder
const FFCodec ff_h264_decoder
Definition: h264dec.c:1069
AVHWAccel::decode_params
int(* decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size)
Callback for parameter data (SPS/PPS/VPS etc).
Definition: avcodec.h:2143
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:149
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:77
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:99
if
if(ret)
Definition: filter_design.txt:179
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
threadframe.h
H264SEIGreenMetaData::percent_alpha_point_deblocking_instance
uint8_t percent_alpha_point_deblocking_instance
Definition: h264_sei.h:158
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:177
h264_class
static const AVClass h264_class
Definition: h264dec.c:1062
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:103
h264_er_decode_mb
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264dec.c:63
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:596
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:276
AVCodecContext::slice_flags
int slice_flags
slice flags
Definition: avcodec.h:852
h264_init_context
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264dec.c:283
H264Ref::linesize
int linesize[3]
Definition: h264dec.h:161
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:424
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:237
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
profiles.h
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
ff_h264_profiles
const AVProfile ff_h264_profiles[]
Definition: profiles.c:58
PPS::init_qp
int init_qp
pic_init_qp_minus26 + 26
Definition: h264_ps.h:120
H264_NAL_AUD
@ H264_NAL_AUD
Definition: h264.h:43
H264_NAL_SEI
@ H264_NAL_SEI
Definition: h264.h:40
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
H264Picture::mb_height
int mb_height
Definition: h264dec.h:155
H264SEIGreenMetaData
Definition: h264_sei.h:150
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1355
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:280
AVOnce
#define AVOnce
Definition: thread.h:176
H264SEIGreenMetaData::percent_intra_coded_macroblocks
uint8_t percent_intra_coded_macroblocks
Definition: h264_sei.h:156
h264_ps.h
h264_vlc_init
static AVOnce h264_vlc_init
Definition: h264dec.c:368
ff_h264_sei_stereo_mode
const char * ff_h264_sei_stereo_mode(const H264SEIFramePacking *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:546
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:58
ERContext::opaque
void * opaque
Definition: error_resilience.h:89
f
f
Definition: af_crystalizer.c:122
H264SEIGreenMetaData::percent_six_tap_filtering
uint8_t percent_six_tap_filtering
Definition: h264_sei.h:157
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:476
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
H264_NAL_END_STREAM
@ H264_NAL_END_STREAM
Definition: h264.h:45
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:343
h264_options
static const AVOption h264_options[]
Definition: h264dec.c:1054
codec_internal.h
H264SEIGreenMetaData::num_seconds
uint16_t num_seconds
Definition: h264_sei.h:153
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:57
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:326
ERContext::b8_stride
ptrdiff_t b8_stride
Definition: error_resilience.h:63
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: codec_internal.h:66
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
H2645NAL
Definition: h2645_parse.h:34
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:277
H264SEIGreenMetaData::green_metadata_type
uint8_t green_metadata_type
Definition: h264_sei.h:151
ERContext::mb_stride
ptrdiff_t mb_stride
Definition: error_resilience.h:62
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1475
AVCodecHWConfigInternal
Definition: hwconfig.h:29
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:223
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:784
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:396
height
#define height
ERContext::decode_mb
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: error_resilience.h:86
VD
#define VD
Definition: av1dec.c:1234
output_frame
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264dec.c:844
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1474
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:235
H264_NAL_SPS_EXT
@ H264_NAL_SPS_EXT
Definition: h264.h:47
h264dec.h
ff_h264_decode_seq_parameter_set
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:332
H264_NAL_SLICE
@ H264_NAL_SLICE
Definition: h264.h:35
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:150
is_avcc_extradata
static int is_avcc_extradata(const uint8_t *buf, int buf_size)
Definition: h264dec.c:875
H264Context
H264Context.
Definition: h264dec.h:330
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:321
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2156
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:251
H264_NAL_DPC
@ H264_NAL_DPC
Definition: h264.h:38
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2894
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ERContext::mb_width
int mb_width
Definition: error_resilience.h:61
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
ff_h264_decode_init_vlc
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:325
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:179
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
avcodec.h
ret
ret
Definition: filter_design.txt:187
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ERContext::mb_height
int mb_height
Definition: error_resilience.h:61
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
FMO
#define FMO
Definition: h264dec.h:56
FF_DEBUG_GREEN_MD
#define FF_DEBUG_GREEN_MD
Definition: avcodec.h:1336
pos
unsigned int pos
Definition: spdifenc.c:412
ff_thread_finish_setup
Frame-threading notes: the pkt_dts and pkt_pts fields in AVFrame will work as usual. Codecs whose streams don't reset across frames will not work, because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before ff_thread_await_progress() has been called on them. Per-frame setup code, as well as code calling up to it, must be moved before the decode process starts; call ff_thread_finish_setup() afterwards. If some code can't be moved...
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only: export the AVVideoEncParams structure through frame side data.
Definition: avcodec.h:357
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:312
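A user-side sketch of opting into this behaviour, normally done before avcodec_open2(); the function name enable_chunked_input() is illustrative.
#include <libavcodec/avcodec.h>

/* Illustrative: allow feeding packets that may end mid-frame rather than
 * exactly on frame boundaries. */
static void enable_chunked_input(AVCodecContext *avctx)
{
    avctx->flags2 |= AV_CODEC_FLAG2_CHUNKS;
}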
AVCodecContext::draw_horiz_band
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non-NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band.
Definition: avcodec.h:624
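A minimal sketch of a user-supplied callback matching this signature; the logging body and the name my_draw_horiz_band() are illustrative only.
#include <libavcodec/avcodec.h>
#include <libavutil/log.h>

/* Called by the decoder as horizontal bands of the picture become
 * available; 'y' and 'height' delimit the band, 'offset' holds
 * per-plane offsets into src->data. */
static void my_draw_horiz_band(AVCodecContext *s, const AVFrame *src,
                               int offset[AV_NUM_DATA_POINTERS],
                               int y, int type, int height)
{
    av_log(s, AV_LOG_DEBUG, "band: y=%d height=%d type=%d\n", y, type, height);
}

/* Installed as: avctx->draw_horiz_band = my_draw_horiz_band;  (before opening the codec) */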
OFFSET
In the options table: name is the option name, keep it simple and lowercase; descriptions are short, without a trailing period, and say what the option does, for example "set the foo of the bar"; offset is the offset of the field in your context, see the OFFSET() macro; the remaining columns hold the default, minimum and maximum values and the flags.
AVCodecContext
main external API structure.
Definition: avcodec.h:389
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1482
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a maximum of 31.
Definition: golomb.h:120
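For illustration, a stand-alone ue(v) reader showing what Exp-Golomb coding amounts to; it works on a plain byte buffer rather than FFmpeg's GetBitContext (whose readers add table-assisted fast paths), and get_bit()/read_ue() are hypothetical names. For example, the bit pattern 00111 decodes to (1 << 2) - 1 + 3 = 6.
#include <stddef.h>
#include <stdint.h>

/* Return bit number 'pos' of 'buf', most significant bit first. */
static unsigned get_bit(const uint8_t *buf, size_t pos)
{
    return (buf[pos >> 3] >> (7 - (pos & 7))) & 1;
}

/* Hypothetical stand-alone ue(v) reader: count leading zero bits, consume
 * the stop bit, then read as many suffix bits as there were leading zeros. */
static unsigned read_ue(const uint8_t *buf, size_t *pos)
{
    unsigned leading_zeros = 0, suffix = 0;

    while (!get_bit(buf, (*pos)++))
        leading_zeros++;
    for (unsigned i = 0; i < leading_zeros; i++)
        suffix = (suffix << 1) | get_bit(buf, (*pos)++);

    return (1u << leading_zeros) - 1 + suffix;   /* e.g. "00111" -> 6 */
}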
av_image_copy
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:422
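A small sketch of the typical call pattern, copying the planes of one already-allocated frame into another of the same format and size; copy_frame_pixels() is an illustrative name, not an FFmpeg API.
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>

/* Both frames are assumed to be allocated with the same format and size. */
static void copy_frame_pixels(AVFrame *dst, const AVFrame *src)
{
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)src->data, src->linesize,
                  src->format, src->width, src->height);
}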
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:275
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA side data is used to notify the codec or the format that the extradata buffer was changed and the receiving side should act upon it appropriately.
Definition: packet.h:56
error_resilience.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
H264Picture::mb_width
int mb_width
Definition: h264dec.h:155
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:620
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:814
H264Picture
Definition: h264dec.h:107
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
ERContext::quarter_sample
int quarter_sample
Definition: error_resilience.h:83
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:82
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264pred.h:89
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:219
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1322
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:262
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:286
ERContext::er_temp_buffer
uint8_t * er_temp_buffer
Definition: error_resilience.h:68
h264_decode_flush
static void h264_decode_flush(AVCodecContext *avctx)
Definition: h264dec.c:470
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:69
H264SliceContext::er
ERContext * er
Definition: h264dec.h:173
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:244
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:892
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
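A brief sketch: attaching a key/value pair to a frame's metadata dictionary (av_dict_set() creates the dictionary on first use); tag_frame() is an illustrative helper, not part of the decoder.
#include <libavutil/dict.h>
#include <libavutil/frame.h>

/* Returns 0 on success or a negative AVERROR code. */
static int tag_frame(AVFrame *frame, const char *key, const char *value)
{
    return av_dict_set(&frame->metadata, key, value, 0);
}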
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
h264.h
imgutils.h
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:279
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:50
H264SEIGreenMetaData::num_pictures
uint16_t num_pictures
Definition: h264_sei.h:154
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:278
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non-reference frames
Definition: defs.h:50
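A user-side sketch of applying this discard level to a decoder context, which lets the decoder drop frames that nothing else references; skip_nonref_frames() is an illustrative name.
#include <libavcodec/avcodec.h>

/* Ask the decoder to skip frames that are not used as references. */
static void skip_nonref_frames(AVCodecContext *avctx)
{
    avctx->skip_frame = AVDISCARD_NONREF;
}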
int
int
Definition: ffmpeg_filter.c:153
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
send_next_delayed_frame
static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame, int *got_frame, int buf_index)
Definition: h264dec.c:948
H264Ref::reference
int reference
Definition: h264dec.h:163
H264_NAL_END_SEQUENCE
@ H264_NAL_END_SEQUENCE
Definition: h264.h:44
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only: do not apply film grain, export it instead.
Definition: avcodec.h:362
video_enc_params.h
AV_RB16
Read an unsigned 16-bit big-endian value from a byte buffer (generated here by the byte-stream read/write macro templates).
Definition: bytestream.h:98
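The equivalent of what this macro computes, written out as a function for clarity; rb16() is an illustrative name, and the avcC example reflects how such 2-byte big-endian sizes commonly appear in H.264 extradata.
#include <stdint.h>

/* Read two consecutive bytes as an unsigned 16-bit big-endian value,
 * e.g. the 2-byte parameter-set sizes found in avcC-style extradata. */
static uint16_t rb16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}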
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:234
H264SEIGreenMetaData::xsd_metric_type
uint8_t xsd_metric_type
Definition: h264_sei.h:159