FFmpeg
h264dec.c
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/stereo3d.h"
35 #include "libavutil/video_enc_params.h"
36 
37 #include "internal.h"
38 #include "bytestream.h"
39 #include "cabac.h"
40 #include "cabac_functions.h"
41 #include "error_resilience.h"
42 #include "avcodec.h"
43 #include "h264.h"
44 #include "h264dec.h"
45 #include "h2645_parse.h"
46 #include "h264data.h"
47 #include "h264chroma.h"
48 #include "h264_mvpred.h"
49 #include "h264_ps.h"
50 #include "golomb.h"
51 #include "hwconfig.h"
52 #include "mathops.h"
53 #include "me_cmp.h"
54 #include "mpegutils.h"
55 #include "profiles.h"
56 #include "rectangle.h"
57 #include "thread.h"
58 
59 const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
60 
61 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
62 {
63  H264Context *h = avctx->priv_data;
64  return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
65 }
66 
67 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
68  int (*mv)[2][4][2],
69  int mb_x, int mb_y, int mb_intra, int mb_skipped)
70 {
71  H264Context *h = opaque;
72  H264SliceContext *sl = &h->slice_ctx[0];
73 
74  sl->mb_x = mb_x;
75  sl->mb_y = mb_y;
76  sl->mb_xy = mb_x + mb_y * h->mb_stride;
77  memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
78  av_assert1(ref >= 0);
79  /* FIXME: It is possible albeit uncommon that slice references
80  * differ between slices. We take the easy approach and ignore
81  * it for now. If this turns out to have any relevance in
82  * practice then correct remapping should be added. */
83  if (ref >= sl->ref_count[0])
84  ref = 0;
85  if (!sl->ref_list[0][ref].data[0]) {
86  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
87  ref = 0;
88  }
89  if ((sl->ref_list[0][ref].reference&3) != 3) {
90  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
91  return;
92  }
93  fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
94  2, 2, 2, ref, 1);
95  fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
96  fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
97  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
98  sl->mb_mbaff =
99  sl->mb_field_decoding_flag = 0;
100  ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
101 }
102 
103 void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
104  int y, int height)
105 {
106  AVCodecContext *avctx = h->avctx;
107  const AVFrame *src = h->cur_pic.f;
108  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
109  int vshift = desc->log2_chroma_h;
110  const int field_pic = h->picture_structure != PICT_FRAME;
111  if (field_pic) {
112  height <<= 1;
113  y <<= 1;
114  }
115 
116  height = FFMIN(height, avctx->height - y);
117 
118  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
119  return;
120 
121  if (avctx->draw_horiz_band) {
122  int offset[AV_NUM_DATA_POINTERS];
123  int i;
124 
125  offset[0] = y * src->linesize[0];
126  offset[1] =
127  offset[2] = (y >> vshift) * src->linesize[1];
128  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
129  offset[i] = 0;
130 
131  emms_c();
132 
133  avctx->draw_horiz_band(avctx, src, offset,
134  y, h->picture_structure, height);
135  }
136 }
137 
138 void ff_h264_free_tables(H264Context *h)
139 {
140  int i;
141 
142  av_freep(&h->intra4x4_pred_mode);
143  av_freep(&h->chroma_pred_mode_table);
144  av_freep(&h->cbp_table);
145  av_freep(&h->mvd_table[0]);
146  av_freep(&h->mvd_table[1]);
147  av_freep(&h->direct_table);
148  av_freep(&h->non_zero_count);
149  av_freep(&h->slice_table_base);
150  h->slice_table = NULL;
151  av_freep(&h->list_counts);
152 
153  av_freep(&h->mb2b_xy);
154  av_freep(&h->mb2br_xy);
155 
156  av_buffer_pool_uninit(&h->qscale_table_pool);
157  av_buffer_pool_uninit(&h->mb_type_pool);
158  av_buffer_pool_uninit(&h->motion_val_pool);
159  av_buffer_pool_uninit(&h->ref_index_pool);
160 
161  for (i = 0; i < h->nb_slice_ctx; i++) {
162  H264SliceContext *sl = &h->slice_ctx[i];
163 
164  av_freep(&sl->dc_val_base);
165  av_freep(&sl->er.mb_index2xy);
166  av_freep(&sl->er.error_status_table);
167  av_freep(&sl->er.er_temp_buffer);
168 
169  av_freep(&sl->bipred_scratchpad);
170  av_freep(&sl->edge_emu_buffer);
171  av_freep(&sl->top_borders[0]);
172  av_freep(&sl->top_borders[1]);
173 
174  sl->bipred_scratchpad_allocated = 0;
175  sl->edge_emu_buffer_allocated = 0;
176  sl->top_borders_allocated[0] = 0;
177  sl->top_borders_allocated[1] = 0;
178  }
179 }
180 
181 int ff_h264_alloc_tables(H264Context *h)
182 {
183  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
184  const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
185  int x, y;
186 
187  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
188  row_mb_num, 8 * sizeof(uint8_t), fail)
189  h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
190 
191  FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
192  big_mb_num * 48 * sizeof(uint8_t), fail)
193  FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
194  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
195  FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
196  big_mb_num * sizeof(uint16_t), fail)
197  FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
198  big_mb_num * sizeof(uint8_t), fail)
199  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
200  row_mb_num, 16 * sizeof(uint8_t), fail);
201  FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
202  row_mb_num, 16 * sizeof(uint8_t), fail);
203  h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
204  h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
205 
206  FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
207  4 * big_mb_num * sizeof(uint8_t), fail);
208  FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
209  big_mb_num * sizeof(uint8_t), fail)
210 
211  memset(h->slice_table_base, -1,
212  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
213  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
214 
215  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
216  big_mb_num * sizeof(uint32_t), fail);
217  FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
218  big_mb_num * sizeof(uint32_t), fail);
219  for (y = 0; y < h->mb_height; y++)
220  for (x = 0; x < h->mb_width; x++) {
221  const int mb_xy = x + y * h->mb_stride;
222  const int b_xy = 4 * x + 4 * y * h->b_stride;
223 
224  h->mb2b_xy[mb_xy] = b_xy;
225  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
226  }
227 
228  return 0;
229 
230 fail:
231  return AVERROR(ENOMEM);
232 }
233 
234 /**
235  * Init context
236  * Allocate buffers which are not shared amongst multiple threads.
237  */
238 int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
239 {
240  ERContext *er = &sl->er;
241  int mb_array_size = h->mb_height * h->mb_stride;
242  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
243  int c_size = h->mb_stride * (h->mb_height + 1);
244  int yc_size = y_size + 2 * c_size;
245  int x, y, i;
246 
247  sl->ref_cache[0][scan8[5] + 1] =
248  sl->ref_cache[0][scan8[7] + 1] =
249  sl->ref_cache[0][scan8[13] + 1] =
250  sl->ref_cache[1][scan8[5] + 1] =
251  sl->ref_cache[1][scan8[7] + 1] =
252  sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
253 
254  if (sl != h->slice_ctx) {
255  memset(er, 0, sizeof(*er));
256  } else
257  if (CONFIG_ERROR_RESILIENCE) {
258 
259  /* init ER */
260  er->avctx = h->avctx;
261  er->decode_mb = h264_er_decode_mb;
262  er->opaque = h;
263  er->quarter_sample = 1;
264 
265  er->mb_num = h->mb_num;
266  er->mb_width = h->mb_width;
267  er->mb_height = h->mb_height;
268  er->mb_stride = h->mb_stride;
269  er->b8_stride = h->mb_width * 2 + 1;
270 
271  // error resilience code looks cleaner with this
272  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
273  (h->mb_num + 1) * sizeof(int), fail);
274 
275  for (y = 0; y < h->mb_height; y++)
276  for (x = 0; x < h->mb_width; x++)
277  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
278 
279  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
280  h->mb_stride + h->mb_width;
281 
281 
282  FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
283  mb_array_size * sizeof(uint8_t), fail);
284 
285  FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
286  h->mb_height * h->mb_stride * (4*sizeof(int) + 1), fail);
287 
288  FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
289  yc_size * sizeof(int16_t), fail);
290  er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
291  er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
292  er->dc_val[2] = er->dc_val[1] + c_size;
293  for (i = 0; i < yc_size; i++)
294  sl->dc_val_base[i] = 1024;
295  }
296 
297  return 0;
298 
299 fail:
300  return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
301 }
302 
303 static int h264_init_context(AVCodecContext *avctx, H264Context *h)
304 {
305  int i;
306 
307  h->avctx = avctx;
308  h->cur_chroma_format_idc = -1;
309 
310  h->width_from_caller = avctx->width;
311  h->height_from_caller = avctx->height;
312 
313  h->workaround_bugs = avctx->workaround_bugs;
314  h->flags = avctx->flags;
315  h->poc.prev_poc_msb = 1 << 16;
316  h->recovery_frame = -1;
317  h->frame_recovered = 0;
318  h->poc.prev_frame_num = -1;
319  h->sei.frame_packing.arrangement_cancel_flag = -1;
320  h->sei.unregistered.x264_build = -1;
321 
322  h->next_outputed_poc = INT_MIN;
323  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
324  h->last_pocs[i] = INT_MIN;
325 
326  ff_h264_sei_uninit(&h->sei);
327 
328  h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
329  h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
330  if (!h->slice_ctx) {
331  h->nb_slice_ctx = 0;
332  return AVERROR(ENOMEM);
333  }
334 
335  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
336  h->DPB[i].f = av_frame_alloc();
337  if (!h->DPB[i].f)
338  return AVERROR(ENOMEM);
339  }
340 
341  h->cur_pic.f = av_frame_alloc();
342  if (!h->cur_pic.f)
343  return AVERROR(ENOMEM);
344 
345  h->last_pic_for_ec.f = av_frame_alloc();
346  if (!h->last_pic_for_ec.f)
347  return AVERROR(ENOMEM);
348 
349  for (i = 0; i < h->nb_slice_ctx; i++)
350  h->slice_ctx[i].h264 = h;
351 
352  return 0;
353 }
354 
355 static av_cold int h264_decode_end(AVCodecContext *avctx)
356 {
357  H264Context *h = avctx->priv_data;
358  int i;
359 
360  ff_h264_remove_all_refs(h);
361  ff_h264_free_tables(h);
362 
363  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
364  ff_h264_unref_picture(h, &h->DPB[i]);
365  av_frame_free(&h->DPB[i].f);
366  }
367  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
368 
369  h->cur_pic_ptr = NULL;
370 
371  av_freep(&h->slice_ctx);
372  h->nb_slice_ctx = 0;
373 
374  ff_h264_sei_uninit(&h->sei);
375  ff_h264_ps_uninit(&h->ps);
376 
377  ff_h2645_packet_uninit(&h->pkt);
378 
379  ff_h264_unref_picture(h, &h->cur_pic);
380  av_frame_free(&h->cur_pic.f);
381  ff_h264_unref_picture(h, &h->last_pic_for_ec);
382  av_frame_free(&h->last_pic_for_ec.f);
383 
384  return 0;
385 }
386 
387 static AVOnce h264_vlc_init = AV_ONCE_INIT;
388 
389 static av_cold int h264_decode_init(AVCodecContext *avctx)
390 {
391  H264Context *h = avctx->priv_data;
392  int ret;
393 
394  ret = h264_init_context(avctx, h);
395  if (ret < 0)
396  return ret;
397 
398  ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
399  if (ret != 0) {
400  av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
401  return AVERROR_UNKNOWN;
402  }
403 
404  if (avctx->ticks_per_frame == 1) {
405  if(h->avctx->time_base.den < INT_MAX/2) {
406  h->avctx->time_base.den *= 2;
407  } else
408  h->avctx->time_base.num /= 2;
409  }
410  avctx->ticks_per_frame = 2;
411 
412  if (!avctx->internal->is_copy) {
413  if (avctx->extradata_size > 0 && avctx->extradata) {
414  ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
415  &h->ps, &h->is_avc, &h->nal_length_size,
416  avctx->err_recognition, avctx);
417  if (ret < 0) {
418  int explode = avctx->err_recognition & AV_EF_EXPLODE;
419  av_log(avctx, explode ? AV_LOG_ERROR: AV_LOG_WARNING,
420  "Error decoding the extradata\n");
421  if (explode) {
422  return ret;
423  }
424  ret = 0;
425  }
426  }
427  }
428 
429  if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
430  h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
431  h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
432  }
433 
434  ff_h264_flush_change(h);
435 
436  if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
437  h->enable_er = 0;
438 
439  if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
440  av_log(avctx, AV_LOG_WARNING,
441  "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
442  "Use it at your own risk\n");
443  }
444 
445  return 0;
446 }
447 
448 /**
449  * instantaneous decoder refresh.
450  */
451 static void idr(H264Context *h)
452 {
453  int i;
455  h->poc.prev_frame_num =
456  h->poc.prev_frame_num_offset = 0;
457  h->poc.prev_poc_msb = 1<<16;
458  h->poc.prev_poc_lsb = -1;
459  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
460  h->last_pocs[i] = INT_MIN;
461 }
462 
463 /* forget old pics after a seek */
464 void ff_h264_flush_change(H264Context *h)
465 {
466  int i, j;
467 
468  h->next_outputed_poc = INT_MIN;
469  h->prev_interlaced_frame = 1;
470  idr(h);
471 
472  h->poc.prev_frame_num = -1;
473  if (h->cur_pic_ptr) {
474  h->cur_pic_ptr->reference = 0;
475  for (j=i=0; h->delayed_pic[i]; i++)
476  if (h->delayed_pic[i] != h->cur_pic_ptr)
477  h->delayed_pic[j++] = h->delayed_pic[i];
478  h->delayed_pic[j] = NULL;
479  }
480  ff_h264_unref_picture(h, &h->last_pic_for_ec);
481 
482  h->first_field = 0;
483  h->recovery_frame = -1;
484  h->frame_recovered = 0;
485  h->current_slice = 0;
486  h->mmco_reset = 1;
487 }
488 
489 static void h264_decode_flush(AVCodecContext *avctx)
490 {
491  H264Context *h = avctx->priv_data;
492  int i;
493 
494  memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
495 
496  ff_h264_flush_change(h);
497  ff_h264_sei_uninit(&h->sei);
498 
499  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
500  ff_h264_unref_picture(h, &h->DPB[i]);
501  h->cur_pic_ptr = NULL;
502  ff_h264_unref_picture(h, &h->cur_pic);
503 
504  h->mb_y = 0;
505 
506  ff_h264_free_tables(h);
507  h->context_initialized = 0;
508 }
509 
510 static int get_last_needed_nal(H264Context *h)
511 {
512  int nals_needed = 0;
513  int first_slice = 0;
514  int i, ret;
515 
516  for (i = 0; i < h->pkt.nb_nals; i++) {
517  H2645NAL *nal = &h->pkt.nals[i];
518  GetBitContext gb;
519 
520  /* packets can sometimes contain multiple PPS/SPS,
521  * e.g. two PAFF field pictures in one packet, or a demuxer
522  * which splits NALs strangely; if so, when frame threading we
523  * can't start the next thread until we've read all of them */
524  switch (nal->type) {
525  case H264_NAL_SPS:
526  case H264_NAL_PPS:
527  nals_needed = i;
528  break;
529  case H264_NAL_DPA:
530  case H264_NAL_IDR_SLICE:
531  case H264_NAL_SLICE:
532  ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
533  if (ret < 0) {
534  av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
535  if (h->avctx->err_recognition & AV_EF_EXPLODE)
536  return ret;
537 
538  break;
539  }
540  if (!get_ue_golomb_long(&gb) || // first_mb_in_slice
541  !first_slice ||
542  first_slice != nal->type)
543  nals_needed = i;
544  if (!first_slice)
545  first_slice = nal->type;
546  }
547  }
548 
549  return nals_needed;
550 }
551 
552 static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
553 {
554  av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
555  av_log(logctx, AV_LOG_DEBUG, " green_metadata_type: %d\n", gm->green_metadata_type);
556 
557  if (gm->green_metadata_type == 0) {
558  av_log(logctx, AV_LOG_DEBUG, " green_metadata_period_type: %d\n", gm->period_type);
559 
560  if (gm->period_type == 2)
561  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_seconds: %d\n", gm->num_seconds);
562  else if (gm->period_type == 3)
563  av_log(logctx, AV_LOG_DEBUG, " green_metadata_num_pictures: %d\n", gm->num_pictures);
564 
565  av_log(logctx, AV_LOG_DEBUG, " SEI GREEN Complexity Metrics: %f %f %f %f\n",
566  (float)gm->percent_non_zero_macroblocks/255,
567  (float)gm->percent_intra_coded_macroblocks/255,
568  (float)gm->percent_six_tap_filtering/255,
569  (float)gm->percent_alpha_point_deblocking_instance/255);
570 
571  } else if (gm->green_metadata_type == 1) {
572  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_type: %d\n", gm->xsd_metric_type);
573 
574  if (gm->xsd_metric_type == 0)
575  av_log(logctx, AV_LOG_DEBUG, " xsd_metric_value: %f\n",
576  (float)gm->xsd_metric_value/100);
577  }
578 }
579 
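/*
 * decode_nal_units(): split the input buffer into NAL units with
 * ff_h2645_packet_split(), then dispatch each unit. Slice and IDR NALs are
 * queued via ff_h264_queue_decode_slice() and executed in batches (or handed
 * to the hwaccel), SPS/PPS NALs update the parameter sets, SEI NALs are
 * parsed, and anything else is ignored or logged. After the loop, error
 * resilience (when enabled) conceals damaged macroblocks before the picture
 * is reported as complete to waiting frame threads.
 */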
580 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
581 {
582  AVCodecContext *const avctx = h->avctx;
583  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
584  int idr_cleared=0;
585  int i, ret = 0;
586 
587  h->has_slice = 0;
588  h->nal_unit_type= 0;
589 
590  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
591  h->current_slice = 0;
592  if (!h->first_field) {
593  h->cur_pic_ptr = NULL;
594  ff_h264_sei_uninit(&h->sei);
595  }
596  }
597 
598  if (h->nal_length_size == 4) {
599  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
600  h->is_avc = 0;
601  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
602  h->is_avc = 1;
603  }
604 
605  ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc, h->nal_length_size,
606  avctx->codec_id, 0, 0);
607  if (ret < 0) {
608  av_log(avctx, AV_LOG_ERROR,
609  "Error splitting the input into NAL units.\n");
610  return ret;
611  }
612 
613  if (avctx->active_thread_type & FF_THREAD_FRAME)
614  nals_needed = get_last_needed_nal(h);
615  if (nals_needed < 0)
616  return nals_needed;
617 
618  for (i = 0; i < h->pkt.nb_nals; i++) {
619  H2645NAL *nal = &h->pkt.nals[i];
620  int max_slice_ctx, err;
621 
622  if (avctx->skip_frame >= AVDISCARD_NONREF &&
623  nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
624  continue;
625 
626  // FIXME these should stop being context-global variables
627  h->nal_ref_idc = nal->ref_idc;
628  h->nal_unit_type = nal->type;
629 
630  err = 0;
631  switch (nal->type) {
632  case H264_NAL_IDR_SLICE:
633  if ((nal->data[1] & 0xFC) == 0x98) {
634  av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
635  h->next_outputed_poc = INT_MIN;
636  ret = -1;
637  goto end;
638  }
639  if(!idr_cleared) {
640  idr(h); // FIXME ensure we don't lose some frames if there is reordering
641  }
642  idr_cleared = 1;
643  h->has_recovery_point = 1;
644  case H264_NAL_SLICE:
645  h->has_slice = 1;
646 
647  if ((err = ff_h264_queue_decode_slice(h, nal))) {
648  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
649  sl->ref_count[0] = sl->ref_count[1] = 0;
650  break;
651  }
652 
653  if (h->current_slice == 1) {
654  if (avctx->active_thread_type & FF_THREAD_FRAME &&
655  i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
656  ff_thread_finish_setup(avctx);
657  h->setup_finished = 1;
658  }
659 
660  if (h->avctx->hwaccel &&
661  (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
662  goto end;
663  }
664 
665  max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
666  if (h->nb_slice_ctx_queued == max_slice_ctx) {
667  if (h->avctx->hwaccel) {
668  ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
669  h->nb_slice_ctx_queued = 0;
670  } else
671  ret = ff_h264_execute_decode_slices(h);
672  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
673  goto end;
674  }
675  break;
676  case H264_NAL_DPA:
677  case H264_NAL_DPB:
678  case H264_NAL_DPC:
679  avpriv_request_sample(avctx, "data partitioning");
680  break;
681  case H264_NAL_SEI:
682  ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
683  h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
684  if (avctx->debug & FF_DEBUG_GREEN_MD)
685  debug_green_metadata(&h->sei.green_metadata, h->avctx);
686  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
687  goto end;
688  break;
689  case H264_NAL_SPS: {
690  GetBitContext tmp_gb = nal->gb;
691  if (avctx->hwaccel && avctx->hwaccel->decode_params) {
692  ret = avctx->hwaccel->decode_params(avctx,
693  nal->type,
694  nal->raw_data,
695  nal->raw_size);
696  if (ret < 0)
697  goto end;
698  }
699  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
700  break;
701  av_log(h->avctx, AV_LOG_DEBUG,
702  "SPS decoding failure, trying again with the complete NAL\n");
703  init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
704  if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
705  break;
706  ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
707  break;
708  }
709  case H264_NAL_PPS:
710  if (avctx->hwaccel && avctx->hwaccel->decode_params) {
711  ret = avctx->hwaccel->decode_params(avctx,
712  nal->type,
713  nal->raw_data,
714  nal->raw_size);
715  if (ret < 0)
716  goto end;
717  }
718  ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
719  nal->size_bits);
720  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
721  goto end;
722  break;
723  case H264_NAL_AUD:
724  case H264_NAL_END_SEQUENCE:
725  case H264_NAL_END_STREAM:
726  case H264_NAL_FILLER_DATA:
727  case H264_NAL_SPS_EXT:
728  case H264_NAL_AUXILIARY_SLICE:
729  break;
730  default:
731  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
732  nal->type, nal->size_bits);
733  }
734 
735  if (err < 0) {
736  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
737  }
738  }
739 
740  ret = ff_h264_execute_decode_slices(h);
741  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
742  goto end;
743 
744  // set decode_error_flags to allow users to detect concealed decoding errors
745  if ((ret < 0 || h->slice_ctx->er.error_occurred) && h->cur_pic_ptr) {
746  h->cur_pic_ptr->f->decode_error_flags |= FF_DECODE_ERROR_DECODE_SLICES;
747  }
748 
749  ret = 0;
750 end:
751 
752 #if CONFIG_ERROR_RESILIENCE
753  /*
754  * FIXME: Error handling code does not seem to support interlaced
755  * content when slices span multiple rows.
756  * The ff_er_add_slice calls don't work right for bottom
757  * fields; they cause massive erroneous error concealing.
758  * Error marking covers both fields (top and bottom).
759  * This causes a mismatched s->error_count
760  * and a bad error table. Further, the error count goes to
761  * INT_MAX when called for the bottom field, because mb_y is
762  * past the end by one (caller's fault) and resync_mb_y != 0
763  * causes problems for the first MB line, too.
764  */
765  if (!FIELD_PICTURE(h) && h->current_slice && h->enable_er) {
766 
767  H264SliceContext *sl = h->slice_ctx;
768  int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
769 
770  ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
771 
772  if (use_last_pic) {
773  ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
774  sl->ref_list[0][0].parent = &h->last_pic_for_ec;
775  memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
776  memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
777  sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
778  } else if (sl->ref_count[0]) {
779  ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
780  } else
781  ff_h264_set_erpic(&sl->er.last_pic, NULL);
782 
783  if (sl->ref_count[1])
784  ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
785 
786  sl->er.ref_count = sl->ref_count[0];
787 
788  ff_er_frame_end(&sl->er);
789  if (use_last_pic)
790  memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
791  }
792 #endif /* CONFIG_ERROR_RESILIENCE */
793  /* clean up */
794  if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
795  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
796  h->picture_structure == PICT_BOTTOM_FIELD);
797  }
798 
799  return (ret < 0) ? ret : buf_size;
800 }
801 
802 /**
803  * Return the number of bytes consumed for building the current frame.
804  */
805 static int get_consumed_bytes(int pos, int buf_size)
806 {
807  if (pos == 0)
808  pos = 1; // avoid infinite loops (I doubt that is needed but...)
809  if (pos + 10 > buf_size)
810  pos = buf_size; // oops ;)
811 
812  return pos;
813 }
814 
815 static int h264_export_enc_params(AVFrame *f, H264Picture *p)
816 {
817  AVVideoEncParams *par;
818  unsigned int nb_mb = p->mb_height * p->mb_width;
819  unsigned int x, y;
820 
821  par = av_video_enc_params_create_side_data(f, AV_VIDEO_ENC_PARAMS_H264, nb_mb);
822  if (!par)
823  return AVERROR(ENOMEM);
824 
825  par->qp = p->pps->init_qp;
826 
827  par->delta_qp[1][0] = p->pps->chroma_qp_index_offset[0];
828  par->delta_qp[1][1] = p->pps->chroma_qp_index_offset[0];
829  par->delta_qp[2][0] = p->pps->chroma_qp_index_offset[1];
830  par->delta_qp[2][1] = p->pps->chroma_qp_index_offset[1];
831 
832  for (y = 0; y < p->mb_height; y++)
833  for (x = 0; x < p->mb_width; x++) {
834  const unsigned int block_idx = y * p->mb_width + x;
835  const unsigned int mb_xy = y * p->mb_stride + x;
836  AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);
837 
838  b->src_x = x * 16;
839  b->src_y = y * 16;
840  b->w = 16;
841  b->h = 16;
842 
843  b->delta_qp = p->qscale_table[mb_xy] - par->qp;
844  }
845 
846  return 0;
847 }
848 
849 static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
850 {
851  AVFrame *src = srcp->f;
852  int ret;
853 
854  ret = av_frame_ref(dst, src);
855  if (ret < 0)
856  return ret;
857 
858  av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
859 
860  if (srcp->sei_recovery_frame_cnt == 0)
861  dst->key_frame = 1;
862 
863  if (h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS) {
864  ret = h264_export_enc_params(dst, srcp);
865  if (ret < 0)
866  goto fail;
867  }
868 
869  return 0;
870 fail:
871  av_frame_unref(dst);
872  return ret;
873 }
874 
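/*
 * is_extra(): check whether the buffer looks like avcC-style extradata, i.e.
 * a 5-byte header followed by a count of SPS NAL units (low 5 bits of byte 5),
 * each prefixed with a 16-bit big-endian length, then a count of PPS NAL units
 * in the same format. Only buffers whose NALs are all SPS (type 7) and PPS
 * (type 8) are treated as extradata.
 */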
875 static int is_extra(const uint8_t *buf, int buf_size)
876 {
877  int cnt= buf[5]&0x1f;
878  const uint8_t *p= buf+6;
879  if (!cnt)
880  return 0;
881  while(cnt--){
882  int nalsize= AV_RB16(p) + 2;
883  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
884  return 0;
885  p += nalsize;
886  }
887  cnt = *(p++);
888  if(!cnt)
889  return 0;
890  while(cnt--){
891  int nalsize= AV_RB16(p) + 2;
892  if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
893  return 0;
894  p += nalsize;
895  }
896  return 1;
897 }
898 
899 static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
900 {
901  int ret;
902 
903  if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
904  (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
905  out->recovered)) {
906 
907  if (!h->avctx->hwaccel &&
908  (out->field_poc[0] == INT_MAX ||
909  out->field_poc[1] == INT_MAX)
910  ) {
911  int p;
912  AVFrame *f = out->f;
913  int field = out->field_poc[0] == INT_MAX;
914  uint8_t *dst_data[4];
915  int linesizes[4];
916  const uint8_t *src_data[4];
917 
918  av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
919 
920  for (p = 0; p<4; p++) {
921  dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
922  src_data[p] = f->data[p] + field *f->linesize[p];
923  linesizes[p] = 2*f->linesize[p];
924  }
925 
926  av_image_copy(dst_data, linesizes, src_data, linesizes,
927  f->format, f->width, f->height>>1);
928  }
929 
930  ret = output_frame(h, dst, out);
931  if (ret < 0)
932  return ret;
933 
934  *got_frame = 1;
935 
936  if (CONFIG_MPEGVIDEO) {
937  ff_print_debug_info2(h->avctx, dst, NULL,
938  out->mb_type,
939  out->qscale_table,
940  out->motion_val,
941  NULL,
942  out->mb_width, out->mb_height, out->mb_stride, 1);
943  }
944  }
945 
946  return 0;
947 }
948 
949 static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
950  int *got_frame, int buf_index)
951 {
952  int ret, i, out_idx;
953  H264Picture *out = h->delayed_pic[0];
954 
955  h->cur_pic_ptr = NULL;
956  h->first_field = 0;
957 
958  out_idx = 0;
959  for (i = 1;
960  h->delayed_pic[i] &&
961  !h->delayed_pic[i]->f->key_frame &&
962  !h->delayed_pic[i]->mmco_reset;
963  i++)
964  if (h->delayed_pic[i]->poc < out->poc) {
965  out = h->delayed_pic[i];
966  out_idx = i;
967  }
968 
969  for (i = out_idx; h->delayed_pic[i]; i++)
970  h->delayed_pic[i] = h->delayed_pic[i + 1];
971 
972  if (out) {
973  out->reference &= ~DELAYED_PIC_REF;
974  ret = finalize_frame(h, dst_frame, out, got_frame);
975  if (ret < 0)
976  return ret;
977  }
978 
979  return buf_index;
980 }
981 
982 static int h264_decode_frame(AVCodecContext *avctx, void *data,
983  int *got_frame, AVPacket *avpkt)
984 {
985  const uint8_t *buf = avpkt->data;
986  int buf_size = avpkt->size;
987  H264Context *h = avctx->priv_data;
988  AVFrame *pict = data;
989  int buf_index;
990  int ret;
991 
992  h->flags = avctx->flags;
993  h->setup_finished = 0;
994  h->nb_slice_ctx_queued = 0;
995 
996  ff_h264_unref_picture(h, &h->last_pic_for_ec);
997 
998  /* end of stream, output what is still in the buffers */
999  if (buf_size == 0)
1000  return send_next_delayed_frame(h, pict, got_frame, 0);
1001 
1002  if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
1003  int side_size;
1004  uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
1005  if (is_extra(side, side_size))
1006  ff_h264_decode_extradata(side, side_size,
1007  &h->ps, &h->is_avc, &h->nal_length_size,
1008  avctx->err_recognition, avctx);
1009  }
1010  if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
1011  if (is_extra(buf, buf_size))
1012  return ff_h264_decode_extradata(buf, buf_size,
1013  &h->ps, &h->is_avc, &h->nal_length_size,
1014  avctx->err_recognition, avctx);
1015  }
1016 
1017  buf_index = decode_nal_units(h, buf, buf_size);
1018  if (buf_index < 0)
1019  return AVERROR_INVALIDDATA;
1020 
1021  if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
1022  av_assert0(buf_index <= buf_size);
1023  return send_next_delayed_frame(h, pict, got_frame, buf_index);
1024  }
1025 
1026  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
1027  if (avctx->skip_frame >= AVDISCARD_NONREF ||
1028  buf_size >= 4 && !memcmp("Q264", buf, 4))
1029  return buf_size;
1030  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
1031  return AVERROR_INVALIDDATA;
1032  }
1033 
1034  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
1035  (h->mb_y >= h->mb_height && h->mb_height)) {
1036  if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
1037  return ret;
1038 
1039  /* Wait for second field. */
1040  if (h->next_output_pic) {
1041  ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
1042  if (ret < 0)
1043  return ret;
1044  }
1045  }
1046 
1047  av_assert0(pict->buf[0] || !*got_frame);
1048 
1049  ff_h264_unref_picture(h, &h->last_pic_for_ec);
1050 
1051  return get_consumed_bytes(buf_index, buf_size);
1052 }
1053 
1054 #define OFFSET(x) offsetof(H264Context, x)
1055 #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
1056 static const AVOption h264_options[] = {
1057  { "is_avc", "is avc", OFFSET(is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0 },
1058  { "nal_length_size", "nal_length_size", OFFSET(nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0 },
1059  { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
1060  { "x264_build", "Assume this x264 version if no x264 version found in any SEI", OFFSET(x264_build), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VD },
1061  { NULL },
1062 };
1063 
1064 static const AVClass h264_class = {
1065  .class_name = "H264 Decoder",
1066  .item_name = av_default_item_name,
1067  .option = h264_options,
1068  .version = LIBAVUTIL_VERSION_INT,
1069 };
1070 
1071 AVCodec ff_h264_decoder = {
1072  .name = "h264",
1073  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
1074  .type = AVMEDIA_TYPE_VIDEO,
1075  .id = AV_CODEC_ID_H264,
1076  .priv_data_size = sizeof(H264Context),
1077  .init = h264_decode_init,
1078  .close = h264_decode_end,
1079  .decode = h264_decode_frame,
1080  .capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
1081  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
1082  AV_CODEC_CAP_FRAME_THREADS,
1083  .hw_configs = (const AVCodecHWConfigInternal*[]) {
1084 #if CONFIG_H264_DXVA2_HWACCEL
1085  HWACCEL_DXVA2(h264),
1086 #endif
1087 #if CONFIG_H264_D3D11VA_HWACCEL
1088  HWACCEL_D3D11VA(h264),
1089 #endif
1090 #if CONFIG_H264_D3D11VA2_HWACCEL
1091  HWACCEL_D3D11VA2(h264),
1092 #endif
1093 #if CONFIG_H264_NVDEC_HWACCEL
1094  HWACCEL_NVDEC(h264),
1095 #endif
1096 #if CONFIG_H264_VAAPI_HWACCEL
1097  HWACCEL_VAAPI(h264),
1098 #endif
1099 #if CONFIG_H264_VDPAU_HWACCEL
1100  HWACCEL_VDPAU(h264),
1101 #endif
1102 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
1103  HWACCEL_VIDEOTOOLBOX(h264),
1104 #endif
1105  NULL
1106  },
1107  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
1108  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
1109  .flush = h264_decode_flush,
1110  .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
1111  .profiles = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
1112  .priv_class = &h264_class,
1113 };
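The decoder registered above is normally reached through the generic libavcodec API rather than by calling h264_decode_frame() directly. The following standalone sketch (not part of h264dec.c; the helper name decode_one_packet is illustrative) shows the usual call path: avcodec_open2() runs h264_decode_init(), each avcodec_send_packet() ends up in h264_decode_frame(), and avcodec_receive_frame() returns the reordered pictures.

#include <libavcodec/avcodec.h>

/* Feed one already-demuxed H.264 packet to the decoder and drain any
 * frames it produces (error handling kept minimal for brevity). */
static int decode_one_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);      /* internally invokes h264_decode_frame() */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
        /* use frame->data / frame->linesize here, then drop the reference */
        av_frame_unref(frame);
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;                                 /* needs more input, or fully drained */
    return ret;
}

/* Typical setup:
 *   const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
 *   AVCodecContext *dec  = avcodec_alloc_context3(codec);
 *   avcodec_open2(dec, codec, NULL);             // runs h264_decode_init()
 */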
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:95
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
AVCodec
AVCodec.
Definition: codec.h:190
h264_decode_frame
static int h264_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: h264dec.c:982
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:237
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:44
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:306
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:138
ff_h264_sei_uninit
void ff_h264_sei_uninit(H264SEIContext *h)
Reset SEI values at the beginning of the frame.
Definition: h264_sei.c:41
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
h2645_parse.h
AVERROR
#define AVERROR(e)
Convert a POSIX error number into a negative FFmpeg error code.
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1560
opt.h
ff_h264_mb_sizes
const uint16_t ff_h264_mb_sizes[4]
Definition: h264dec.c:59
H264Picture::f
AVFrame * f
Definition: h264dec.h:129
ff_h264_ps_uninit
void ff_h264_ps_uninit(H264ParamSets *ps)
Uninit H264 param sets structure.
Definition: h264_ps.c:317
idr
static void idr(H264Context *h)
instantaneous decoder refresh.
Definition: h264dec.c:451
out
FILE * out
Definition: movenc.c:54
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
h264_decode_init
static av_cold int h264_decode_init(AVCodecContext *avctx)
Definition: h264dec.c:389
SLICE_FLAG_ALLOW_FIELD
#define SLICE_FLAG_ALLOW_FIELD
allow draw_horiz_band() with field slices (MPEG-2 field pics)
Definition: avcodec.h:1006
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
ff_h264_slice_context_init
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:238
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:136
H264Picture::pps
const PPS * pps
Definition: h264dec.h:166
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:70
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:58
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:296
AVPacket::data
uint8_t * data
Definition: packet.h:355
AVOption
AVOption.
Definition: opt.h:246
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:519
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:91
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:273
H264_NAL_SLICE
@ H264_NAL_SLICE
Definition: h264.h:35
H264SEIGreenMetaData::xsd_metric_value
uint16_t xsd_metric_value
Definition: h264_sei.h:174
H264SEIGreenMetaData::period_type
uint8_t period_type
Definition: h264_sei.h:166
PPS::chroma_qp_index_offset
int chroma_qp_index_offset[2]
Definition: h264_ps.h:122
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:133
ERContext
Definition: error_resilience.h:53
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
decode_nal_units
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
Definition: h264dec.c:580
h264_export_enc_params
static int h264_export_enc_params(AVFrame *f, H264Picture *p)
Definition: h264dec.c:815
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:236
h264_mvpred.h
H2645NAL::size_bits
int size_bits
Size, in bits, of just the data, excluding the stop bit and any trailing padding.
Definition: h2645_parse.h:42
ff_h264_decode_picture_parameter_set
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int bit_length)
Decode PPS.
Definition: h264_ps.c:749
FF_DECODE_ERROR_DECODE_SLICES
#define FF_DECODE_ERROR_DECODE_SLICES
Definition: frame.h:599
H264SliceContext
Definition: h264dec.h:183
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
Definition: internal.h:123
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:305
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:283
debug_green_metadata
static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
Definition: h264dec.c:552
H264SEIGreenMetaData::percent_non_zero_macroblocks
uint8_t percent_non_zero_macroblocks
Definition: h264_sei.h:169
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:101
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2004
fail
#define fail()
Definition: checkasm.h:123
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1785
ff_h264_sei_decode
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb, const H264ParamSets *ps, void *logctx)
Definition: h264_sei.c:418
GetBitContext
Definition: get_bits.h:61
AV_VIDEO_ENC_PARAMS_H264
@ AV_VIDEO_ENC_PARAMS_H264
H.264 stores:
Definition: video_enc_params.h:57
H264SliceContext::er
ERContext er
Definition: h264dec.h:186
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:103
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:169
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:464
finalize_frame
static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
Definition: h264dec.c:899
H264Ref::data
uint8_t * data[3]
Definition: h264dec.h:173
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:65
HWACCEL_VIDEOTOOLBOX
@ HWACCEL_VIDEOTOOLBOX
Definition: ffmpeg.h:62
ERContext::mb_num
int mb_num
Definition: error_resilience.h:59
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
ERContext::avctx
AVCodecContext * avctx
Definition: error_resilience.h:54
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
H264SliceContext::dc_val_base
int16_t * dc_val_base
Definition: h264dec.h:287
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:568
H2645NAL::size
int size
Definition: h2645_parse.h:35
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
get_last_needed_nal
static int get_last_needed_nal(H264Context *h)
Definition: h264dec.c:510
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2083
stereo3d.h
h264_decode_end
static av_cold int h264_decode_end(AVCodecContext *avctx)
Definition: h264dec.c:355
get_consumed_bytes
static int get_consumed_bytes(int pos, int buf_size)
Return the number of bytes consumed for building the current frame.
Definition: h264dec.c:805
H264_NAL_PPS
@ H264_NAL_PPS
Definition: h264.h:42
ff_h264_decode_extradata
int ff_h264_decode_extradata(const uint8_t *data, int size, H264ParamSets *ps, int *is_avc, int *nal_length_size, int err_recognition, void *logctx)
Definition: h264_parse.c:462
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:74
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:658
avpriv_h264_has_num_reorder_frames
int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
Definition: h264dec.c:61
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
h264data.h
ff_h264_remove_all_refs
void ff_h264_remove_all_refs(H264Context *h)
Definition: h264_refs.c:565
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:180
ERContext::dc_val
int16_t * dc_val[3]
Definition: error_resilience.h:68
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:36
field
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
AVHWAccel::decode_params
int(* decode_params)(AVCodecContext *avctx, int type, const uint8_t *buf, uint32_t buf_size)
Callback for parameter data (SPS/PPS/VPS etc).
Definition: avcodec.h:2486
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:163
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:76
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
if
if(ret)
Definition: filter_design.txt:179
H264_NAL_SEI
@ H264_NAL_SEI
Definition: h264.h:40
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
H264SEIGreenMetaData::percent_alpha_point_deblocking_instance
uint8_t percent_alpha_point_deblocking_instance
Definition: h264_sei.h:172
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:173
h264_class
static const AVClass h264_class
Definition: h264dec.c:1064
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
h264_er_decode_mb
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: h264dec.c:67
NULL
#define NULL
Definition: coverity.c:32
is_extra
static int is_extra(const uint8_t *buf, int buf_size)
Definition: h264dec.c:875
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:290
VD
#define VD
Definition: cuviddec.c:1080
AVCodecContext::slice_flags
int slice_flags
slice flags
Definition: avcodec.h:1004
h264_init_context
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
Definition: h264dec.c:303
H264_NAL_END_SEQUENCE
@ H264_NAL_END_SEQUENCE
Definition: h264.h:44
H264Ref::linesize
int linesize[3]
Definition: h264dec.h:174
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
profiles.h
src
#define src
Definition: vp8dsp.c:254
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:276
ff_h264_profiles
const AVProfile ff_h264_profiles[]
Definition: profiles.c:58
PPS::init_qp
int init_qp
pic_init_qp_minus26 + 26
Definition: h264_ps.h:120
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
H264_NAL_AUXILIARY_SLICE
@ H264_NAL_AUXILIARY_SLICE
Definition: h264.h:53
ff_h264_decoder
AVCodec ff_h264_decoder
Definition: h264dec.c:1071
H264Picture::mb_height
int mb_height
Definition: h264dec.h:168
H264SEIGreenMetaData
Definition: h264_sei.h:164
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:294
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
H264_NAL_FILLER_DATA
@ H264_NAL_FILLER_DATA
Definition: h264.h:46
AVOnce
#define AVOnce
Definition: thread.h:172
H264SEIGreenMetaData::percent_intra_coded_macroblocks
uint8_t percent_intra_coded_macroblocks
Definition: h264_sei.h:170
h264_ps.h
H264_NAL_END_STREAM
@ H264_NAL_END_STREAM
Definition: h264.h:45
H264_NAL_DPA
@ H264_NAL_DPA
Definition: h264.h:36
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
h264_vlc_init
static AVOnce h264_vlc_init
Definition: h264dec.c:387
ff_h264_sei_stereo_mode
const char * ff_h264_sei_stereo_mode(const H264SEIFramePacking *h)
Get stereo_mode string from the h264 frame_packing_arrangement.
Definition: h264_sei.c:498
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:57
ERContext::opaque
void * opaque
Definition: error_resilience.h:89
desc
const char * desc
Definition: nvenc.c:79
H264SEIGreenMetaData::percent_six_tap_filtering
uint8_t percent_six_tap_filtering
Definition: h264_sei.h:171
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
AVPacket::size
int size
Definition: packet.h:356
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
h264_options
static const AVOption h264_options[]
Definition: h264dec.c:1056
H264SEIGreenMetaData::num_seconds
uint16_t num_seconds
Definition: h264_sei.h:167
H264_NAL_AUD
@ H264_NAL_AUD
Definition: h264.h:43
rectangle.h
H264_NAL_SPS
@ H264_NAL_SPS
Definition: h264.h:41
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
H264_NAL_DPC
@ H264_NAL_DPC
Definition: h264.h:38
MAX_DELAYED_PIC_COUNT
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:56
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:301
ERContext::b8_stride
ptrdiff_t b8_stride
Definition: error_resilience.h:62
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
AV_RB32
Read a 32-bit value in big-endian byte order.
Definition: bytestream.h:92
H2645NAL
Definition: h2645_parse.h:32
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:291
H264SEIGreenMetaData::green_metadata_type
uint8_t green_metadata_type
Definition: h264_sei.h:165
ERContext::mb_stride
ptrdiff_t mb_stride
Definition: error_resilience.h:61
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
AVCodecHWConfigInternal
Definition: hwconfig.h:29
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:236
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:392
height
#define height
ERContext::ref_count
int ref_count
Definition: error_resilience.h:84
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
ERContext::decode_mb
void(* decode_mb)(void *opaque, int ref, int mv_dir, int mv_type, int(*mv)[2][4][2], int mb_x, int mb_y, int mb_intra, int mb_skipped)
Definition: error_resilience.h:86
output_frame
static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
Definition: h264dec.c:849
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
offset
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around it.
Definition: writing_filters.txt:86
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:666
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:248
H264_NAL_SPS_EXT
@ H264_NAL_SPS_EXT
Definition: h264.h:47
h264dec.h
ff_h264_decode_seq_parameter_set
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx, H264ParamSets *ps, int ignore_truncation)
Decode SPS.
Definition: h264_ps.c:333
H264Context
H264Context.
Definition: h264dec.h:343
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2500
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
display.h
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2792
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ERContext::mb_width
int mb_width
Definition: error_resilience.h:60
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:112
ff_h264_decode_init_vlc
av_cold void ff_h264_decode_init_vlc(void)
Definition: h264_cavlc.c:327
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
ERContext::cur_pic
ERPicture cur_pic
Definition: error_resilience.h:73
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264dec.h:397
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
avcodec.h
ret
ret
Definition: filter_design.txt:187
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ERContext::mb_height
int mb_height
Definition: error_resilience.h:60
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
FMO
#define FMO
Definition: h264dec.h:62
FF_DEBUG_GREEN_MD
#define FF_DEBUG_GREEN_MD
Definition: avcodec.h:1635
pos
unsigned int pos
Definition: spdifenc.c:412
ff_thread_finish_setup
void ff_thread_finish_setup(AVCodecContext *avctx)
Notify later decoding threads that this thread has finished setting up the current frame, so the next frame thread may start; no data read by other threads may be changed after this call.
ERContext::last_pic
ERPicture last_pic
Definition: error_resilience.h:74
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:412
me_cmp.h
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:367
AVCodecContext::draw_horiz_band
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band.
Definition: avcodec.h:761
H264_NAL_DPB
@ H264_NAL_DPB
Definition: h264.h:37
OFFSET
offset is the offset of the option field in your context structure; see the OFFSET() macro.
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
av_image_copy
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
Definition: imgutils.c:387
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:289
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:55
error_resilience.h
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
H264Picture::mb_width
int mb_width
Definition: h264dec.h:168
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:586
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
H264Picture
Definition: h264dec.h:128
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:66
ERContext::quarter_sample
int quarter_sample
Definition: error_resilience.h:82
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:650
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:159
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1611
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:275
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:300
ERContext::er_temp_buffer
uint8_t * er_temp_buffer
Definition: error_resilience.h:67
h264_decode_flush
static void h264_decode_flush(AVCodecContext *avctx)
Definition: h264dec.c:489
FF_ALLOC_OR_GOTO
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
Definition: internal.h:140
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:105
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:75
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:900
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
bytestream.h
h264.h
imgutils.h
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:293
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_ALLOCZ_OR_GOTO
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:149
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:135
h
h
Definition: vp9dsp_template.c:2038
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:52
H264SEIGreenMetaData::num_pictures
uint16_t num_pictures
Definition: h264_sei.h:168
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:292
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
send_next_delayed_frame
static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame, int *got_frame, int buf_index)
Definition: h264dec.c:949
FF_ALLOCZ_ARRAY_OR_GOTO
#define FF_ALLOCZ_ARRAY_OR_GOTO(ctx, p, nelem, elsize, label)
Definition: internal.h:167
H264Ref::reference
int reference
Definition: h264dec.h:176
video_enc_params.h
AV_RB16
Read a 16-bit value in big-endian byte order.
Definition: bytestream.h:94
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:247
ERContext::next_pic
ERPicture next_pic
Definition: error_resilience.h:75
H264SEIGreenMetaData::xsd_metric_type
uint8_t xsd_metric_type
Definition: h264_sei.h:173