/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/avassert.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timecode.h"
#include "internal.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "h264.h"
#include "h264dec.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "h264_ps.h"
#include "golomb.h"
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"

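/* Scan tables for field (interlaced) macroblocks; each entry encodes a
 * coefficient position as column + row * stride within the block. The
 * progressive (frame) zigzag tables are shared with other codecs. */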
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};

static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};

static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};

// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};

static void release_unused_pictures(H264Context *h, int remove_current)
{
    int i;

    /* release non-reference frames */
    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
            (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
            ff_h264_unref_picture(h, &h->DPB[i]);
        }
    }
}

static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
{
    const H264Context *h = sl->h264;
    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);

    av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
    // edge emu needs blocksize + filter length - 1
    // (= 21x21 for H.264)
    av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);

    av_fast_mallocz(&sl->top_borders[0], &sl->top_borders_allocated[0],
                    h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
    av_fast_mallocz(&sl->top_borders[1], &sl->top_borders_allocated[1],
                    h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);

    if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
        !sl->top_borders[0]    || !sl->top_borders[1]) {
        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
        return AVERROR(ENOMEM);
    }

    return 0;
}

static int init_table_pools(H264Context *h)
{
    const int big_mb_num    = h->mb_stride * (h->mb_height + 1) + 1;
    const int mb_array_size = h->mb_stride * h->mb_height;
    const int b4_stride     = h->mb_width * 4 + 1;
    const int b4_array_size = b4_stride * h->mb_height * 4;

    h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
                                               av_buffer_allocz);
    h->mb_type_pool      = av_buffer_pool_init((big_mb_num + h->mb_stride) *
                                               sizeof(uint32_t), av_buffer_allocz);
    h->motion_val_pool   = av_buffer_pool_init(2 * (b4_array_size + 4) *
                                               sizeof(int16_t), av_buffer_allocz);
    h->ref_index_pool    = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);

    if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
        !h->ref_index_pool) {
        av_buffer_pool_uninit(&h->qscale_table_pool);
        av_buffer_pool_uninit(&h->mb_type_pool);
        av_buffer_pool_uninit(&h->motion_val_pool);
        av_buffer_pool_uninit(&h->ref_index_pool);
        return AVERROR(ENOMEM);
    }

    return 0;
}

static int alloc_picture(H264Context *h, H264Picture *pic)
{
    int i, ret = 0;

    av_assert0(!pic->f->data[0]);

    pic->tf.f = pic->f;
    ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
                                                   AV_GET_BUFFER_FLAG_REF : 0);
    if (ret < 0)
        goto fail;

    if (pic->needs_fg) {
        pic->tf_grain.f = pic->f_grain;
        pic->f_grain->format = pic->f->format;
        pic->f_grain->width  = pic->f->width;
        pic->f_grain->height = pic->f->height;
        ret = ff_thread_get_buffer(h->avctx, &pic->tf_grain, 0);
        if (ret < 0)
            goto fail;
    }

    if (h->avctx->hwaccel) {
        const AVHWAccel *hwaccel = h->avctx->hwaccel;
        av_assert0(!pic->hwaccel_picture_private);
        if (hwaccel->frame_priv_data_size) {
            pic->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
            if (!pic->hwaccel_priv_buf)
                return AVERROR(ENOMEM);
            pic->hwaccel_picture_private = pic->hwaccel_priv_buf->data;
        }
    }
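    /* With AV_CODEC_FLAG_GRAY the decoder never writes the chroma planes,
     * so fill them once with 0x80, the unsigned "no color" midpoint. */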
    if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(pic->f->format,
                                         &h_chroma_shift, &v_chroma_shift);

        for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
            memset(pic->f->data[1] + pic->f->linesize[1]*i,
                   0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
            memset(pic->f->data[2] + pic->f->linesize[2]*i,
                   0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
        }
    }

    if (!h->qscale_table_pool) {
        ret = init_table_pools(h);
        if (ret < 0)
            goto fail;
    }

    pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
    pic->mb_type_buf      = av_buffer_pool_get(h->mb_type_pool);
    if (!pic->qscale_table_buf || !pic->mb_type_buf)
        goto fail;

    pic->mb_type      = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
    pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;

    for (i = 0; i < 2; i++) {
        pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
        pic->ref_index_buf[i]  = av_buffer_pool_get(h->ref_index_pool);
        if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
            goto fail;

        pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
        pic->ref_index[i]  = pic->ref_index_buf[i]->data;
    }

    pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
    if (!pic->pps_buf)
        goto fail;
    pic->pps = (const PPS*)pic->pps_buf->data;

    pic->mb_width  = h->mb_width;
    pic->mb_height = h->mb_height;
    pic->mb_stride = h->mb_stride;

    return 0;
fail:
    ff_h264_unref_picture(h, pic);
    return (ret < 0) ? ret : AVERROR(ENOMEM);
}

static int find_unused_picture(H264Context *h)
{
    int i;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        if (!h->DPB[i].f->buf[0])
            return i;
    }
    return AVERROR_INVALIDDATA;
}


#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

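/* Map a picture pointer from the old context's DPB onto the same slot in the
 * new context's DPB; pointers that do not point into the DPB become NULL. */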
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)

static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
                               H264Context *new_base,
                               H264Context *old_base)
{
    int i;

    for (i = 0; i < count; i++) {
        av_assert1(!from[i] ||
                   IN_RANGE(from[i], old_base, 1) ||
                   IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
        to[i] = REBASE_PICTURE(from[i], new_base, old_base);
    }
}

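/* Frame threading: copy the decoder state of the context that just finished
 * parsing a frame (src) into the context that will decode the next one (dst). */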
static int h264_slice_header_init(H264Context *h);

int ff_h264_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    H264Context *h = dst->priv_data, *h1 = src->priv_data;
    int inited = h->context_initialized, err = 0;
    int need_reinit = 0;
    int i, ret;

    if (dst == src)
        return 0;

    if (inited && !h1->ps.sps)
        return AVERROR_INVALIDDATA;

    if (inited &&
        (h->width      != h1->width      ||
         h->height     != h1->height     ||
         h->mb_width   != h1->mb_width   ||
         h->mb_height  != h1->mb_height  ||
         !h->ps.sps                      ||
         h->ps.sps->bit_depth_luma    != h1->ps.sps->bit_depth_luma    ||
         h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
         h->ps.sps->colorspace        != h1->ps.sps->colorspace)) {
        need_reinit = 1;
    }

    /* copy block_offset since frame_start may not be called */
    memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));

    // SPS/PPS
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
        ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
        if (ret < 0)
            return ret;
    }
    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
        ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
        if (ret < 0)
            return ret;
    }

    ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
    if (ret < 0)
        return ret;
    h->ps.pps = NULL;
    h->ps.sps = NULL;
    if (h1->ps.pps_ref) {
        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
        h->ps.sps = h->ps.pps->sps;
    }

    if (need_reinit || !inited) {
        h->width      = h1->width;
        h->height     = h1->height;
        h->mb_height  = h1->mb_height;
        h->mb_width   = h1->mb_width;
        h->mb_num     = h1->mb_num;
        h->mb_stride  = h1->mb_stride;
        h->b_stride   = h1->b_stride;
        h->x264_build = h1->x264_build;

        if (h->context_initialized || h1->context_initialized) {
            if ((err = h264_slice_header_init(h)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed\n");
                return err;
            }
        }

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
    }

    h->avctx->coded_height  = h1->avctx->coded_height;
    h->avctx->coded_width   = h1->avctx->coded_width;
    h->avctx->width         = h1->avctx->width;
    h->avctx->height        = h1->avctx->height;
    h->width_from_caller    = h1->width_from_caller;
    h->height_from_caller   = h1->height_from_caller;
    h->coded_picture_number = h1->coded_picture_number;
    h->first_field          = h1->first_field;
    h->picture_structure    = h1->picture_structure;
    h->mb_aff_frame         = h1->mb_aff_frame;
    h->droppable            = h1->droppable;

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ret = ff_h264_replace_picture(h, &h->DPB[i], &h1->DPB[i]);
        if (ret < 0)
            return ret;
    }

    h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
    ret = ff_h264_replace_picture(h, &h->cur_pic, &h1->cur_pic);
    if (ret < 0)
        return ret;

    h->enable_er       = h1->enable_er;
    h->workaround_bugs = h1->workaround_bugs;
    h->droppable       = h1->droppable;

    // extradata/NAL handling
    h->is_avc          = h1->is_avc;
    h->nal_length_size = h1->nal_length_size;

    memcpy(&h->poc, &h1->poc, sizeof(h->poc));

    memcpy(h->short_ref,   h1->short_ref,   sizeof(h->short_ref));
    memcpy(h->long_ref,    h1->long_ref,    sizeof(h->long_ref));
    memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
    memcpy(h->last_pocs,   h1->last_pocs,   sizeof(h->last_pocs));

    h->next_output_pic   = h1->next_output_pic;
    h->next_outputed_poc = h1->next_outputed_poc;
    h->poc_offset        = h1->poc_offset;

    memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
    h->nb_mmco              = h1->nb_mmco;
    h->mmco_reset           = h1->mmco_reset;
    h->explicit_ref_marking = h1->explicit_ref_marking;
    h->long_ref_count       = h1->long_ref_count;
    h->short_ref_count      = h1->short_ref_count;

    copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
    copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
    copy_picture_range(h->delayed_pic, h1->delayed_pic,
                       MAX_DELAYED_PIC_COUNT + 2, h, h1);

    h->frame_recovered = h1->frame_recovered;

    ret = av_buffer_replace(&h->sei.a53_caption.buf_ref, h1->sei.a53_caption.buf_ref);
    if (ret < 0)
        return ret;

    for (i = 0; i < h->sei.unregistered.nb_buf_ref; i++)
        av_buffer_unref(&h->sei.unregistered.buf_ref[i]);
    h->sei.unregistered.nb_buf_ref = 0;

    if (h1->sei.unregistered.nb_buf_ref) {
        ret = av_reallocp_array(&h->sei.unregistered.buf_ref,
                                h1->sei.unregistered.nb_buf_ref,
                                sizeof(*h->sei.unregistered.buf_ref));
        if (ret < 0)
            return ret;

        for (i = 0; i < h1->sei.unregistered.nb_buf_ref; i++) {
            h->sei.unregistered.buf_ref[i] = av_buffer_ref(h1->sei.unregistered.buf_ref[i]);
            if (!h->sei.unregistered.buf_ref[i])
                return AVERROR(ENOMEM);
            h->sei.unregistered.nb_buf_ref++;
        }
    }
    h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;

    if (!h->cur_pic_ptr)
        return 0;

    if (!h->droppable) {
        err = ff_h264_execute_ref_pic_marking(h);
        h->poc.prev_poc_msb = h->poc.poc_msb;
        h->poc.prev_poc_lsb = h->poc.poc_lsb;
    }
    h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
    h->poc.prev_frame_num        = h->poc.frame_num;

    h->recovery_frame = h1->recovery_frame;

    return err;
}

int ff_h264_update_thread_context_for_user(AVCodecContext *dst,
                                           const AVCodecContext *src)
{
    H264Context *h = dst->priv_data;
    const H264Context *h1 = src->priv_data;

    h->is_avc          = h1->is_avc;
    h->nal_length_size = h1->nal_length_size;

    return 0;
}

static int h264_frame_start(H264Context *h)
{
    H264Picture *pic;
    int i, ret;
    const int pixel_shift = h->pixel_shift;

    if (!ff_thread_can_start_frame(h->avctx)) {
        av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    release_unused_pictures(h, 1);
    h->cur_pic_ptr = NULL;

    i = find_unused_picture(h);
    if (i < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
        return i;
    }
    pic = &h->DPB[i];

    pic->reference               = h->droppable ? 0 : h->picture_structure;
    pic->f->coded_picture_number = h->coded_picture_number++;
    pic->field_picture           = h->picture_structure != PICT_FRAME;
    pic->frame_num               = h->poc.frame_num;
    /*
     * Zero key_frame here; IDR markings per slice in frame or fields are ORed
     * in later.
     * See decode_nal_units().
     */
    pic->f->key_frame = 0;
    pic->mmco_reset   = 0;
    pic->recovered    = 0;
    pic->invalid_gap  = 0;
    pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;

    pic->f->pict_type = h->slice_ctx[0].slice_type;

    pic->f->crop_left   = h->crop_left;
    pic->f->crop_right  = h->crop_right;
    pic->f->crop_top    = h->crop_top;
    pic->f->crop_bottom = h->crop_bottom;

    pic->needs_fg = h->sei.film_grain_characteristics.present && !h->avctx->hwaccel &&
                    !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);

    if ((ret = alloc_picture(h, pic)) < 0)
        return ret;

    h->cur_pic_ptr = pic;
    ff_h264_unref_picture(h, &h->cur_pic);
    if (CONFIG_ERROR_RESILIENCE) {
        ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
    }

    if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
        return ret;

    for (i = 0; i < h->nb_slice_ctx; i++) {
        h->slice_ctx[i].linesize   = h->cur_pic_ptr->f->linesize[0];
        h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
    }

    if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
        ff_er_frame_start(&h->slice_ctx[0].er);
        ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
        ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
    }

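    /* block_offset[] caches, per 4x4 block of the macroblock, the byte offset
     * from the macroblock's top-left sample: entries 0-15 are luma and 16-47
     * chroma for frame coding, while entries from 48 on hold the same offsets
     * with doubled row stride for field/MBAFF macroblocks. */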
    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
    }

    /* We mark the current picture as non-reference after allocating it, so
     * that if we break out due to an error it can be released automatically
     * in the next ff_mpv_frame_start().
     */
    h->cur_pic_ptr->reference = 0;

    h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;

    h->next_output_pic = NULL;

    h->postpone_filter = 0;

    h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);

    if (h->sei.unregistered.x264_build >= 0)
        h->x264_build = h->sei.unregistered.x264_build;

    assert(h->cur_pic_ptr->long_ref == 0);

    return 0;
}

static void backup_mb_border(const H264Context *h, H264SliceContext *sl,
                             uint8_t *src_y,
                             uint8_t *src_cb, uint8_t *src_cr,
                             int linesize, int uvlinesize,
                             int simple)
{
    uint8_t *top_border;
    int top_idx = 1;
    const int pixel_shift = h->pixel_shift;
    int chroma444 = CHROMA444(h);
    int chroma422 = CHROMA422(h);

    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl)) {
                top_border = sl->top_borders[0][sl->mb_x];
                AV_COPY128(top_border, src_y + 15 * linesize);
                if (pixel_shift)
                    AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
                if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                    if (chroma444) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
                            AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
                            AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
                        } else {
                            AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
                        }
                    } else if (chroma422) {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
                        }
                    } else {
                        if (pixel_shift) {
                            AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
                            AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
                        } else {
                            AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
                            AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
                        }
                    }
                }
            }
        } else if (MB_MBAFF(sl)) {
            top_idx = 0;
        } else
            return;
    }

    top_border = sl->top_borders[top_idx][sl->mb_x];
    /* There are two lines saved, the line above the top macroblock
     * of a pair, and the line above the bottom macroblock. */
    AV_COPY128(top_border, src_y + 16 * linesize);
    if (pixel_shift)
        AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);

    if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
        if (chroma444) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * linesize);
                AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
                AV_COPY128(top_border + 64, src_cr + 16 * linesize);
                AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
            } else {
                AV_COPY128(top_border + 16, src_cb + 16 * linesize);
                AV_COPY128(top_border + 32, src_cr + 16 * linesize);
            }
        } else if (chroma422) {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
            }
        } else {
            if (pixel_shift) {
                AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
                AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
            } else {
                AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
                AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
            }
        }
    }
}

/**
 * Initialize implicit_weight table.
 * @param field  0/1 initializes the weight for interlaced MBAFF,
 *               -1 initializes the rest
 */
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
{
    int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;

    for (i = 0; i < 2; i++) {
        sl->pwt.luma_weight_flag[i]   = 0;
        sl->pwt.chroma_weight_flag[i] = 0;
    }

    if (field < 0) {
        if (h->picture_structure == PICT_FRAME) {
            cur_poc = h->cur_pic_ptr->poc;
        } else {
            cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
        }
        if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
            sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
            sl->pwt.use_weight        = 0;
            sl->pwt.use_weight_chroma = 0;
            return;
        }
        ref_start  = 0;
        ref_count0 = sl->ref_count[0];
        ref_count1 = sl->ref_count[1];
    } else {
        cur_poc    = h->cur_pic_ptr->field_poc[field];
        ref_start  = 16;
        ref_count0 = 16 + 2 * sl->ref_count[0];
        ref_count1 = 16 + 2 * sl->ref_count[1];
    }

    sl->pwt.use_weight               = 2;
    sl->pwt.use_weight_chroma        = 2;
    sl->pwt.luma_log2_weight_denom   = 5;
    sl->pwt.chroma_log2_weight_denom = 5;

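    /* Implicit weights follow the temporal-direct scaling of the spec:
     * td is the POC distance between the two references, tb the distance
     * from the list-0 reference to the current picture, tx ~ 16384/td, so
     * dist_scale_factor ~ 64*tb/td, i.e. how far the current picture sits
     * between the references. Long-term references and out-of-range scale
     * factors keep the default equal weight of 32. */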
    for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
        int64_t poc0 = sl->ref_list[0][ref0].poc;
        for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
            int w = 32;
            if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
                int poc1 = sl->ref_list[1][ref1].poc;
                int td   = av_clip_int8(poc1 - poc0);
                if (td) {
                    int tb = av_clip_int8(cur_poc - poc0);
                    int tx = (16384 + (FFABS(td) >> 1)) / td;
                    int dist_scale_factor = (tb * tx + 32) >> 8;
                    if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
                        w = 64 - dist_scale_factor;
                }
            }
            if (field < 0) {
                sl->pwt.implicit_weight[ref0][ref1][0] =
                sl->pwt.implicit_weight[ref0][ref1][1] = w;
            } else {
                sl->pwt.implicit_weight[ref0][ref1][field] = w;
            }
        }
    }
}

/**
 * initialize scan tables
 */
static void init_scan_tables(H264Context *h)
{
    int i;
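    /* TRANSPOSE swaps the row and column bit-fields of a scan index
     * (2-bit fields for 4x4 blocks, 3-bit fields for 8x8), i.e. it
     * transposes the scan pattern to match the decoder's block layout. */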
    for (i = 0; i < 16; i++) {
#define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
        h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
        h->field_scan[i]  = TRANSPOSE(field_scan[i]);
#undef TRANSPOSE
    }
    for (i = 0; i < 64; i++) {
#define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
        h->zigzag_scan8x8[i]       = TRANSPOSE(ff_zigzag_direct[i]);
        h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
        h->field_scan8x8[i]        = TRANSPOSE(field_scan8x8[i]);
        h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
#undef TRANSPOSE
    }
    if (h->ps.sps->transform_bypass) { // FIXME same ugly
        memcpy(h->zigzag_scan_q0          , ff_zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
        memcpy(h->zigzag_scan8x8_q0       , ff_zigzag_direct        , sizeof(h->zigzag_scan8x8_q0      ));
        memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc    , sizeof(h->zigzag_scan8x8_cavlc_q0));
        memcpy(h->field_scan_q0           , field_scan              , sizeof(h->field_scan_q0          ));
        memcpy(h->field_scan8x8_q0        , field_scan8x8           , sizeof(h->field_scan8x8_q0       ));
        memcpy(h->field_scan8x8_cavlc_q0  , field_scan8x8_cavlc     , sizeof(h->field_scan8x8_cavlc_q0 ));
    } else {
        memcpy(h->zigzag_scan_q0          , h->zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
        memcpy(h->zigzag_scan8x8_q0       , h->zigzag_scan8x8       , sizeof(h->zigzag_scan8x8_q0      ));
        memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
        memcpy(h->field_scan_q0           , h->field_scan           , sizeof(h->field_scan_q0          ));
        memcpy(h->field_scan8x8_q0        , h->field_scan8x8        , sizeof(h->field_scan8x8_q0       ));
        memcpy(h->field_scan8x8_cavlc_q0  , h->field_scan8x8_cavlc  , sizeof(h->field_scan8x8_cavlc_q0 ));
    }
}

static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_NVDEC_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;
    int i;

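    /* Build a NONE-terminated candidate list for this bit depth and chroma
     * format, hwaccel formats first, then let ff_thread_get_format() pick
     * one unless the context's pix_fmt already matches a candidate. */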
    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else
            *fmt++ = AV_PIX_FMT_YUV420P10;
        break;
    case 12:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_H264_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
        if (choices[i] == h->avctx->pix_fmt && !force_callback)
            return choices[i];
    return ff_thread_get_format(h->avctx, choices);
}

/* export coded and cropped frame dimensions to AVCodecContext */
static void init_dimensions(H264Context *h)
{
    const SPS *sps = (const SPS*)h->ps.sps;
    int cr = sps->crop_right;
    int cl = sps->crop_left;
    int ct = sps->crop_top;
    int cb = sps->crop_bottom;
    int width  = h->width  - (cr + cl);
    int height = h->height - (ct + cb);
    av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
    av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);

    /* handle container cropping */
    if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
        !sps->crop_top && !sps->crop_left &&
        FFALIGN(h->width_from_caller,  16) == FFALIGN(width,  16) &&
        FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
        h->width_from_caller  <= width &&
        h->height_from_caller <= height) {
        width  = h->width_from_caller;
        height = h->height_from_caller;
        cl = 0;
        ct = 0;
        cr = h->width - width;
        cb = h->height - height;
    } else {
        h->width_from_caller  = 0;
        h->height_from_caller = 0;
    }

    h->avctx->coded_width  = h->width;
    h->avctx->coded_height = h->height;
    h->avctx->width        = width;
    h->avctx->height       = height;
    h->crop_right          = cr;
    h->crop_left           = cl;
    h->crop_top            = ct;
    h->crop_bottom         = cb;
}

static int h264_slice_header_init(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    int i, ret;

    if (!sps) {
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    ff_set_sar(h->avctx, sps->sar);
    av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
                                     &h->chroma_x_shift, &h->chroma_y_shift);

    if (sps->timing_info_present_flag) {
        int64_t den = sps->time_scale;
        if (h->x264_build < 44U)
            den *= 2;
        av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
                  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
    }

    ff_h264_free_tables(h);

    h->first_field           = 0;
    h->prev_interlaced_frame = 1;

    init_scan_tables(h);
    ret = ff_h264_alloc_tables(h);
    if (ret < 0) {
        av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
        goto fail;
    }

    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
        sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
    ) {
        av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
               sps->bit_depth_luma);
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    h->cur_bit_depth_luma         =
    h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
    h->cur_chroma_format_idc      = sps->chroma_format_idc;
    h->pixel_shift                = sps->bit_depth_luma > 8;
    h->chroma_format_idc          = sps->chroma_format_idc;
    h->bit_depth_luma             = sps->bit_depth_luma;

    ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
                    sps->chroma_format_idc);
    ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
    ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
                      sps->chroma_format_idc);
    ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);

    if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
        ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
            goto fail;
        }
    } else {
        for (i = 0; i < h->nb_slice_ctx; i++) {
            H264SliceContext *sl = &h->slice_ctx[i];

            sl->h264               = h;
            sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[0]       = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
            sl->mvd_table[1]       = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;

            if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
                goto fail;
            }
        }
    }

    h->context_initialized = 1;

    return 0;
fail:
    ff_h264_free_tables(h);
    h->context_initialized = 0;
    return ret;
}

static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
{
    switch (a) {
    case AV_PIX_FMT_YUVJ420P: return AV_PIX_FMT_YUV420P;
    case AV_PIX_FMT_YUVJ422P: return AV_PIX_FMT_YUV422P;
    case AV_PIX_FMT_YUVJ444P: return AV_PIX_FMT_YUV444P;
    default:
        return a;
    }
}

static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
{
    const SPS *sps;
    int needs_reinit = 0, must_reinit, ret;

    if (first_slice) {
        av_buffer_unref(&h->ps.pps_ref);
        h->ps.pps = NULL;
        h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
        if (!h->ps.pps_ref)
            return AVERROR(ENOMEM);
        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
    }

    if (h->ps.sps != h->ps.pps->sps) {
        h->ps.sps = (const SPS*)h->ps.pps->sps;

        if (h->mb_width  != h->ps.sps->mb_width ||
            h->mb_height != h->ps.sps->mb_height ||
            h->cur_bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
        )
            needs_reinit = 1;

        if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
            h->chroma_format_idc != h->ps.sps->chroma_format_idc)
            needs_reinit = 1;
    }
    sps = h->ps.sps;

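    /* needs_reinit tracks parameter-set changes (a new SPS with different
     * geometry or format), while must_reinit compares the SPS against the
     * state the running context was actually initialized with; either one
     * forces a full reinit below. */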
    must_reinit = (h->context_initialized &&
                   (   16*sps->mb_width  != h->avctx->coded_width
                    || 16*sps->mb_height != h->avctx->coded_height
                    || h->cur_bit_depth_luma    != sps->bit_depth_luma
                    || h->cur_chroma_format_idc != sps->chroma_format_idc
                    || h->mb_width  != sps->mb_width
                    || h->mb_height != sps->mb_height
                   ));
    if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
        || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
        must_reinit = 1;

    if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
        must_reinit = 1;

    if (!h->setup_finished) {
        h->avctx->profile = ff_h264_get_profile(sps);
        h->avctx->level   = sps->level_idc;
        h->avctx->refs    = sps->ref_frame_count;

        h->mb_width  = sps->mb_width;
        h->mb_height = sps->mb_height;
        h->mb_num    = h->mb_width * h->mb_height;
        h->mb_stride = h->mb_width + 1;

        h->b_stride = h->mb_width * 4;

        h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p

        h->width  = 16 * h->mb_width;
        h->height = 16 * h->mb_height;

        init_dimensions(h);

        if (sps->video_signal_type_present_flag) {
            h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
                                                        : AVCOL_RANGE_MPEG;
            if (sps->colour_description_present_flag) {
                if (h->avctx->colorspace != sps->colorspace)
                    needs_reinit = 1;
                h->avctx->color_primaries = sps->color_primaries;
                h->avctx->color_trc       = sps->color_trc;
                h->avctx->colorspace      = sps->colorspace;
            }
        }

        if (h->sei.alternative_transfer.present &&
            av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) &&
            h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
            h->avctx->color_trc = h->sei.alternative_transfer.preferred_transfer_characteristics;
        }
    }
    h->avctx->chroma_sample_location = sps->chroma_location;

    if (!h->context_initialized || must_reinit || needs_reinit) {
        int flush_changes = h->context_initialized;
        h->context_initialized = 0;
        if (sl != h->slice_ctx) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "changing width %d -> %d / height %d -> %d on "
                   "slice %d\n",
                   h->width, h->avctx->coded_width,
                   h->height, h->avctx->coded_height,
                   h->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }

        av_assert1(first_slice);

        if (flush_changes)
            ff_h264_flush_change(h);

        if ((ret = get_pixel_format(h, 1)) < 0)
            return ret;
        h->avctx->pix_fmt = ret;

        av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
               "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));

        if ((ret = h264_slice_header_init(h)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "h264_slice_header_init() failed\n");
            return ret;
        }
    }

    return 0;
}

static int h264_export_frame_props(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *cur = h->cur_pic_ptr;
    AVFrame *out = cur->f;

    out->interlaced_frame = 0;
    out->repeat_pict      = 0;

    /* Signal interlacing information externally. */
    /* Prioritize picture timing SEI information over used
     * decoding process if it exists. */
    if (h->sei.picture_timing.present) {
        int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
                                                     h->avctx);
        if (ret < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
            if (h->avctx->err_recognition & AV_EF_EXPLODE)
                return ret;
            h->sei.picture_timing.present = 0;
        }
    }

    if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
        H264SEIPictureTiming *pt = &h->sei.picture_timing;
        switch (pt->pic_struct) {
        case H264_SEI_PIC_STRUCT_FRAME:
            break;
        case H264_SEI_PIC_STRUCT_TOP_FIELD:
        case H264_SEI_PIC_STRUCT_BOTTOM_FIELD:
            out->interlaced_frame = 1;
            break;
        case H264_SEI_PIC_STRUCT_TOP_BOTTOM:
        case H264_SEI_PIC_STRUCT_BOTTOM_TOP:
            if (FIELD_OR_MBAFF_PICTURE(h))
                out->interlaced_frame = 1;
            else
                // try to flag soft telecine progressive
                out->interlaced_frame = h->prev_interlaced_frame;
            break;
        case H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
        case H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM:
            /* Signal the possibility of telecined film externally
             * (pic_struct 5,6). From these hints, let the applications
             * decide if they apply deinterlacing. */
            out->repeat_pict = 1;
            break;
        case H264_SEI_PIC_STRUCT_FRAME_DOUBLING:
            out->repeat_pict = 2;
            break;
        case H264_SEI_PIC_STRUCT_FRAME_TRIPLING:
            out->repeat_pict = 4;
            break;
        }

        if ((pt->ct_type & 3) &&
            pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
            out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
    } else {
        /* Derive interlacing flag from used decoding process. */
        out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
    }
    h->prev_interlaced_frame = out->interlaced_frame;

    if (cur->field_poc[0] != cur->field_poc[1]) {
        /* Derive top_field_first from field pocs. */
        out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
    } else {
        if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
            /* Use picture timing SEI information. Even if it is
             * information from a past frame, better than nothing. */
            if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
                h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
                out->top_field_first = 1;
            else
                out->top_field_first = 0;
        } else if (out->interlaced_frame) {
            /* Default to top field first when pic_struct_present_flag
             * is not set but interlaced frame detected */
            out->top_field_first = 1;
        } else {
            /* Most likely progressive */
            out->top_field_first = 0;
        }
    }

    if (h->sei.frame_packing.present &&
        h->sei.frame_packing.arrangement_type <= 6 &&
        h->sei.frame_packing.content_interpretation_type > 0 &&
        h->sei.frame_packing.content_interpretation_type < 3) {
        H264SEIFramePacking *fp = &h->sei.frame_packing;
        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
        if (stereo) {
            switch (fp->arrangement_type) {
            case H264_SEI_FPA_TYPE_CHECKERBOARD:
                stereo->type = AV_STEREO3D_CHECKERBOARD;
                break;
            case H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN:
                stereo->type = AV_STEREO3D_COLUMNS;
                break;
            case H264_SEI_FPA_TYPE_INTERLEAVE_ROW:
                stereo->type = AV_STEREO3D_LINES;
                break;
            case H264_SEI_FPA_TYPE_SIDE_BY_SIDE:
                if (fp->quincunx_sampling_flag)
                    stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
                else
                    stereo->type = AV_STEREO3D_SIDEBYSIDE;
                break;
            case H264_SEI_FPA_TYPE_TOP_BOTTOM:
                stereo->type = AV_STEREO3D_TOPBOTTOM;
                break;
            case H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL:
                stereo->type = AV_STEREO3D_FRAMESEQUENCE;
                break;
            case H264_SEI_FPA_TYPE_2D:
                stereo->type = AV_STEREO3D_2D;
                break;
            }

            if (fp->content_interpretation_type == 2)
                stereo->flags = AV_STEREO3D_FLAG_INVERT;

            if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
                if (fp->current_frame_is_frame0_flag)
                    stereo->view = AV_STEREO3D_VIEW_LEFT;
                else
                    stereo->view = AV_STEREO3D_VIEW_RIGHT;
            }
        }
    }

    if (h->sei.display_orientation.present &&
        (h->sei.display_orientation.anticlockwise_rotation ||
         h->sei.display_orientation.hflip ||
         h->sei.display_orientation.vflip)) {
        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
        AVFrameSideData *rotation = av_frame_new_side_data(out,
                                                           AV_FRAME_DATA_DISPLAYMATRIX,
                                                           sizeof(int32_t) * 9);
        if (rotation) {
            /* av_display_rotation_set() expects the angle in the clockwise
             * direction, hence the first minus.
             * The below code applies the flips after the rotation, yet
             * the H.2645 specs require flipping to be applied first.
             * Because of R O(phi) = O(-phi) R (where R is flipping around
             * an arbitrary axis and O(phi) is the proper rotation by phi)
             * we can create display matrices as desired by negating
             * the degree once for every flip applied. */
            angle = -angle * (1 - 2 * !!o->hflip) * (1 - 2 * !!o->vflip);
            av_display_rotation_set((int32_t *)rotation->data, angle);
            av_display_matrix_flip((int32_t *)rotation->data,
                                   o->hflip, o->vflip);
        }
    }

    if (h->sei.afd.present) {
        AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
                                                     sizeof(uint8_t));

        if (sd) {
            *sd->data = h->sei.afd.active_format_description;
            h->sei.afd.present = 0;
        }
    }

    if (h->sei.a53_caption.buf_ref) {
        H264SEIA53Caption *a53 = &h->sei.a53_caption;

        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
        if (!sd)
            av_buffer_unref(&a53->buf_ref);
        a53->buf_ref = NULL;

        h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
    }

    for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
        H264SEIUnregistered *unreg = &h->sei.unregistered;

        if (unreg->buf_ref[i]) {
            AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
                                                                  AV_FRAME_DATA_SEI_UNREGISTERED,
                                                                  unreg->buf_ref[i]);
            if (!sd)
                av_buffer_unref(&unreg->buf_ref[i]);
            unreg->buf_ref[i] = NULL;
        }
    }
    h->sei.unregistered.nb_buf_ref = 0;

    if (h->sei.film_grain_characteristics.present) {
        H264SEIFilmGrainCharacteristics *fgc = &h->sei.film_grain_characteristics;
        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
        if (!fgp)
            return AVERROR(ENOMEM);

        fgp->type = AV_FILM_GRAIN_PARAMS_H274;
        fgp->seed = cur->poc + (h->poc_offset << 5);

        fgp->codec.h274.model_id = fgc->model_id;
        if (fgc->separate_colour_description_present_flag) {
            fgp->codec.h274.bit_depth_luma   = fgc->bit_depth_luma;
            fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
            fgp->codec.h274.color_range      = fgc->full_range + 1;
            fgp->codec.h274.color_primaries  = fgc->color_primaries;
            fgp->codec.h274.color_trc        = fgc->transfer_characteristics;
            fgp->codec.h274.color_space      = fgc->matrix_coeffs;
        } else {
            fgp->codec.h274.bit_depth_luma   = sps->bit_depth_luma;
            fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
            if (sps->video_signal_type_present_flag)
                fgp->codec.h274.color_range = sps->full_range + 1;
            else
                fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
            if (sps->colour_description_present_flag) {
                fgp->codec.h274.color_primaries = sps->color_primaries;
                fgp->codec.h274.color_trc       = sps->color_trc;
                fgp->codec.h274.color_space     = sps->colorspace;
            } else {
                fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
                fgp->codec.h274.color_trc       = AVCOL_TRC_UNSPECIFIED;
                fgp->codec.h274.color_space     = AVCOL_SPC_UNSPECIFIED;
            }
        }
        fgp->codec.h274.blending_mode_id  = fgc->blending_mode_id;
        fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;

        memcpy(&fgp->codec.h274.component_model_present, &fgc->component_model_present,
               sizeof(fgp->codec.h274.component_model_present));
        memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
               sizeof(fgp->codec.h274.num_intensity_intervals));
        memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
               sizeof(fgp->codec.h274.num_model_values));
        memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
               sizeof(fgp->codec.h274.intensity_interval_lower_bound));
        memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
               sizeof(fgp->codec.h274.intensity_interval_upper_bound));
        memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
               sizeof(fgp->codec.h274.comp_model_value));

        fgc->present = !!fgc->repetition_period;

        h->avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
    }

    if (h->sei.picture_timing.timecode_cnt > 0) {
        uint32_t *tc_sd;
        char tcbuf[AV_TIMECODE_STR_SIZE];

        AVFrameSideData *tcside = av_frame_new_side_data(out,
                                                         AV_FRAME_DATA_S12M_TIMECODE,
                                                         sizeof(uint32_t)*4);
        if (!tcside)
            return AVERROR(ENOMEM);

        tc_sd = (uint32_t*)tcside->data;
        tc_sd[0] = h->sei.picture_timing.timecode_cnt;

        for (int i = 0; i < tc_sd[0]; i++) {
            int drop = h->sei.picture_timing.timecode[i].dropframe;
            int   hh = h->sei.picture_timing.timecode[i].hours;
            int   mm = h->sei.picture_timing.timecode[i].minutes;
            int   ss = h->sei.picture_timing.timecode[i].seconds;
            int   ff = h->sei.picture_timing.timecode[i].frame;

            tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
            av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
            av_dict_set(&out->metadata, "timecode", tcbuf, 0);
        }
        h->sei.picture_timing.timecode_cnt = 0;
    }

    return 0;
}

static int h264_select_output_frame(H264Context *h)
{
    const SPS *sps = h->ps.sps;
    H264Picture *out = h->cur_pic_ptr;
    H264Picture *cur = h->cur_pic_ptr;
    int i, pics, out_of_order, out_idx;

    cur->mmco_reset = h->mmco_reset;
    h->mmco_reset = 0;

    if (sps->bitstream_restriction_flag ||
        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
    }

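    /* last_pocs[] holds the POCs of the most recent pictures in decode order.
     * Inserting cur->poc into this sorted window measures how far out of
     * order the stream delivers pictures, which bounds the reorder delay
     * (has_b_frames) the decoder must provide. */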
    for (i = 0; 1; i++) {
        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
            if(i)
                h->last_pocs[i-1] = cur->poc;
            break;
        } else if(i) {
            h->last_pocs[i-1] = h->last_pocs[i];
        }
    }
    out_of_order = MAX_DELAYED_PIC_COUNT - i;
    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - (int64_t)h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
        out_of_order = FFMAX(out_of_order, 1);
    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
            h->last_pocs[i] = INT_MIN;
        h->last_pocs[0] = cur->poc;
        cur->mmco_reset = 1;
    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
        int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
        av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
        h->avctx->has_b_frames = out_of_order;
    }

    pics = 0;
    while (h->delayed_pic[pics])
        pics++;

    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);

    h->delayed_pic[pics++] = cur;
    if (cur->reference == 0)
        cur->reference = DELAYED_PIC_REF;

    out     = h->delayed_pic[0];
    out_idx = 0;
    for (i = 1; h->delayed_pic[i] &&
                !h->delayed_pic[i]->f->key_frame &&
                !h->delayed_pic[i]->mmco_reset;
         i++)
        if (h->delayed_pic[i]->poc < out->poc) {
            out     = h->delayed_pic[i];
            out_idx = i;
        }
    if (h->avctx->has_b_frames == 0 &&
        (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
        h->next_outputed_poc = INT_MIN;
    out_of_order = out->poc < h->next_outputed_poc;

    if (out_of_order || pics > h->avctx->has_b_frames) {
        out->reference &= ~DELAYED_PIC_REF;
        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];
    }
    if (!out_of_order && pics > h->avctx->has_b_frames) {
        h->next_output_pic = out;
        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
            h->next_outputed_poc = INT_MIN;
        } else
            h->next_outputed_poc = out->poc;

        if (out->recovered) {
            // We have reached a recovery point and all frames after it in
            // display order are "recovered".
            h->frame_recovered |= FRAME_RECOVERED_SEI;
        }
        out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);

        if (!out->recovered) {
            if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
                !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
                h->next_output_pic = NULL;
            } else {
                out->f->flags |= AV_FRAME_FLAG_CORRUPT;
            }
        }
    } else {
        av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
    }

    return 0;
}

/* This function is called right after decoding the slice header for a first
 * slice in a field (or a frame). It decides whether we are decoding a new frame
 * or a second field in a pair and does the necessary setup.
 */
static int h264_field_start(H264Context *h, const H264SliceContext *sl,
                            const H2645NAL *nal, int first_slice)
{
    int i;
    const SPS *sps;

    int last_pic_structure, last_pic_droppable, ret;

    ret = h264_init_ps(h, sl, first_slice);
    if (ret < 0)
        return ret;

    sps = h->ps.sps;

    if (sps && sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames < sps->num_reorder_frames) {
        h->avctx->has_b_frames = sps->num_reorder_frames;
    }

    last_pic_droppable   = h->droppable;
    last_pic_structure   = h->picture_structure;
    h->droppable         = (nal->ref_idc == 0);
    h->picture_structure = sl->picture_structure;

    h->poc.frame_num        = sl->frame_num;
    h->poc.poc_lsb          = sl->poc_lsb;
    h->poc.delta_poc_bottom = sl->delta_poc_bottom;
    h->poc.delta_poc[0]     = sl->delta_poc[0];
    h->poc.delta_poc[1]     = sl->delta_poc[1];

    if (nal->type == H264_NAL_IDR_SLICE)
        h->poc_offset = sl->idr_pic_id;
    else if (h->picture_intra_only)
        h->poc_offset = 0;

    /* Shorten frame num gaps so we don't have to allocate reference
     * frames just to throw them away */
    if (h->poc.frame_num != h->poc.prev_frame_num) {
        int unwrap_prev_frame_num = h->poc.prev_frame_num;
        int max_frame_num         = 1 << sps->log2_max_frame_num;

        if (unwrap_prev_frame_num > h->poc.frame_num)
            unwrap_prev_frame_num -= max_frame_num;

        if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
            unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
            if (unwrap_prev_frame_num < 0)
                unwrap_prev_frame_num += max_frame_num;

            h->poc.prev_frame_num = unwrap_prev_frame_num;
        }
    }

    /* See if we have a decoded first field looking for a pair...
     * Here, we're using that to see if we should mark previously
     * decoded frames as "finished".
     * We have to do that before the "dummy" in-between frame allocation,
     * since that can modify h->cur_pic_ptr. */
    if (h->first_field) {
        int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
        av_assert0(h->cur_pic_ptr);
        av_assert0(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* Mark old field/frame as completed */
        if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
        }

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            if (last_pic_structure != PICT_FRAME) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          last_pic_structure == PICT_TOP_FIELD);
            }
        } else {
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                /* This and previous field were reference, but had
                 * different frame_nums. Consider this field first in
                 * pair. Throw away previous field except for reference
                 * purposes. */
                if (last_pic_structure != PICT_FRAME) {
                    ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                              last_pic_structure == PICT_TOP_FIELD);
                }
            } else {
                /* Second field in complementary pair */
                if (!((last_pic_structure == PICT_TOP_FIELD &&
                       h->picture_structure == PICT_BOTTOM_FIELD) ||
                      (last_pic_structure == PICT_BOTTOM_FIELD &&
                       h->picture_structure == PICT_TOP_FIELD))) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "Invalid field mode combination %d/%d\n",
                           last_pic_structure, h->picture_structure);
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_INVALIDDATA;
                } else if (last_pic_droppable != h->droppable) {
                    avpriv_request_sample(h->avctx,
                                          "Found reference and non-reference fields in the same frame, which");
                    h->picture_structure = last_pic_structure;
                    h->droppable         = last_pic_droppable;
                    return AVERROR_PATCHWELCOME;
                }
            }
        }
    }

    while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
           h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
        H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
        av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
               h->poc.frame_num, h->poc.prev_frame_num);
        if (!sps->gaps_in_frame_num_allowed_flag)
            for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
                h->last_pocs[i] = INT_MIN;
        ret = h264_frame_start(h);
        if (ret < 0) {
            h->first_field = 0;
            return ret;
        }

        h->poc.prev_frame_num++;
        h->poc.prev_frame_num        %= 1 << sps->log2_max_frame_num;
        h->cur_pic_ptr->frame_num     = h->poc.prev_frame_num;
        h->cur_pic_ptr->invalid_gap   = !sps->gaps_in_frame_num_allowed_flag;
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);

        h->explicit_ref_marking = 0;
        ret = ff_h264_execute_ref_pic_marking(h);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            return ret;
        /* Error concealment: If a ref is missing, copy the previous ref
         * in its place.
         * FIXME: Avoiding a memcpy would be nice, but ref handling makes
         * many assumptions about there being no actual duplicates.
         * FIXME: This does not copy padding for out-of-frame motion
         * vectors. Given we are concealing a lost frame, this probably
         * is not noticeable by comparison, but it should be fixed. */
        if (h->short_ref_count) {
            int c[4] = {
                1<<(h->ps.sps->bit_depth_luma-1),
                1<<(h->ps.sps->bit_depth_chroma-1),
                1<<(h->ps.sps->bit_depth_chroma-1),
                -1
            };

            if (prev &&
                h->short_ref[0]->f->width == prev->f->width &&
                h->short_ref[0]->f->height == prev->f->height &&
                h->short_ref[0]->f->format == prev->f->format) {
                ff_thread_await_progress(&prev->tf, INT_MAX, 0);
                if (prev->field_picture)
                    ff_thread_await_progress(&prev->tf, INT_MAX, 1);
                ff_thread_release_buffer(h->avctx, &h->short_ref[0]->tf);
                h->short_ref[0]->tf.f = h->short_ref[0]->f;
                ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
                if (ret < 0)
                    return ret;
                h->short_ref[0]->poc = prev->poc + 2U;
                ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
                if (h->short_ref[0]->field_picture)
                    ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
            } else if (!h->frame_recovered && !h->avctx->hwaccel)
                ff_color_frame(h->short_ref[0]->f, c);
            h->short_ref[0]->frame_num = h->poc.prev_frame_num;
        }
    }

    /* See if we have a decoded first field looking for a pair...
     * We're using that to see whether to continue decoding in that
     * frame, or to allocate a new one. */
    if (h->first_field) {
        av_assert0(h->cur_pic_ptr);
        av_assert0(h->cur_pic_ptr->f->buf[0]);
        assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);

        /* figure out if we have a complementary field pair */
        if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
            /* Previous field is unmatched. Don't display it, but let it
             * remain for reference if marked as such. */
            h->missing_fields++;
            h->cur_pic_ptr = NULL;
            h->first_field = FIELD_PICTURE(h);
        } else {
            h->missing_fields = 0;
            if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
                ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                          h->picture_structure==PICT_BOTTOM_FIELD);
                /* This and the previous field had different frame_nums.
                 * Consider this field first in pair. Throw away previous
                 * one except for reference purposes. */
                h->first_field = 1;
                h->cur_pic_ptr = NULL;
            } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
                /* This frame was already output, we cannot draw into it
                 * anymore.
                 */
                h->first_field = 1;
                h->cur_pic_ptr = NULL;
            } else {
                /* Second field in complementary pair */
                h->first_field = 0;
            }
        }
    } else {
        /* Frame or first field in a potentially complementary pair */
        h->first_field = FIELD_PICTURE(h);
    }

    if (!FIELD_PICTURE(h) || h->first_field) {
        if (h264_frame_start(h) < 0) {
            h->first_field = 0;
            return AVERROR_INVALIDDATA;
        }
    } else {
        int field = h->picture_structure == PICT_BOTTOM_FIELD;
        release_unused_pictures(h, 0);
        h->cur_pic_ptr->tf.owner[field] = h->avctx;
    }
    /* Some macroblocks can be accessed before they're available in case
     * of lost slices, MBAFF or threading. */
    if (FIELD_PICTURE(h)) {
        for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
            memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
    } else {
        memset(h->slice_table, -1,
               (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
    }

    ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
                           h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
    if (ret < 0)
        return ret;

    memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
    h->nb_mmco = sl->nb_mmco;
    h->explicit_ref_marking = sl->explicit_ref_marking;

    h->picture_idr = nal->type == H264_NAL_IDR_SLICE;

    if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
        const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;

        if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
            h->valid_recovery_point = 1;

        if (   h->recovery_frame < 0
            || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
            h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);

            if (!h->valid_recovery_point)
                h->recovery_frame = h->poc.frame_num;
        }
    }

    h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);

    if (nal->type == H264_NAL_IDR_SLICE ||
        (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
        h->recovery_frame         = -1;
        h->cur_pic_ptr->recovered = 1;
    }
    // If we have an IDR, all frames after it in decoded order are
    // "recovered".
    if (nal->type == H264_NAL_IDR_SLICE)
        h->frame_recovered |= FRAME_RECOVERED_IDR;
#if 1
    h->cur_pic_ptr->recovered |= h->frame_recovered;
#else
    h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
#endif
1821 
1822  /* Set the frame properties/side data. Only done for the second field in
1823  * field coded frames, since some SEI information is present for each field
1824  * and is merged by the SEI parsing code. */
1825  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
1826  ret = h264_export_frame_props(h);
1827  if (ret < 0)
1828  return ret;
1829 
1830  ret = h264_select_output_frame(h);
1831  if (ret < 0)
1832  return ret;
1833  }
1834 
1835  return 0;
1836 }
1837 
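h264_field_start() above compares recovery-point distances with modular frame_num arithmetic. A self-contained sketch of that comparison; mod_uintp2() is a local stand-in mimicking av_mod_uintp2(), and the sample numbers are assumptions:

#include <stdio.h>

/* Reduce a value modulo 2^p, as av_mod_uintp2() does. */
static unsigned mod_uintp2(unsigned v, unsigned p)
{
    return v & ((1U << p) - 1);
}

int main(void)
{
    unsigned log2_max_frame_num = 4;  /* assumed: frame_num wraps at 16 */
    unsigned frame_num          = 14; /* current slice */
    unsigned recovery_frame     = 2;  /* pending recovery point */
    unsigned sei_recovery_frame_cnt = 5;

    /* Wrapped distance from the current frame to the pending recovery
     * point: 14 -> 15 -> 0 -> 1 -> 2 is 4 steps. */
    unsigned dist = mod_uintp2(recovery_frame - frame_num, log2_max_frame_num);
    printf("wrapped distance: %u (re-anchor if > %u)\n",
           dist, sei_recovery_frame_cnt);
    return 0;
}
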
1838 static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
1839  const H2645NAL *nal)
1840 {
1841  const SPS *sps;
1842  const PPS *pps;
1843  int ret;
1844  unsigned int slice_type, tmp, i;
1845  int field_pic_flag, bottom_field_flag;
1846  int first_slice = sl == h->slice_ctx && !h->current_slice;
1847  int picture_structure;
1848 
1849  if (first_slice)
1850  av_assert0(!h->setup_finished);
1851 
1852  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1853 
1854  slice_type = get_ue_golomb_31(&sl->gb);
1855  if (slice_type > 9) {
1856  av_log(h->avctx, AV_LOG_ERROR,
1857  "slice type %d too large at %d\n",
1858  slice_type, sl->first_mb_addr);
1859  return AVERROR_INVALIDDATA;
1860  }
1861  if (slice_type > 4) {
1862  slice_type -= 5;
1863  sl->slice_type_fixed = 1;
1864  } else
1865  sl->slice_type_fixed = 0;
1866 
1867  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1868  sl->slice_type = slice_type;
1869  sl->slice_type_nos = slice_type & 3;
1870 
1871  if (nal->type == H264_NAL_IDR_SLICE &&
1872  sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1873  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1874  return AVERROR_INVALIDDATA;
1875  }
1876 
1877  sl->pps_id = get_ue_golomb(&sl->gb);
1878  if (sl->pps_id >= MAX_PPS_COUNT) {
1879  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1880  return AVERROR_INVALIDDATA;
1881  }
1882  if (!h->ps.pps_list[sl->pps_id]) {
1883  av_log(h->avctx, AV_LOG_ERROR,
1884  "non-existing PPS %u referenced\n",
1885  sl->pps_id);
1886  return AVERROR_INVALIDDATA;
1887  }
1888  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1889  sps = pps->sps;
1890 
1891  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
1892  if (!first_slice) {
1893  if (h->poc.frame_num != sl->frame_num) {
1894  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1895  h->poc.frame_num, sl->frame_num);
1896  return AVERROR_INVALIDDATA;
1897  }
1898  }
1899 
1900  sl->mb_mbaff = 0;
1901 
1902  if (sps->frame_mbs_only_flag) {
1903  picture_structure = PICT_FRAME;
1904  } else {
1905  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1906  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1907  return -1;
1908  }
1909  field_pic_flag = get_bits1(&sl->gb);
1910  if (field_pic_flag) {
1911  bottom_field_flag = get_bits1(&sl->gb);
1912  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1913  } else {
1914  picture_structure = PICT_FRAME;
1915  }
1916  }
1917  sl->picture_structure = picture_structure;
1918  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1919 
1920  if (picture_structure == PICT_FRAME) {
1921  sl->curr_pic_num = sl->frame_num;
1922  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1923  } else {
1924  sl->curr_pic_num = 2 * sl->frame_num + 1;
1925  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1926  }
1927 
1928  if (nal->type == H264_NAL_IDR_SLICE) {
1929  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1930  if (idr_pic_id < 65536) {
1931  sl->idr_pic_id = idr_pic_id;
1932  } else
1933  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1934  }
1935 
1936  sl->poc_lsb = 0;
1937  sl->delta_poc_bottom = 0;
1938  if (sps->poc_type == 0) {
1939  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1940 
1941  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1942  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1943  }
1944 
1945  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1946  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1947  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1948 
1949  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1950  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1951  }
1952 
1953  sl->redundant_pic_count = 0;
1954  if (pps->redundant_pic_cnt_present)
1955  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1956 
1957  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1958  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1959 
1960  ret = h264_parse_ref_count(&sl->list_count, sl->ref_count,
1961  &sl->gb, pps, sl->slice_type_nos,
1962  picture_structure, h->avctx);
1963  if (ret < 0)
1964  return ret;
1965 
1966  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
1967  ret = ff_h264_decode_ref_pic_list_reordering(sl, h->avctx);
1968  if (ret < 0) {
1969  sl->ref_count[1] = sl->ref_count[0] = 0;
1970  return ret;
1971  }
1972  }
1973 
1974  sl->pwt.use_weight = 0;
1975  for (i = 0; i < 2; i++) {
1976  sl->pwt.luma_weight_flag[i] = 0;
1977  sl->pwt.chroma_weight_flag[i] = 0;
1978  }
1979  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1980  (pps->weighted_bipred_idc == 1 &&
1981  sl->slice_type_nos == AV_PICTURE_TYPE_B)) {
1982  ret = ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
1983  sl->slice_type_nos, &sl->pwt,
1984  picture_structure, h->avctx);
1985  if (ret < 0)
1986  return ret;
1987  }
1988 
1989  sl->explicit_ref_marking = 0;
1990  if (nal->ref_idc) {
1991  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1992  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1993  return AVERROR_INVALIDDATA;
1994  }
1995 
1996  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1997  tmp = get_ue_golomb_31(&sl->gb);
1998  if (tmp > 2) {
1999  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
2000  return AVERROR_INVALIDDATA;
2001  }
2002  sl->cabac_init_idc = tmp;
2003  }
2004 
2005  sl->last_qscale_diff = 0;
2006  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
2007  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
2008  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
2009  return AVERROR_INVALIDDATA;
2010  }
2011  sl->qscale = tmp;
2012  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
2013  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
2014  // FIXME qscale / qp ... stuff
2015  if (sl->slice_type == AV_PICTURE_TYPE_SP)
2016  get_bits1(&sl->gb); /* sp_for_switch_flag */
2017  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
2018  sl->slice_type == AV_PICTURE_TYPE_SI)
2019  get_se_golomb(&sl->gb); /* slice_qs_delta */
2020 
2021  sl->deblocking_filter = 1;
2022  sl->slice_alpha_c0_offset = 0;
2023  sl->slice_beta_offset = 0;
2024  if (pps->deblocking_filter_parameters_present) {
2025  tmp = get_ue_golomb_31(&sl->gb);
2026  if (tmp > 2) {
2027  av_log(h->avctx, AV_LOG_ERROR,
2028  "deblocking_filter_idc %u out of range\n", tmp);
2029  return AVERROR_INVALIDDATA;
2030  }
2031  sl->deblocking_filter = tmp;
2032  if (sl->deblocking_filter < 2)
2033  sl->deblocking_filter ^= 1; // 1<->0
2034 
2035  if (sl->deblocking_filter) {
2036  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
2037  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
2038  if (slice_alpha_c0_offset_div2 > 6 ||
2039  slice_alpha_c0_offset_div2 < -6 ||
2040  slice_beta_offset_div2 > 6 ||
2041  slice_beta_offset_div2 < -6) {
2042  av_log(h->avctx, AV_LOG_ERROR,
2043  "deblocking filter parameters %d %d out of range\n",
2044  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
2045  return AVERROR_INVALIDDATA;
2046  }
2047  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
2048  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
2049  }
2050  }
2051 
2052  return 0;
2053 }
2054 
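Most syntax elements read by h264_slice_header_parse() are Exp-Golomb coded. The following self-contained sketch shows the ue(v) and se(v) decodes that the get_ue_golomb()/get_se_golomb() calls perform; the simplified bit reader here is an illustration, not FFmpeg's GetBitContext API:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef struct { const uint8_t *buf; size_t pos; } BitReader;

static unsigned read_bit(BitReader *br)
{
    unsigned b = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return b;
}

/* ue(v): count leading zero bits, then read as many suffix bits. */
static unsigned read_ue(BitReader *br)
{
    int zeros = 0;
    unsigned v = 1;
    while (!read_bit(br))
        zeros++;
    while (zeros--)
        v = (v << 1) | read_bit(br);
    return v - 1;
}

/* se(v): ue(v) mapped to 0, 1, -1, 2, -2, ... */
static int read_se(BitReader *br)
{
    unsigned u = read_ue(br);
    return (u & 1) ? (int)((u + 1) >> 1) : -(int)(u >> 1);
}

int main(void)
{
    const uint8_t data[] = { 0xA6, 0x40 }; /* bits: 1 | 010 | 011 | 00100 */
    BitReader br = { data, 0 };
    unsigned a = read_ue(&br); /* 0 */
    unsigned b = read_ue(&br); /* 1 */
    unsigned c = read_ue(&br); /* 2 */
    int      d = read_se(&br); /* ue 3 -> se +2 */
    printf("%u %u %u %d\n", a, b, c, d);
    return 0;
}
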
2055 /* do all the per-slice initialization needed before we can start decoding the
2056  * actual MBs */
2057 static int h264_slice_init(H264Context *h, H264SliceContext *sl,
2058  const H2645NAL *nal)
2059 {
2060  int i, j, ret = 0;
2061 
2062  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
2063  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
2064  return AVERROR_INVALIDDATA;
2065  }
2066 
2067  av_assert1(h->mb_num == h->mb_width * h->mb_height);
2068  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
2069  sl->first_mb_addr >= h->mb_num) {
2070  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
2071  return AVERROR_INVALIDDATA;
2072  }
2073  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
2074  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
2075  FIELD_OR_MBAFF_PICTURE(h);
2076  if (h->picture_structure == PICT_BOTTOM_FIELD)
2077  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
2078  av_assert1(sl->mb_y < h->mb_height);
2079 
2080  ret = ff_h264_build_ref_list(h, sl);
2081  if (ret < 0)
2082  return ret;
2083 
2084  if (h->ps.pps->weighted_bipred_idc == 2 &&
2085  sl->slice_type_nos == AV_PICTURE_TYPE_B) {
2086  implicit_weight_table(h, sl, -1);
2087  if (FRAME_MBAFF(h)) {
2088  implicit_weight_table(h, sl, 0);
2089  implicit_weight_table(h, sl, 1);
2090  }
2091  }
2092 
2092 
2093  if (sl->slice_type_nos == AV_PICTURE_TYPE_B && !sl->direct_spatial_mv_pred)
2094  ff_h264_direct_dist_scale_factor(h, sl);
2095  if (!h->setup_finished)
2096  ff_h264_direct_ref_list_init(h, sl);
2097 
2098  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
2099  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
2100  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
2101  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
2102  sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2103  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
2104  sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2105  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
2106  nal->ref_idc == 0))
2107  sl->deblocking_filter = 0;
2108 
2109  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
2110  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2111  /* Cheat slightly for speed:
2112  * Do not bother to deblock across slices. */
2113  sl->deblocking_filter = 2;
2114  } else {
2115  h->postpone_filter = 1;
2116  }
2117  }
2118  sl->qp_thresh = 15 -
2119  FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
2120  FFMAX3(0,
2121  h->ps.pps->chroma_qp_index_offset[0],
2122  h->ps.pps->chroma_qp_index_offset[1]) +
2123  6 * (h->ps.sps->bit_depth_luma - 8);
2124 
2125  sl->slice_num = ++h->current_slice;
2126 
2127  if (sl->slice_num)
2128  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
2129  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2130  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2131  && sl->slice_num >= MAX_SLICES) {
2132  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2133  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2134  }
2135 
2136  for (j = 0; j < 2; j++) {
2137  int id_list[16];
2138  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2139  for (i = 0; i < 16; i++) {
2140  id_list[i] = 60;
2141  if (j < sl->list_count && i < sl->ref_count[j] &&
2142  sl->ref_list[j][i].parent->f->buf[0]) {
2143  int k;
2144  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
2145  for (k = 0; k < h->short_ref_count; k++)
2146  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2147  id_list[i] = k;
2148  break;
2149  }
2150  for (k = 0; k < h->long_ref_count; k++)
2151  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2152  id_list[i] = h->short_ref_count + k;
2153  break;
2154  }
2155  }
2156  }
2157 
2158  ref2frm[0] =
2159  ref2frm[1] = -1;
2160  for (i = 0; i < 16; i++)
2161  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2162  ref2frm[18 + 0] =
2163  ref2frm[18 + 1] = -1;
2164  for (i = 16; i < 48; i++)
2165  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2166  (sl->ref_list[j][i].reference & 3);
2167  }
2168 
2169  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2170  av_log(h->avctx, AV_LOG_DEBUG,
2171  "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2172  sl->slice_num,
2173  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
2174  sl->mb_y * h->mb_width + sl->mb_x,
2175  av_get_picture_type_char(sl->slice_type),
2176  sl->slice_type_fixed ? " fix" : "",
2177  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2178  h->poc.frame_num,
2179  h->cur_pic_ptr->field_poc[0],
2180  h->cur_pic_ptr->field_poc[1],
2181  sl->ref_count[0], sl->ref_count[1],
2182  sl->qscale,
2183  sl->deblocking_filter,
2184  sl->slice_alpha_c0_offset, sl->slice_beta_offset,
2185  sl->pwt.use_weight,
2186  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2187  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2188  }
2189 
2190  return 0;
2191 }
2192 
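The qp_thresh assignment in h264_slice_init() above bounds the QP below which deblocking provably does nothing (it is consumed by fill_filter_caches() further down). A worked sketch of the same formula, with assumed slice parameters and 8-bit luma:

#include <stdio.h>

static int min2(int a, int b) { return a < b ? a : b; }
static int max3(int a, int b, int c) { int m = a > b ? a : b; return m > c ? m : c; }

int main(void)
{
    int slice_alpha_c0_offset = -4; /* assumed: slice_alpha_c0_offset_div2 = -2 */
    int slice_beta_offset     =  0;
    int chroma_qp_index_offset[2] = { 2, 2 };
    int bit_depth_luma = 8;

    int qp_thresh = 15
        - min2(slice_alpha_c0_offset, slice_beta_offset)
        - max3(0, chroma_qp_index_offset[0], chroma_qp_index_offset[1])
        + 6 * (bit_depth_luma - 8);
    printf("qp_thresh = %d\n", qp_thresh); /* 15 - (-4) - 2 + 0 = 17 */
    return 0;
}
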
2193 int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
2194 {
2195  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2196  int first_slice = sl == h->slice_ctx && !h->current_slice;
2197  int ret;
2198 
2199  sl->gb = nal->gb;
2200 
2201  ret = h264_slice_header_parse(h, sl, nal);
2202  if (ret < 0)
2203  return ret;
2204 
2205  // discard redundant pictures
2206  if (sl->redundant_pic_count > 0) {
2207  sl->ref_count[0] = sl->ref_count[1] = 0;
2208  return 0;
2209  }
2210 
2211  if (sl->first_mb_addr == 0 || !h->current_slice) {
2212  if (h->setup_finished) {
2213  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2214  return AVERROR_INVALIDDATA;
2215  }
2216  }
2217 
2218  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2219  if (h->current_slice) {
2220  // this slice starts a new field
2221  // first decode any pending queued slices
2222  if (h->nb_slice_ctx_queued) {
2223  H264SliceContext tmp_ctx;
2224 
2225  ret = ff_h264_execute_decode_slices(h);
2226  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2227  return ret;
2228 
2229  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2230  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2231  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2232  sl = h->slice_ctx;
2233  }
2234 
2235  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2236  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2237  if (ret < 0)
2238  return ret;
2239  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2240  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2241  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2242  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2243  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2244  h->cur_pic_ptr = NULL;
2245  if (ret < 0)
2246  return ret;
2247  } else
2248  return AVERROR_INVALIDDATA;
2249  }
2250 
2251  if (!h->first_field) {
2252  if (h->cur_pic_ptr && !h->droppable) {
2253  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2254  h->picture_structure == PICT_BOTTOM_FIELD);
2255  }
2256  h->cur_pic_ptr = NULL;
2257  }
2258  }
2259 
2260  if (!h->current_slice)
2261  av_assert0(sl == h->slice_ctx);
2262 
2263  if (h->current_slice == 0 && !h->first_field) {
2264  if (
2265  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2266  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2267  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2268  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2269  h->avctx->skip_frame >= AVDISCARD_ALL) {
2270  return 0;
2271  }
2272  }
2273 
2274  if (!first_slice) {
2275  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2276 
2277  if (h->ps.pps->sps_id != pps->sps_id ||
2278  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2279  (h->setup_finished && h->ps.pps != pps)*/) {
2280  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2281  return AVERROR_INVALIDDATA;
2282  }
2283  if (h->ps.sps != pps->sps) {
2284  av_log(h->avctx, AV_LOG_ERROR,
2285  "SPS changed in the middle of the frame\n");
2286  return AVERROR_INVALIDDATA;
2287  }
2288  }
2289 
2290  if (h->current_slice == 0) {
2291  ret = h264_field_start(h, sl, nal, first_slice);
2292  if (ret < 0)
2293  return ret;
2294  } else {
2295  if (h->picture_structure != sl->picture_structure ||
2296  h->droppable != (nal->ref_idc == 0)) {
2297  av_log(h->avctx, AV_LOG_ERROR,
2298  "Changing field mode (%d -> %d) between slices is not allowed\n",
2299  h->picture_structure, sl->picture_structure);
2300  return AVERROR_INVALIDDATA;
2301  } else if (!h->cur_pic_ptr) {
2302  av_log(h->avctx, AV_LOG_ERROR,
2303  "unset cur_pic_ptr on slice %d\n",
2304  h->current_slice + 1);
2305  return AVERROR_INVALIDDATA;
2306  }
2307  }
2308 
2309  ret = h264_slice_init(h, sl, nal);
2310  if (ret < 0)
2311  return ret;
2312 
2313  h->nb_slice_ctx_queued++;
2314 
2315  return 0;
2316 }
2317 
2318 int ff_h264_get_slice_type(const H264SliceContext *sl)
2319 {
2320  switch (sl->slice_type) {
2321  case AV_PICTURE_TYPE_P:
2322  return 0;
2323  case AV_PICTURE_TYPE_B:
2324  return 1;
2325  case AV_PICTURE_TYPE_I:
2326  return 2;
2327  case AV_PICTURE_TYPE_SP:
2328  return 3;
2329  case AV_PICTURE_TYPE_SI:
2330  return 4;
2331  default:
2332  return AVERROR_INVALIDDATA;
2333  }
2334 }
2335 
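ff_h264_get_slice_type() above is the inverse of the fold applied during parsing: bitstream slice_type values 5..9 assert that every slice in the picture has the same type and are reduced to 0..4 with slice_type_fixed set. A small standalone sketch of that fold (the name table is illustrative):

#include <stdio.h>

int main(void)
{
    static const char *name[5] = { "P", "B", "I", "SP", "SI" };
    unsigned slice_type = 7;   /* assumed: "fixed" variant of slice_type 2 */
    int fixed = slice_type > 4;

    if (fixed)
        slice_type -= 5;       /* fold 5..9 onto 0..4 */
    printf("type %s, slice_type_fixed=%d\n", name[slice_type], fixed); /* I, 1 */
    return 0;
}
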
2336 static av_always_inline void fill_filter_caches_inter(const H264Context *h,
2337  H264SliceContext *sl,
2338  int mb_type, int top_xy,
2339  int left_xy[LEFT_MBS],
2340  int top_type,
2341  int left_type[LEFT_MBS],
2342  int mb_xy, int list)
2343 {
2344  int b_stride = h->b_stride;
2345  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2346  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2347  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
2348  if (USES_LIST(top_type, list)) {
2349  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2350  const int b8_xy = 4 * top_xy + 2;
2351  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2352  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2353  ref_cache[0 - 1 * 8] =
2354  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2355  ref_cache[2 - 1 * 8] =
2356  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2357  } else {
2358  AV_ZERO128(mv_dst - 1 * 8);
2359  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2360  }
2361 
2362  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2363  if (USES_LIST(left_type[LTOP], list)) {
2364  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2365  const int b8_xy = 4 * left_xy[LTOP] + 1;
2366  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2367  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2368  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2369  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2370  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2371  ref_cache[-1 + 0] =
2372  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2373  ref_cache[-1 + 16] =
2374  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2375  } else {
2376  AV_ZERO32(mv_dst - 1 + 0);
2377  AV_ZERO32(mv_dst - 1 + 8);
2378  AV_ZERO32(mv_dst - 1 + 16);
2379  AV_ZERO32(mv_dst - 1 + 24);
2380  ref_cache[-1 + 0] =
2381  ref_cache[-1 + 8] =
2382  ref_cache[-1 + 16] =
2383  ref_cache[-1 + 24] = LIST_NOT_USED;
2384  }
2385  }
2386  }
2387 
2388  if (!USES_LIST(mb_type, list)) {
2389  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2390  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2391  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2392  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2393  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2394  return;
2395  }
2396 
2397  {
2398  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2399  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2400  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2401  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2402  AV_WN32A(&ref_cache[0 * 8], ref01);
2403  AV_WN32A(&ref_cache[1 * 8], ref01);
2404  AV_WN32A(&ref_cache[2 * 8], ref23);
2405  AV_WN32A(&ref_cache[3 * 8], ref23);
2406  }
2407 
2408  {
2409  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2410  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2411  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2412  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2413  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2414  }
2415 }
2416 
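fill_filter_caches_inter() above fills a 4-entry row of ref_cache with one 32-bit store: masking the packed pair of reference ids to its low bytes and multiplying by 0x0101 duplicates each id into the neighbouring byte. A standalone sketch of that byte replication (pack16to32 is modelled little-endian here, which is an assumption):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint8_t ref0 = 3, ref1 = 7;

    /* pack16to32(ref0, ref1) on an assumed little-endian target. */
    uint32_t packed = (uint32_t)ref0 | ((uint32_t)ref1 << 16);
    uint32_t row    = (packed & 0x00FF00FF) * 0x0101;

    /* Bytes, low to high: ref0 ref0 ref1 ref1 -> "3 3 7 7". */
    for (int i = 0; i < 4; i++)
        printf("%u ", (unsigned)((row >> (8 * i)) & 0xFF));
    printf("\n");
    return 0;
}
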
2417 /**
2418  * @return non zero if the loop filter can be skipped
2419  */
2420 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2421 {
2422  const int mb_xy = sl->mb_xy;
2423  int top_xy, left_xy[LEFT_MBS];
2424  int top_type, left_type[LEFT_MBS];
2425  uint8_t *nnz;
2426  uint8_t *nnz_cache;
2427 
2428  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2429 
2430  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2431  if (FRAME_MBAFF(h)) {
2432  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2433  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2434  if (sl->mb_y & 1) {
2435  if (left_mb_field_flag != curr_mb_field_flag)
2436  left_xy[LTOP] -= h->mb_stride;
2437  } else {
2438  if (curr_mb_field_flag)
2439  top_xy += h->mb_stride &
2440  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2441  if (left_mb_field_flag != curr_mb_field_flag)
2442  left_xy[LBOT] += h->mb_stride;
2443  }
2444  }
2445 
2446  sl->top_mb_xy = top_xy;
2447  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2448  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2449  {
2450  /* For sufficiently low qp, filtering wouldn't do anything.
2451  * This is a conservative estimate: could also check beta_offset
2452  * and more accurate chroma_qp. */
2453  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2454  int qp = h->cur_pic.qscale_table[mb_xy];
2455  if (qp <= qp_thresh &&
2456  (left_xy[LTOP] < 0 ||
2457  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2458  (top_xy < 0 ||
2459  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2460  if (!FRAME_MBAFF(h))
2461  return 1;
2462  if ((left_xy[LTOP] < 0 ||
2463  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2464  (top_xy < h->mb_stride ||
2465  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2466  return 1;
2467  }
2468  }
2469 
2470  top_type = h->cur_pic.mb_type[top_xy];
2471  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2472  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2473  if (sl->deblocking_filter == 2) {
2474  if (h->slice_table[top_xy] != sl->slice_num)
2475  top_type = 0;
2476  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2477  left_type[LTOP] = left_type[LBOT] = 0;
2478  } else {
2479  if (h->slice_table[top_xy] == 0xFFFF)
2480  top_type = 0;
2481  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2482  left_type[LTOP] = left_type[LBOT] = 0;
2483  }
2484  sl->top_type = top_type;
2485  sl->left_type[LTOP] = left_type[LTOP];
2486  sl->left_type[LBOT] = left_type[LBOT];
2487 
2488  if (IS_INTRA(mb_type))
2489  return 0;
2490 
2491  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2492  top_type, left_type, mb_xy, 0);
2493  if (sl->list_count == 2)
2494  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2495  top_type, left_type, mb_xy, 1);
2496 
2497  nnz = h->non_zero_count[mb_xy];
2498  nnz_cache = sl->non_zero_count_cache;
2499  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2500  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2501  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2502  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2503  sl->cbp = h->cbp_table[mb_xy];
2504 
2505  if (top_type) {
2506  nnz = h->non_zero_count[top_xy];
2507  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2508  }
2509 
2510  if (left_type[LTOP]) {
2511  nnz = h->non_zero_count[left_xy[LTOP]];
2512  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2513  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2514  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2515  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2516  }
2517 
2518  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2519  * from what the loop filter needs */
2520  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2521  if (IS_8x8DCT(top_type)) {
2522  nnz_cache[4 + 8 * 0] =
2523  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2524  nnz_cache[6 + 8 * 0] =
2525  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2526  }
2527  if (IS_8x8DCT(left_type[LTOP])) {
2528  nnz_cache[3 + 8 * 1] =
2529  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2530  }
2531  if (IS_8x8DCT(left_type[LBOT])) {
2532  nnz_cache[3 + 8 * 3] =
2533  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2534  }
2535 
2536  if (IS_8x8DCT(mb_type)) {
2537  nnz_cache[scan8[0]] =
2538  nnz_cache[scan8[1]] =
2539  nnz_cache[scan8[2]] =
2540  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2541 
2542  nnz_cache[scan8[0 + 4]] =
2543  nnz_cache[scan8[1 + 4]] =
2544  nnz_cache[scan8[2 + 4]] =
2545  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2546 
2547  nnz_cache[scan8[0 + 8]] =
2548  nnz_cache[scan8[1 + 8]] =
2549  nnz_cache[scan8[2 + 8]] =
2550  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2551 
2552  nnz_cache[scan8[0 + 12]] =
2553  nnz_cache[scan8[1 + 12]] =
2554  nnz_cache[scan8[2 + 12]] =
2555  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2556  }
2557  }
2558 
2559  return 0;
2560 }
2561 
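The early-return test in fill_filter_caches() averages the QP of the two macroblocks sharing an edge, with rounding, before comparing against qp_thresh. A tiny sketch of that skip decision with assumed QP values:

#include <stdio.h>

/* Rounded average used for the edge shared by two macroblocks. */
static int edge_qp(int qp_a, int qp_b) { return (qp_a + qp_b + 1) >> 1; }

int main(void)
{
    int qp_thresh = 17;
    int qp_cur = 16, qp_left = 20, qp_top = 14;

    int skip = qp_cur <= qp_thresh &&
               edge_qp(qp_cur, qp_left) <= qp_thresh && /* (16+20+1)>>1 = 18 */
               edge_qp(qp_cur, qp_top)  <= qp_thresh;   /* (16+14+1)>>1 = 15 */
    printf("can skip filtering: %d\n", skip);           /* 0: left edge too hot */
    return 0;
}
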
2562 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2563 {
2564  uint8_t *dest_y, *dest_cb, *dest_cr;
2565  int linesize, uvlinesize, mb_x, mb_y;
2566  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2567  const int old_slice_type = sl->slice_type;
2568  const int pixel_shift = h->pixel_shift;
2569  const int block_h = 16 >> h->chroma_y_shift;
2570 
2571  if (h->postpone_filter)
2572  return;
2573 
2574  if (sl->deblocking_filter) {
2575  for (mb_x = start_x; mb_x < end_x; mb_x++)
2576  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2577  int mb_xy, mb_type;
2578  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2579  mb_type = h->cur_pic.mb_type[mb_xy];
2580 
2581  if (FRAME_MBAFF(h))
2582  sl->mb_mbaff =
2583  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2584 
2585  sl->mb_x = mb_x;
2586  sl->mb_y = mb_y;
2587  dest_y = h->cur_pic.f->data[0] +
2588  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2589  dest_cb = h->cur_pic.f->data[1] +
2590  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2591  mb_y * sl->uvlinesize * block_h;
2592  dest_cr = h->cur_pic.f->data[2] +
2593  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2594  mb_y * sl->uvlinesize * block_h;
2595  // FIXME simplify above
2596 
2597  if (MB_FIELD(sl)) {
2598  linesize = sl->mb_linesize = sl->linesize * 2;
2599  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2600  if (mb_y & 1) { // FIXME move out of this function?
2601  dest_y -= sl->linesize * 15;
2602  dest_cb -= sl->uvlinesize * (block_h - 1);
2603  dest_cr -= sl->uvlinesize * (block_h - 1);
2604  }
2605  } else {
2606  linesize = sl->mb_linesize = sl->linesize;
2607  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2608  }
2609  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2610  uvlinesize, 0);
2611  if (fill_filter_caches(h, sl, mb_type))
2612  continue;
2613  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2614  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2615 
2616  if (FRAME_MBAFF(h)) {
2617  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2618  linesize, uvlinesize);
2619  } else {
2620  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2621  dest_cr, linesize, uvlinesize);
2622  }
2623  }
2624  }
2625  sl->slice_type = old_slice_type;
2626  sl->mb_x = end_x;
2627  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2628  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2629  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2630 }
2631 
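loop_filter() above computes each macroblock's pixel pointers straight from the frame planes. A minimal sketch of the same offset arithmetic for 8-bit 4:2:0 (pixel_shift 0, CHROMA444 0, chroma_y_shift 1; the strides are made-up values):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    int linesize = 1024, uvlinesize = 512;   /* assumed plane strides */
    int chroma_y_shift = 1;                  /* 4:2:0 */
    int block_h = 16 >> chroma_y_shift;      /* 8 chroma rows per MB */
    int mb_x = 3, mb_y = 5;

    /* Byte offsets of this MB inside the luma and chroma planes, matching
     * the dest_y/dest_cb expressions above. */
    ptrdiff_t y_off  = ((ptrdiff_t)mb_x + (ptrdiff_t)mb_y * linesize) * 16;
    ptrdiff_t uv_off = (ptrdiff_t)mb_x * 8 + (ptrdiff_t)mb_y * uvlinesize * block_h;
    printf("luma offset %td, chroma offset %td\n", y_off, uv_off);
    return 0;
}
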
2632 static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
2633 {
2634  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2635  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2636  h->cur_pic.mb_type[mb_xy - 1] :
2637  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2638  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2639  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2640 }
2641 
2642 /**
2643  * Draw edges and report progress for the last MB row.
2644  */
2645 static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
2646 {
2647  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2648  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2649  int height = 16 << FRAME_MBAFF(h);
2650  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2651 
2652  if (sl->deblocking_filter) {
2653  if ((top + height) >= pic_height)
2654  height += deblock_border;
2655  top -= deblock_border;
2656  }
2657 
2658  if (top >= pic_height || (top + height) < 0)
2659  return;
2660 
2661  height = FFMIN(height, pic_height - top);
2662  if (top < 0) {
2663  height = top + height;
2664  top = 0;
2665  }
2666 
2667  ff_h264_draw_horiz_band(h, sl, top, height);
2668 
2669  if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
2670  return;
2671 
2672  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2673  h->picture_structure == PICT_BOTTOM_FIELD);
2674 }
2675 
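decode_finish_row() above shifts the band it reports because deblocking can still modify rows near the bottom of the just-decoded MB row. A worked sketch of the reported window for a progressive, non-MBAFF frame (sizes assumed):

#include <stdio.h>

int main(void)
{
    int mb_y = 4, mb_height = 9;
    int deblocking = 1;

    int top        = 16 * mb_y;   /* first luma row of this MB row */
    int pic_height = 16 * mb_height;
    int height     = 16;          /* one MB row per pass without MBAFF */
    int border     = 16 + 4;      /* rows deblocking may still touch */

    if (deblocking) {
        if (top + height >= pic_height)
            height += border;     /* last row: flush everything */
        top -= border;
    }
    if (height > pic_height - top)
        height = pic_height - top;
    if (top < 0) {
        height += top;
        top = 0;
    }
    printf("rows [%d, %d) are final\n", top, top + height); /* [44, 60) */
    return 0;
}
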
2676 static void er_add_slice(H264SliceContext *sl,
2677  int startx, int starty,
2678  int endx, int endy, int status)
2679 {
2680  if (!sl->h264->enable_er)
2681  return;
2682 
2683  if (CONFIG_ERROR_RESILIENCE) {
2684  ERContext *er = &sl->h264->slice_ctx[0].er;
2685 
2686  ff_er_add_slice(er, startx, starty, endx, endy, status);
2687  }
2688 }
2689 
2690 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2691 {
2692  H264SliceContext *sl = arg;
2693  const H264Context *h = sl->h264;
2694  int lf_x_start = sl->mb_x;
2695  int orig_deblock = sl->deblocking_filter;
2696  int ret;
2697 
2698  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2699  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2700 
2701  ret = alloc_scratch_buffers(sl, sl->linesize);
2702  if (ret < 0)
2703  return ret;
2704 
2705  sl->mb_skip_run = -1;
2706 
2707  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2708 
2709  if (h->postpone_filter)
2710  sl->deblocking_filter = 0;
2711 
2712  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2713  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2714 
2715  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
2716  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2717  if (start_i) {
2718  int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2719  prev_status &= ~ VP_START;
2720  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2721  h->slice_ctx[0].er.error_occurred = 1;
2722  }
2723  }
2724 
2725  if (h->ps.pps->cabac) {
2726  /* realign */
2727  align_get_bits(&sl->gb);
2728 
2729  /* init cabac */
2730  ret = ff_init_cabac_decoder(&sl->cabac,
2731  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2732  (get_bits_left(&sl->gb) + 7) / 8);
2733  if (ret < 0)
2734  return ret;
2735 
2736  ff_h264_init_cabac_states(h, sl);
2737 
2738  for (;;) {
2739  int ret, eos;
2740  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2741  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2742  sl->next_slice_idx);
2743  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2744  sl->mb_y, ER_MB_ERROR);
2745  return AVERROR_INVALIDDATA;
2746  }
2747 
2748  ret = ff_h264_decode_mb_cabac(h, sl);
2749 
2750  if (ret >= 0)
2751  ff_h264_hl_decode_mb(h, sl);
2752 
2753  // FIXME optimal? or let mb_decode decode 16x32 ?
2754  if (ret >= 0 && FRAME_MBAFF(h)) {
2755  sl->mb_y++;
2756 
2757  ret = ff_h264_decode_mb_cabac(h, sl);
2758 
2759  if (ret >= 0)
2760  ff_h264_hl_decode_mb(h, sl);
2761  sl->mb_y--;
2762  }
2763  eos = get_cabac_terminate(&sl->cabac);
2764 
2765  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2766  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2767  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2768  sl->mb_y, ER_MB_END);
2769  if (sl->mb_x >= lf_x_start)
2770  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2771  goto finish;
2772  }
2773  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2774  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2775  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2776  av_log(h->avctx, AV_LOG_ERROR,
2777  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2778  sl->mb_x, sl->mb_y,
2779  sl->cabac.bytestream_end - sl->cabac.bytestream);
2780  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2781  sl->mb_y, ER_MB_ERROR);
2782  return AVERROR_INVALIDDATA;
2783  }
2784 
2785  if (++sl->mb_x >= h->mb_width) {
2786  loop_filter(h, sl, lf_x_start, sl->mb_x);
2787  sl->mb_x = lf_x_start = 0;
2788  decode_finish_row(h, sl);
2789  ++sl->mb_y;
2790  if (FIELD_OR_MBAFF_PICTURE(h)) {
2791  ++sl->mb_y;
2792  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2793  predict_field_decoding_flag(h, sl);
2794  }
2795  }
2796 
2797  if (eos || sl->mb_y >= h->mb_height) {
2798  ff_tlog(h->avctx, "slice end %d %d\n",
2799  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2800  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2801  sl->mb_y, ER_MB_END);
2802  if (sl->mb_x > lf_x_start)
2803  loop_filter(h, sl, lf_x_start, sl->mb_x);
2804  goto finish;
2805  }
2806  }
2807  } else {
2808  for (;;) {
2809  int ret;
2810 
2811  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2812  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2813  sl->next_slice_idx);
2814  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2815  sl->mb_y, ER_MB_ERROR);
2816  return AVERROR_INVALIDDATA;
2817  }
2818 
2819  ret = ff_h264_decode_mb_cavlc(h, sl);
2820 
2821  if (ret >= 0)
2822  ff_h264_hl_decode_mb(h, sl);
2823 
2824  // FIXME optimal? or let mb_decode decode 16x32 ?
2825  if (ret >= 0 && FRAME_MBAFF(h)) {
2826  sl->mb_y++;
2827  ret = ff_h264_decode_mb_cavlc(h, sl);
2828 
2829  if (ret >= 0)
2830  ff_h264_hl_decode_mb(h, sl);
2831  sl->mb_y--;
2832  }
2833 
2834  if (ret < 0) {
2835  av_log(h->avctx, AV_LOG_ERROR,
2836  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2837  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2838  sl->mb_y, ER_MB_ERROR);
2839  return ret;
2840  }
2841 
2842  if (++sl->mb_x >= h->mb_width) {
2843  loop_filter(h, sl, lf_x_start, sl->mb_x);
2844  sl->mb_x = lf_x_start = 0;
2845  decode_finish_row(h, sl);
2846  ++sl->mb_y;
2847  if (FIELD_OR_MBAFF_PICTURE(h)) {
2848  ++sl->mb_y;
2849  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2850  predict_field_decoding_flag(h, sl);
2851  }
2852  if (sl->mb_y >= h->mb_height) {
2853  ff_tlog(h->avctx, "slice end %d %d\n",
2854  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2855 
2856  if ( get_bits_left(&sl->gb) == 0
2857  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2858  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2859  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2860 
2861  goto finish;
2862  } else {
2863  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2864  sl->mb_x, sl->mb_y, ER_MB_END);
2865 
2866  return AVERROR_INVALIDDATA;
2867  }
2868  }
2869  }
2870 
2871  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2872  ff_tlog(h->avctx, "slice end %d %d\n",
2873  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2874 
2875  if (get_bits_left(&sl->gb) == 0) {
2876  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2877  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2878  if (sl->mb_x > lf_x_start)
2879  loop_filter(h, sl, lf_x_start, sl->mb_x);
2880 
2881  goto finish;
2882  } else {
2883  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2884  sl->mb_y, ER_MB_ERROR);
2885 
2886  return AVERROR_INVALIDDATA;
2887  }
2888  }
2889  }
2890  }
2891 
2892 finish:
2893  sl->deblocking_filter = orig_deblock;
2894  return 0;
2895 }
2896 
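Both decode loops in decode_slice() walk macroblocks in raster order and, under MBAFF, decode the bottom macroblock of each vertical pair immediately after the top one (the mb_y++/mb_y-- bracketing above) before advancing two rows at a time. A compact sketch of that traversal for a tiny picture:

#include <stdio.h>

int main(void)
{
    int mb_width = 3, mb_height = 4;  /* assumed; mb_height is even under MBAFF */
    int mbaff = 1;

    int mb_x = 0, mb_y = 0;
    for (;;) {
        printf("(%d,%d) ", mb_x, mb_y);
        if (mbaff)
            printf("(%d,%d) ", mb_x, mb_y + 1); /* bottom MB of the pair */
        if (++mb_x >= mb_width) {
            mb_x = 0;
            mb_y += 1 + mbaff;                  /* one row, or a pair of rows */
            if (mb_y >= mb_height)
                break;
        }
    }
    printf("\n");
    return 0;
}
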
2897 /**
2898  * Call decode_slice() for each context.
2899  *
2900  * @param h h264 master context
2901  */
2902 int ff_h264_execute_decode_slices(H264Context *h)
2903 {
2904  AVCodecContext *const avctx = h->avctx;
2905  H264SliceContext *sl;
2906  int context_count = h->nb_slice_ctx_queued;
2907  int ret = 0;
2908  int i, j;
2909 
2910  h->slice_ctx[0].next_slice_idx = INT_MAX;
2911 
2912  if (h->avctx->hwaccel || context_count < 1)
2913  return 0;
2914 
2915  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2916 
2917  if (context_count == 1) {
2918 
2919  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2920  h->postpone_filter = 0;
2921 
2922  ret = decode_slice(avctx, &h->slice_ctx[0]);
2923  h->mb_y = h->slice_ctx[0].mb_y;
2924  if (ret < 0)
2925  goto finish;
2926  } else {
2927  av_assert0(context_count > 0);
2928  for (i = 0; i < context_count; i++) {
2929  int next_slice_idx = h->mb_width * h->mb_height;
2930  int slice_idx;
2931 
2932  sl = &h->slice_ctx[i];
2933  if (CONFIG_ERROR_RESILIENCE) {
2934  sl->er.error_count = 0;
2935  }
2936 
2937  /* make sure none of those slices overlap */
2938  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2939  for (j = 0; j < context_count; j++) {
2940  H264SliceContext *sl2 = &h->slice_ctx[j];
2941  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2942 
2943  if (i == j || slice_idx2 < slice_idx)
2944  continue;
2945  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2946  }
2947  sl->next_slice_idx = next_slice_idx;
2948  }
2949 
2950  avctx->execute(avctx, decode_slice, h->slice_ctx,
2951  NULL, context_count, sizeof(h->slice_ctx[0]));
2952 
2953  /* pull back stuff from slices to master context */
2954  sl = &h->slice_ctx[context_count - 1];
2955  h->mb_y = sl->mb_y;
2956  if (CONFIG_ERROR_RESILIENCE) {
2957  for (i = 1; i < context_count; i++)
2958  h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
2959  }
2960 
2961  if (h->postpone_filter) {
2962  h->postpone_filter = 0;
2963 
2964  for (i = 0; i < context_count; i++) {
2965  int y_end, x_end;
2966 
2967  sl = &h->slice_ctx[i];
2968  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2969  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2970 
2971  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2972  sl->mb_y = j;
2973  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2974  j == y_end - 1 ? x_end : h->mb_width);
2975  }
2976  }
2977  }
2978  }
2979 
2980 finish:
2981  h->nb_slice_ctx_queued = 0;
2982  return ret;
2983 }
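When several slice contexts are queued, ff_h264_execute_decode_slices() gives each one the smallest start index among the slices that begin at or after it; decode_slice() then treats reaching that index as an overlap error. A standalone sketch of the same computation over assumed slice start positions:

#include <stdio.h>

int main(void)
{
    int mb_width = 8, mb_height = 6;
    int start_x[3] = { 0, 0, 4 };   /* assumed (mb_x, mb_y) of 3 queued slices */
    int start_y[3] = { 0, 2, 4 };

    for (int i = 0; i < 3; i++) {
        int slice_idx      = start_y[i] * mb_width + start_x[i];
        int next_slice_idx = mb_width * mb_height;

        for (int j = 0; j < 3; j++) {
            int slice_idx2 = start_y[j] * mb_width + start_x[j];
            if (i == j || slice_idx2 < slice_idx)
                continue;
            if (slice_idx2 < next_slice_idx)
                next_slice_idx = slice_idx2;
        }
        printf("slice %d: MBs [%d, %d)\n", i, slice_idx, next_slice_idx);
    }
    return 0;
}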
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2676
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:419
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:947
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:691
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:243
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:44
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:312
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:138
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264SEIDisplayOrientation::hflip
int hflip
Definition: h264_sei.h:147
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
av_clip
#define av_clip
Definition: common.h:96
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1047
H264SEIFilmGrainCharacteristics::blending_mode_id
int blending_mode_id
Definition: h264_sei.h:178
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:344
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:335
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:100
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1265
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:153
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1166
H264Picture::f
AVFrame * f
Definition: h264dec.h:131
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:866
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
av_clip_int8
#define av_clip_int8
Definition: common.h:105
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:97
ff_h264_replace_picture
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:155
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:934
H264Context::slice_ctx
H264SliceContext * slice_ctx
Definition: h264dec.h:364
AVFilmGrainH274Params::color_space
enum AVColorSpace color_space
Definition: film_grain_params.h:152
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:150
HWACCEL_MAX
#define HWACCEL_MAX
ff_h264_slice_context_init
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:222
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:444
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:73
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:325
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:208
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:34
AVFilmGrainH274Params::blending_mode_id
int blending_mode_id
Specifies the blending mode used to blend the simulated film grain with the decoded images.
Definition: film_grain_params.h:160
H264Picture::pps
const PPS * pps
Definition: h264dec.h:172
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:151
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:123
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:69
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2632
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AVFrame::width
int width
Definition: frame.h:389
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:597
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:55
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:307
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:129
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:497
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:537
H264SliceContext::mmco
MMCO mmco[MAX_MMCO_COUNT]
Definition: h264dec.h:334
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2645
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:279
H264SEIFilmGrainCharacteristics::color_primaries
int color_primaries
Definition: h264_sei.h:175
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:404
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:793
H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_CHECKERBOARD
Definition: h264_sei.h:46
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:138
H264SliceContext::h264
struct H264Context * h264
Definition: h264dec.h:190
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:223
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
ERContext
Definition: error_resilience.h:53
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:32
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:524
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:475
AVFilmGrainH274Params::color_range
enum AVColorRange color_range
Definition: film_grain_params.h:149
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:826
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:149
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:27
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:542
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:250
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:513
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:167
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:68
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:171
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
AVFilmGrainParams::seed
uint64_t seed
Seed to use for the synthesis process, if the codec allows for it.
Definition: film_grain_params.h:228
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1303
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:242
h264_mvpred.h
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:154
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:248
H264SliceContext
Definition: h264dec.h:189
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2336
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:74
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:719
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:311
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:218
AVHWAccel
Definition: avcodec.h:2039
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:422
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:701
U
#define U(x)
Definition: vp56_arith.h:37
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:155
fail
#define fail()
Definition: checkasm.h:127
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:291
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
H264SEIA53Caption
Definition: h264_sei.h:107
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:420
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1443
ff_thread_get_buffer
Frame-threading buffer protocol: set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames, which must then be freed with ff_thread_release_buffer(); otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded (a good place is where draw_horiz_band() is called).
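A hedged sketch of the allocate/release pairing (error handling trimmed; avctx and the surrounding decoder state are assumed to be set up as in a real codec):

    ThreadFrame tf = { .f = av_frame_alloc() };
    int ret = ff_thread_get_buffer(avctx, &tf, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0)
        return ret;
    /* decode into tf.f, calling ff_thread_report_progress() as rows finish */
    ff_thread_release_buffer(avctx, &tf);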
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:402
H264SliceContext::er
ERContext er
Definition: h264dec.h:192
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:98
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
AVFrame::key_frame
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:409
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2688
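Worked example: for 4:2:0 the chroma planes are halved in both dimensions, so both shifts come back as 1:

    int h_shift, v_shift;
    av_pix_fmt_get_chroma_sub_sample(AV_PIX_FMT_YUV420P, &h_shift, &v_shift);
    /* h_shift == 1, v_shift == 1;
     * chroma_width = AV_CEIL_RSHIFT(width, h_shift) */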
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:175
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:284
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:453
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:69
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:487
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:205
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:134
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:252
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:116
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:31
H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
Definition: h264_sei.h:49
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
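Usage example (the function also clamps num/den to the given maximum and returns 1 when the reduction is exact):

    int num, den;
    av_reduce(&num, &den, 6, 4, INT_MAX);   /* num == 3, den == 2 */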
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:263
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:194
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
Definition: h264_sei.h:51
AVFilmGrainH274Params::intensity_interval_upper_bound
uint8_t intensity_interval_upper_bound[3][256]
Specifies the upper bound of each intensity interval for which the set of model values applies for th...
Definition: film_grain_params.h:194
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1036
AVFilmGrainH274Params::bit_depth_luma
int bit_depth_luma
Specifies the bit depth used for the luma component.
Definition: film_grain_params.h:142
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:407
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1263
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:410
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:588
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:387
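A minimal pool lifecycle, assuming the default allocator via av_buffer_pool_init() from buffer.h:

    AVBufferPool *pool = av_buffer_pool_init(1024, NULL);
    AVBufferRef  *buf  = av_buffer_pool_get(pool);  /* new or recycled buffer */
    /* use buf->data ... */
    av_buffer_unref(&buf);           /* returns the buffer to the pool */
    av_buffer_pool_uninit(&pool);    /* pool is freed once all buffers return */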
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2193
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:359
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
H264SEIFilmGrainCharacteristics::present
int present
Definition: h264_sei.h:169
stereo3d.h
H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
Definition: h264_sei.h:50
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
H264SEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: h264_sei.h:108
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:34
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:76
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:31
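Roughly how a decoder exports H.274 film grain as frame side data; seed and model_id stand in for values parsed from the SEI and are illustrative, not the decoder's actual mapping:

    AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(frame);
    if (!fgp)
        return AVERROR(ENOMEM);
    fgp->type = AV_FILM_GRAIN_PARAMS_H274;
    fgp->seed = seed;                        /* illustrative, e.g. from the SEI */
    fgp->codec.h274.model_id = model_id;     /* illustrative */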
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1838
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:36
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:401
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:186
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:77
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:206
H264SEIFilmGrainCharacteristics::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Definition: h264_sei.h:183
IS_INTRA
#define IS_INTRA(x, y)
field
It's the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:667
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVFilmGrainH274Params::comp_model_value
int16_t comp_model_value[3][256][6]
Specifies the model values for the component for each intensity interval.
Definition: film_grain_params.h:205
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:195
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:244
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:168
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:472
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:51
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
Read a signed Exp-Golomb code.
Definition: golomb.h:241
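The signed value is obtained from the unsigned Exp-Golomb code by the spec's zig-zag mapping; a standalone sketch of just that mapping (the bit reading itself is what get_ue_golomb()/get_se_golomb() do):

    /* ue: 0 1 2 3 4 ...  maps to  se: 0 1 -1 2 -2 ... */
    static int ue_to_se(unsigned ue)
    {
        return (ue & 1) ? (int)((ue + 1) >> 1) : -(int)(ue >> 1);
    }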
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:565
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:343
FF_CODEC_PROPERTY_FILM_GRAIN
#define FF_CODEC_PROPERTY_FILM_GRAIN
Definition: avcodec.h:1825
arg
const char * arg
Definition: jacosubdec.c:67
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
if
if(ret)
Definition: filter_design.txt:179
AVFilmGrainH274Params::model_id
int model_id
Specifies the film grain simulation mode.
Definition: film_grain_params.h:137
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:63
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:188
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:140
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:296
H264SEIUnregistered
Definition: h264_sei.h:111
SPS
Sequence parameter set.
Definition: h264_ps.h:44
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1912
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:200
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non-spec-compliant speedup tricks.
Definition: avcodec.h:287
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:255
H264SEIFilmGrainCharacteristics::repetition_period
int repetition_period
Definition: h264_sei.h:186
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
PPS
Picture parameter set.
Definition: h264_ps.h:111
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:565
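Typical pattern: a pointer-to-pointer is passed through the void *ptr parameter, and the tracked allocation size lives alongside it:

    uint8_t *scratch    = NULL;
    unsigned scratch_sz = 0;
    av_fast_mallocz(&scratch, &scratch_sz, 16 * 1024);
    /* reallocates (and zeroes the new buffer) only when the requested size
     * exceeds scratch_sz; later calls with smaller sizes reuse the buffer */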
mathops.h
list
Format negotiation: the lists of supported formats for each input and each output (pixel formats for video, channel/sample formats for audio) are references to shared objects; when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link, it is picked amongst the remaining list.
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:82
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:573
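A hedged sketch of the ownership handover, mirroring how SEI payload buffers are typically attached (buf_ref is assumed to be an AVBufferRef the caller owns):

    AVFrameSideData *sd =
        av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_SEI_UNREGISTERED, buf_ref);
    if (!sd)
        av_buffer_unref(&buf_ref);   /* failure: ownership stays with the caller */
    else
        buf_ref = NULL;              /* success: the frame now owns the buffer */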
H264Picture::mb_height
int mb_height
Definition: h264dec.h:174
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:405
H264SliceContext::qscale
int qscale
Definition: h264dec.h:199
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:789
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2420
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1335
fp
#define fp
Definition: regdef.h:44
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:755
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:419
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:300
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:563
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:93
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:249
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:907
c
Undefined behavior: in C, some operations (signed integer overflow, dereferencing freed pointers, accessing outside allocated memory) are undefined and must not occur, even if the output of the undefined operation is unused; optimizing compilers have in fact optimized code on the assumption that no undefined behavior occurs, which can lead to effects beyond the output of computations.
Definition: undefined.txt:32
AV_FRAME_DATA_SEI_UNREGISTERED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
Definition: frame.h:177
H264SliceContext::top_type
int top_type
Definition: h264dec.h:226
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:665
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:245
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:37
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1056
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:68
H264SEIPictureTiming
Definition: h264_sei.h:66
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:330
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:256
AVFrame::crop_left
size_t crop_left
Definition: frame.h:666
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:414
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_STEREO3D_CHECKERBOARD
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:104
H264Picture::reference
int reference
Definition: h264dec.h:165
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:77
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
AVFilmGrainH274Params::component_model_present
int component_model_present[3]
Indicates if the modelling of film grain for a given component is present.
Definition: film_grain_params.h:170
H264SEIFramePacking
Definition: h264_sei.h:133
rectangle.h
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:1282
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:240
VP_START
#define VP_START
current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:409
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:209
MAX_DELAYED_PIC_COUNT
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:58
H264SEIFilmGrainCharacteristics::bit_depth_chroma
int bit_depth_chroma
Definition: h264_sei.h:173
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:132
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:144
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:696
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:35
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:166
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:221
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:201
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:411
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:297
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
h264chroma.h
AVFilmGrainParams
This structure describes how to handle film grain synthesis in video for specific codecs.
Definition: film_grain_params.h:216
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
H264SliceContext::cbp
int cbp
Definition: h264dec.h:267
AVFrame::format
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:404
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:228
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:121
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:242
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:40
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2690
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:336
a
The reader does not expect b to be semantically signed here, and if the code is changed (by maybe adding a division or another operation) the signedness will almost certainly be mistaken. To avoid this confusion a new type was added: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
av_reallocp_array
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
Definition: mem.c:232
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
Definition: h264_sei.h:47
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:238
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:303
H264SEIFilmGrainCharacteristics::model_id
int model_id
Definition: h264_sei.h:170
H264SEIDisplayOrientation::anticlockwise_rotation
int anticlockwise_rotation
Definition: h264_sei.h:146
H264SliceContext::slice_type_nos
int slice_type_nos
slice_type with the switching (S) variants removed (SI/SP are remapped to I/P)
Definition: h264dec.h:196
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:341
AVFilmGrainParams::h274
AVFilmGrainH274Params h274
Definition: film_grain_params.h:237
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
H264SEIFilmGrainCharacteristics::intensity_interval_upper_bound
uint8_t intensity_interval_upper_bound[3][256]
Definition: h264_sei.h:184
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:75
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:83
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:31
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:680
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:185
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:39
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:51
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2562
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:165
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:254
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:58
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:83
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:272
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:409
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:163
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:340
H264SEIFilmGrainCharacteristics
Definition: h264_sei.h:168
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:246
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:62
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:169
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:39
H264Context
H264Context.
Definition: h264dec.h:350
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:52
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:136
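A round trip with av_timecode_get_smpte() (listed elsewhere in this index), assuming AV_TIMECODE_STR_SIZE from timecode.h; the exact output string is illustrative:

    char tc[AV_TIMECODE_STR_SIZE];
    AVRational rate = { 30000, 1001 };                              /* 29.97 fps */
    uint32_t smpte  = av_timecode_get_smpte(rate, 1, 10, 0, 0, 2);  /* drop-frame */
    av_timecode_make_smpte_tc_string2(tc, rate, smpte, 0, 1);
    /* tc now reads roughly "10:00:00;02" (';' marks drop-frame) */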
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:315
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:547
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:38
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:338
AVFilmGrainH274Params::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Specifies the number of intensity intervals for which a specific set of model values has been estimat...
Definition: film_grain_params.h:176
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:421
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2902
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:239
av_assert1
#define av_assert1(cond)
assert() equivalent that does not lie in speed-critical code.
Definition: avassert.h:53
H264SEIFilmGrainCharacteristics::matrix_coeffs
int matrix_coeffs
Definition: h264_sei.h:177
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:146
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:219
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:526
AVFilmGrainH274Params::color_primaries
enum AVColorPrimaries color_primaries
Definition: film_grain_params.h:150
H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
Definition: h264_sei.h:48
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:580
AVFilmGrainH274Params::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Specifies the lower bounds of each intensity interval for which the set of model values applies for the...
Definition: film_grain_params.h:188
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:403
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:280
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
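Quick example of the return convention (-1, 0 or 1 for a < b, a == b, a > b; INT_MIN when the order is undefined, e.g. 0/0):

    AVRational a = { 1, 4 }, b = { 1, 3 };
    int cmp = av_cmp_q(a, b);   /* -1, since 1/4 < 1/3 */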
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:100
ret
ret
Definition: filter_design.txt:187
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:277
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:529
H264SEIFilmGrainCharacteristics::transfer_characteristics
int transfer_characteristics
Definition: h264_sei.h:176
AV_STEREO3D_COLUMNS
@ AV_STEREO3D_COLUMNS
Views are packed per column.
Definition: stereo3d.h:141
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1546
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:202
H264SEIFilmGrainCharacteristics::bit_depth_luma
int bit_depth_luma
Definition: h264_sei.h:172
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:408
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:413
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:101
H264SEIFilmGrainCharacteristics::full_range
int full_range
Definition: h264_sei.h:174
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:290
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:238
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:207
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVFrame::height
int height
Definition: frame.h:389
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
Read an unsigned Exp-Golomb code, constrained to a max of 31.
Definition: golomb.h:122
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:29
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:589
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1491
H264SEIFilmGrainCharacteristics::comp_model_present_flag
int comp_model_present_flag[3]
Definition: h264_sei.h:180
AV_FILM_GRAIN_PARAMS_H274
@ AV_FILM_GRAIN_PARAMS_H274
The union is valid when interpreted as AVFilmGrainH274Params (codec.h274)
Definition: film_grain_params.h:35
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:295
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:413
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:152
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2149
H264Picture::mb_width
int mb_width
Definition: h264dec.h:174
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
H264Picture
Definition: h264dec.h:130
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:272
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1020
AVFilmGrainH274Params::log2_scale_factor
int log2_scale_factor
Specifies a scale factor used in the film grain characterization equations.
Definition: film_grain_params.h:165
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:664
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:2057
H264SEIDisplayOrientation::vflip
int vflip
Definition: h264_sei.h:147
H264SEIDisplayOrientation
Definition: h264_sei.h:144
H264Picture::tf_grain
ThreadFrame tf_grain
Definition: h264dec.h:135
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1824
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:231
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFilmGrainH274Params::num_model_values
uint8_t num_model_values[3]
Specifies the number of model values present for each intensity interval in which the film grain has ...
Definition: film_grain_params.h:182
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:161
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:143
H264SEIUnregistered::buf_ref
AVBufferRef ** buf_ref
Definition: h264_sei.h:113
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:281
LBOT
#define LBOT
Definition: h264dec.h:79
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:1340
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:306
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
H264SEIFilmGrainCharacteristics::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Definition: h264_sei.h:181
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:78
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
H264SEIFilmGrainCharacteristics::comp_model_value
int16_t comp_model_value[3][256][6]
Definition: h264_sei.h:185
H264SEIFilmGrainCharacteristics::num_model_values
uint8_t num_model_values[3]
Definition: h264_sei.h:182
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:106
H264Context::nal_length_size
int nal_length_size
Number of bytes used for NAL length (1, 2 or 4)
Definition: h264dec.h:470
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
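Sketch of how a decoder attaches stereo packing metadata (the chosen values are illustrative; the type, flag and view enums are listed elsewhere in this index):

    AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
    if (!s3d)
        return AVERROR(ENOMEM);
    s3d->type  = AV_STEREO3D_SIDEBYSIDE;
    s3d->flags = AV_STEREO3D_FLAG_INVERT;   /* the right half carries the left view */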
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFilmGrainH274Params::color_trc
enum AVColorTransferCharacteristic color_trc
Definition: film_grain_params.h:151
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:33
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:147
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
H264SEIFilmGrainCharacteristics::log2_scale_factor
int log2_scale_factor
Definition: h264_sei.h:179
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:339
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
H264_SEI_FPA_TYPE_2D
@ H264_SEI_FPA_TYPE_2D
Definition: h264_sei.h:52
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:205
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVFilmGrainParams::codec
union AVFilmGrainParams::@294 codec
Additional fields may be added both here and in any structure included.
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
AVFrame::crop_top
size_t crop_top
Definition: frame.h:664
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:191
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:560
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:218
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:78
int32_t
int32_t
Definition: audioconvert.c:56
h264.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:299
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:286
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:101
AVFilmGrainH274Params::bit_depth_chroma
int bit_depth_chroma
Specifies the bit depth used for the chroma components.
Definition: film_grain_params.h:147
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2318
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:332
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:414
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:35
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:54
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:298
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:197
H264Ref::poc
int poc
Definition: h264dec.h:183
AVFilmGrainParams::type
enum AVFilmGrainParamsType type
Specifies the codec for which this structure is valid.
Definition: film_grain_params.h:220
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:106
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:137
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:32
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:342
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3027
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:159
H264Ref::reference
int reference
Definition: h264dec.h:182
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:141
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:356
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:412
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:36
H264SEIFilmGrainCharacteristics::separate_colour_description_present_flag
int separate_colour_description_present_flag
Definition: h264_sei.h:171
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2580
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:253
H264Context::is_avc
int is_avc
Used to parse the AVC variant of H.264.
Definition: h264dec.h:469