/*
 * FFmpeg — h264_slice.c
 */
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/timecode.h"
34 #include "decode.h"
35 #include "cabac.h"
36 #include "cabac_functions.h"
37 #include "error_resilience.h"
38 #include "avcodec.h"
39 #include "h264.h"
40 #include "h264dec.h"
41 #include "h264data.h"
42 #include "h264chroma.h"
43 #include "h264_ps.h"
44 #include "golomb.h"
45 #include "mathops.h"
46 #include "mpegutils.h"
47 #include "rectangle.h"
48 #include "refstruct.h"
49 #include "thread.h"
50 #include "threadframe.h"
51 
/* 4x4 coefficient scan order used for field (interlaced) macroblocks.
 * Each entry encodes a block position as x + y * 4.
 * The "+1" appears to be one zero-initialized padding element — presumably
 * guarding one-past-the-end reads by scan-table consumers; TODO confirm. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
58 
/* 8x8 coefficient scan order for field (interlaced) macroblocks.
 * Each entry encodes a block position as x + y * 8.
 * The "+1" appears to be one zero-initialized padding element; TODO confirm. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
77 
/* CAVLC variant of the 8x8 field scan: the same positions as field_scan8x8
 * but permuted the way the CAVLC coefficient reader expects
 * (column-interleaved, analogous to zigzag_scan8x8_cavlc below).
 * Entries encode positions as x + y * 8; "+1" is padding. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
96 
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
/* Progressive 8x8 zigzag scan permuted for the CAVLC reader (see the
 * generator formula above).  Entries encode positions as x + y * 8;
 * "+1" is padding. */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
116 
117 static void release_unused_pictures(H264Context *h, int remove_current)
118 {
119  int i;
120 
121  /* release non reference frames */
122  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
123  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
124  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
125  ff_h264_unref_picture(&h->DPB[i]);
126  }
127  }
128 }
129 
130 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
131 {
132  const H264Context *h = sl->h264;
133  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
134 
135  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
136  // edge emu needs blocksize + filter length - 1
137  // (= 21x21 for H.264)
138  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
139 
141  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
143  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
144 
145  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
146  !sl->top_borders[0] || !sl->top_borders[1]) {
149  av_freep(&sl->top_borders[0]);
150  av_freep(&sl->top_borders[1]);
151 
154  sl->top_borders_allocated[0] = 0;
155  sl->top_borders_allocated[1] = 0;
156  return AVERROR(ENOMEM);
157  }
158 
159  return 0;
160 }
161 
163 {
164  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
165  const int mb_array_size = h->mb_stride * h->mb_height;
166  const int b4_stride = h->mb_width * 4 + 1;
167  const int b4_array_size = b4_stride * h->mb_height * 4;
168 
169  h->qscale_table_pool = ff_refstruct_pool_alloc(big_mb_num + h->mb_stride, 0);
170  h->mb_type_pool = ff_refstruct_pool_alloc((big_mb_num + h->mb_stride) *
171  sizeof(uint32_t), 0);
172  h->motion_val_pool = ff_refstruct_pool_alloc(2 * (b4_array_size + 4) *
173  sizeof(int16_t), 0);
174  h->ref_index_pool = ff_refstruct_pool_alloc(4 * mb_array_size, 0);
175 
176  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
177  !h->ref_index_pool) {
178  ff_refstruct_pool_uninit(&h->qscale_table_pool);
179  ff_refstruct_pool_uninit(&h->mb_type_pool);
180  ff_refstruct_pool_uninit(&h->motion_val_pool);
181  ff_refstruct_pool_uninit(&h->ref_index_pool);
182  return AVERROR(ENOMEM);
183  }
184 
185  return 0;
186 }
187 
189 {
190  int i, ret = 0;
191 
192  av_assert0(!pic->f->data[0]);
193 
194  pic->tf.f = pic->f;
195  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
196  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
197  if (ret < 0)
198  goto fail;
199 
200  if (pic->needs_fg) {
201  pic->f_grain->format = pic->f->format;
202  pic->f_grain->width = pic->f->width;
203  pic->f_grain->height = pic->f->height;
204  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
205  if (ret < 0)
206  goto fail;
207  }
208 
210  if (ret < 0)
211  goto fail;
212 
213  if (h->decode_error_flags_pool) {
214  pic->decode_error_flags = ff_refstruct_pool_get(h->decode_error_flags_pool);
215  if (!pic->decode_error_flags)
216  goto fail;
218  }
219 
220  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
221  int h_chroma_shift, v_chroma_shift;
223  &h_chroma_shift, &v_chroma_shift);
224 
225  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
226  memset(pic->f->data[1] + pic->f->linesize[1]*i,
227  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
228  memset(pic->f->data[2] + pic->f->linesize[2]*i,
229  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
230  }
231  }
232 
233  if (!h->qscale_table_pool) {
235  if (ret < 0)
236  goto fail;
237  }
238 
239  pic->qscale_table_base = ff_refstruct_pool_get(h->qscale_table_pool);
240  pic->mb_type_base = ff_refstruct_pool_get(h->mb_type_pool);
241  if (!pic->qscale_table_base || !pic->mb_type_base)
242  goto fail;
243 
244  pic->mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
245  pic->qscale_table = pic->qscale_table_base + 2 * h->mb_stride + 1;
246 
247  for (i = 0; i < 2; i++) {
248  pic->motion_val_base[i] = ff_refstruct_pool_get(h->motion_val_pool);
249  pic->ref_index[i] = ff_refstruct_pool_get(h->ref_index_pool);
250  if (!pic->motion_val_base[i] || !pic->ref_index[i])
251  goto fail;
252 
253  pic->motion_val[i] = pic->motion_val_base[i] + 4;
254  }
255 
256  pic->pps = ff_refstruct_ref_c(h->ps.pps);
257 
258  pic->mb_width = h->mb_width;
259  pic->mb_height = h->mb_height;
260  pic->mb_stride = h->mb_stride;
261 
262  return 0;
263 fail:
265  return (ret < 0) ? ret : AVERROR(ENOMEM);
266 }
267 
269 {
270  int i;
271 
272  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
273  if (!h->DPB[i].f->buf[0])
274  return i;
275  }
276  return AVERROR_INVALIDDATA;
277 }
278 
279 
/* True if pointer a lies within `size` elements starting at b
 * (note: (b) + (size) is pointer arithmetic in b's element units). */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Map a picture pointer from old_ctx's DPB to the same slot in new_ctx's
 * DPB; pointers that are NULL or outside old_ctx's DPB map to NULL. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
286 
287 static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count,
288  H264Context *new_base, const H264Context *old_base)
289 {
290  int i;
291 
292  for (i = 0; i < count; i++) {
293  av_assert1(!from[i] ||
294  IN_RANGE(from[i], old_base, 1) ||
295  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
296  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
297  }
298 }
299 
300 static void color_frame(AVFrame *frame, const int c[4])
301 {
303 
305 
306  for (int p = 0; p < desc->nb_components; p++) {
307  uint8_t *dst = frame->data[p];
308  int is_chroma = p == 1 || p == 2;
309  int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
310  int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
311  if (desc->comp[0].depth >= 9) {
312  ((uint16_t*)dst)[0] = c[p];
313  av_memcpy_backptr(dst + 2, 2, bytes - 2);
314  dst += frame->linesize[p];
315  for (int y = 1; y < height; y++) {
316  memcpy(dst, frame->data[p], 2*bytes);
317  dst += frame->linesize[p];
318  }
319  } else {
320  for (int y = 0; y < height; y++) {
321  memset(dst, c[p], bytes);
322  dst += frame->linesize[p];
323  }
324  }
325  }
326 }
327 
329 
331  const AVCodecContext *src)
332 {
333  H264Context *h = dst->priv_data, *h1 = src->priv_data;
334  int inited = h->context_initialized, err = 0;
335  int need_reinit = 0;
336  int i, ret;
337 
338  if (dst == src)
339  return 0;
340 
341  if (inited && !h1->ps.sps)
342  return AVERROR_INVALIDDATA;
343 
344  if (inited &&
345  (h->width != h1->width ||
346  h->height != h1->height ||
347  h->mb_width != h1->mb_width ||
348  h->mb_height != h1->mb_height ||
349  !h->ps.sps ||
350  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
351  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
352  h->ps.sps->vui.matrix_coeffs != h1->ps.sps->vui.matrix_coeffs)) {
353  need_reinit = 1;
354  }
355 
356  /* copy block_offset since frame_start may not be called */
357  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
358 
359  // SPS/PPS
360  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++)
361  ff_refstruct_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
362  for (int i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++)
363  ff_refstruct_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
364 
365  ff_refstruct_replace(&h->ps.pps, h1->ps.pps);
366  h->ps.sps = h1->ps.sps;
367 
368  if (need_reinit || !inited) {
369  h->width = h1->width;
370  h->height = h1->height;
371  h->mb_height = h1->mb_height;
372  h->mb_width = h1->mb_width;
373  h->mb_num = h1->mb_num;
374  h->mb_stride = h1->mb_stride;
375  h->b_stride = h1->b_stride;
376  h->x264_build = h1->x264_build;
377 
378  if (h->context_initialized || h1->context_initialized) {
379  if ((err = h264_slice_header_init(h)) < 0) {
380  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
381  return err;
382  }
383  }
384 
385  /* copy block_offset since frame_start may not be called */
386  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
387  }
388 
389  h->width_from_caller = h1->width_from_caller;
390  h->height_from_caller = h1->height_from_caller;
391  h->first_field = h1->first_field;
392  h->picture_structure = h1->picture_structure;
393  h->mb_aff_frame = h1->mb_aff_frame;
394  h->droppable = h1->droppable;
395 
396  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
397  ret = ff_h264_replace_picture(&h->DPB[i], &h1->DPB[i]);
398  if (ret < 0)
399  return ret;
400  }
401 
402  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
403  ret = ff_h264_replace_picture(&h->cur_pic, &h1->cur_pic);
404  if (ret < 0)
405  return ret;
406 
407  h->enable_er = h1->enable_er;
408  h->workaround_bugs = h1->workaround_bugs;
409  h->droppable = h1->droppable;
410 
411  // extradata/NAL handling
412  h->is_avc = h1->is_avc;
413  h->nal_length_size = h1->nal_length_size;
414 
415  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
416 
417  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
418  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
419  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
420  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
421 
422  h->next_output_pic = h1->next_output_pic;
423  h->next_outputed_poc = h1->next_outputed_poc;
424  h->poc_offset = h1->poc_offset;
425 
426  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
427  h->nb_mmco = h1->nb_mmco;
428  h->mmco_reset = h1->mmco_reset;
429  h->explicit_ref_marking = h1->explicit_ref_marking;
430  h->long_ref_count = h1->long_ref_count;
431  h->short_ref_count = h1->short_ref_count;
432 
433  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
434  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
435  copy_picture_range(h->delayed_pic, h1->delayed_pic,
436  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
437 
438  h->frame_recovered = h1->frame_recovered;
439 
440  ret = ff_h264_sei_ctx_replace(&h->sei, &h1->sei);
441  if (ret < 0)
442  return ret;
443 
444  h->sei.common.unregistered.x264_build = h1->sei.common.unregistered.x264_build;
445  h->sei.common.mastering_display = h1->sei.common.mastering_display;
446  h->sei.common.content_light = h1->sei.common.content_light;
447 
448  if (!h->cur_pic_ptr)
449  return 0;
450 
451  if (!h->droppable) {
453  h->poc.prev_poc_msb = h->poc.poc_msb;
454  h->poc.prev_poc_lsb = h->poc.poc_lsb;
455  }
456  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
457  h->poc.prev_frame_num = h->poc.frame_num;
458 
459  h->recovery_frame = h1->recovery_frame;
460  h->non_gray = h1->non_gray;
461 
462  return err;
463 }
464 
466  const AVCodecContext *src)
467 {
468  H264Context *h = dst->priv_data;
469  const H264Context *h1 = src->priv_data;
470 
471  h->is_avc = h1->is_avc;
472  h->nal_length_size = h1->nal_length_size;
473 
474  return 0;
475 }
476 
478 {
479  H264Picture *pic;
480  int i, ret;
481  const int pixel_shift = h->pixel_shift;
482 
483  if (!ff_thread_can_start_frame(h->avctx)) {
484  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
485  return AVERROR_BUG;
486  }
487 
489  h->cur_pic_ptr = NULL;
490 
492  if (i < 0) {
493  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
494  return i;
495  }
496  pic = &h->DPB[i];
497 
498  pic->reference = h->droppable ? 0 : h->picture_structure;
499  pic->field_picture = h->picture_structure != PICT_FRAME;
500  pic->frame_num = h->poc.frame_num;
501  /*
502  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
503  * in later.
504  * See decode_nal_units().
505  */
506  pic->f->flags &= ~AV_FRAME_FLAG_KEY;
507  pic->mmco_reset = 0;
508  pic->recovered = 0;
509  pic->invalid_gap = 0;
510  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
511 
512  pic->f->pict_type = h->slice_ctx[0].slice_type;
513 
514  pic->f->crop_left = h->crop_left;
515  pic->f->crop_right = h->crop_right;
516  pic->f->crop_top = h->crop_top;
517  pic->f->crop_bottom = h->crop_bottom;
518 
519  pic->needs_fg = h->sei.common.film_grain_characteristics.present && !h->avctx->hwaccel &&
520  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
521 
522  if ((ret = alloc_picture(h, pic)) < 0)
523  return ret;
524 
525  h->cur_pic_ptr = pic;
526  ff_h264_unref_picture(&h->cur_pic);
527  if (CONFIG_ERROR_RESILIENCE) {
528  ff_h264_set_erpic(&h->er.cur_pic, NULL);
529  }
530 
531  if ((ret = ff_h264_ref_picture(&h->cur_pic, h->cur_pic_ptr)) < 0)
532  return ret;
533 
534  for (i = 0; i < h->nb_slice_ctx; i++) {
535  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
536  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
537  }
538 
539  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
540  ff_er_frame_start(&h->er);
541  ff_h264_set_erpic(&h->er.last_pic, NULL);
542  ff_h264_set_erpic(&h->er.next_pic, NULL);
543  }
544 
545  for (i = 0; i < 16; i++) {
546  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
547  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
548  }
549  for (i = 0; i < 16; i++) {
550  h->block_offset[16 + i] =
551  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
552  h->block_offset[48 + 16 + i] =
553  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
554  }
555 
556  /* We mark the current picture as non-reference after allocating it, so
557  * that if we break out due to an error it can be released automatically
558  * in the next ff_mpv_frame_start().
559  */
560  h->cur_pic_ptr->reference = 0;
561 
562  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
563 
564  h->next_output_pic = NULL;
565 
566  h->postpone_filter = 0;
567 
568  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
569 
570  if (h->sei.common.unregistered.x264_build >= 0)
571  h->x264_build = h->sei.common.unregistered.x264_build;
572 
573  assert(h->cur_pic_ptr->long_ref == 0);
574 
575  return 0;
576 }
577 
579  const uint8_t *src_y,
580  const uint8_t *src_cb, const uint8_t *src_cr,
581  int linesize, int uvlinesize,
582  int simple)
583 {
584  uint8_t *top_border;
585  int top_idx = 1;
586  const int pixel_shift = h->pixel_shift;
587  int chroma444 = CHROMA444(h);
588  int chroma422 = CHROMA422(h);
589 
590  src_y -= linesize;
591  src_cb -= uvlinesize;
592  src_cr -= uvlinesize;
593 
594  if (!simple && FRAME_MBAFF(h)) {
595  if (sl->mb_y & 1) {
596  if (!MB_MBAFF(sl)) {
597  top_border = sl->top_borders[0][sl->mb_x];
598  AV_COPY128(top_border, src_y + 15 * linesize);
599  if (pixel_shift)
600  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
601  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
602  if (chroma444) {
603  if (pixel_shift) {
604  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
605  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
606  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
607  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
608  } else {
609  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
610  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
611  }
612  } else if (chroma422) {
613  if (pixel_shift) {
614  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
615  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
616  } else {
617  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
618  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
619  }
620  } else {
621  if (pixel_shift) {
622  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
623  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
624  } else {
625  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
626  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
627  }
628  }
629  }
630  }
631  } else if (MB_MBAFF(sl)) {
632  top_idx = 0;
633  } else
634  return;
635  }
636 
637  top_border = sl->top_borders[top_idx][sl->mb_x];
638  /* There are two lines saved, the line above the top macroblock
639  * of a pair, and the line above the bottom macroblock. */
640  AV_COPY128(top_border, src_y + 16 * linesize);
641  if (pixel_shift)
642  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
643 
644  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
645  if (chroma444) {
646  if (pixel_shift) {
647  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
648  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
649  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
650  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
651  } else {
652  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
653  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
654  }
655  } else if (chroma422) {
656  if (pixel_shift) {
657  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
658  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
659  } else {
660  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
661  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
662  }
663  } else {
664  if (pixel_shift) {
665  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
666  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
667  } else {
668  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
669  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
670  }
671  }
672  }
673 }
674 
675 /**
676  * Initialize implicit_weight table.
677  * @param field 0/1 initialize the weight for interlaced MBAFF
678  * -1 initializes the rest
679  */
681 {
682  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
683 
684  for (i = 0; i < 2; i++) {
685  sl->pwt.luma_weight_flag[i] = 0;
686  sl->pwt.chroma_weight_flag[i] = 0;
687  }
688 
689  if (field < 0) {
690  if (h->picture_structure == PICT_FRAME) {
691  cur_poc = h->cur_pic_ptr->poc;
692  } else {
693  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
694  }
695  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
696  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
697  sl->pwt.use_weight = 0;
698  sl->pwt.use_weight_chroma = 0;
699  return;
700  }
701  ref_start = 0;
702  ref_count0 = sl->ref_count[0];
703  ref_count1 = sl->ref_count[1];
704  } else {
705  cur_poc = h->cur_pic_ptr->field_poc[field];
706  ref_start = 16;
707  ref_count0 = 16 + 2 * sl->ref_count[0];
708  ref_count1 = 16 + 2 * sl->ref_count[1];
709  }
710 
711  sl->pwt.use_weight = 2;
712  sl->pwt.use_weight_chroma = 2;
713  sl->pwt.luma_log2_weight_denom = 5;
715 
716  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
717  int64_t poc0 = sl->ref_list[0][ref0].poc;
718  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
719  int w = 32;
720  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
721  int poc1 = sl->ref_list[1][ref1].poc;
722  int td = av_clip_int8(poc1 - poc0);
723  if (td) {
724  int tb = av_clip_int8(cur_poc - poc0);
725  int tx = (16384 + (FFABS(td) >> 1)) / td;
726  int dist_scale_factor = (tb * tx + 32) >> 8;
727  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
728  w = 64 - dist_scale_factor;
729  }
730  }
731  if (field < 0) {
732  sl->pwt.implicit_weight[ref0][ref1][0] =
733  sl->pwt.implicit_weight[ref0][ref1][1] = w;
734  } else {
735  sl->pwt.implicit_weight[ref0][ref1][field] = w;
736  }
737  }
738  }
739 }
740 
741 /**
742  * initialize scan tables
743  */
745 {
746  int i;
747  for (i = 0; i < 16; i++) {
748 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
749  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
750  h->field_scan[i] = TRANSPOSE(field_scan[i]);
751 #undef TRANSPOSE
752  }
753  for (i = 0; i < 64; i++) {
754 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
755  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
756  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
757  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
758  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
759 #undef TRANSPOSE
760  }
761  if (h->ps.sps->transform_bypass) { // FIXME same ugly
762  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
763  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
764  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
765  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
766  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
767  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
768  } else {
769  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
770  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
771  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
772  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
773  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
774  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
775  }
776 }
777 
/**
 * Build the list of candidate pixel formats for the current SPS (bit depth,
 * chroma format, colorspace/range) — hwaccel formats first, software
 * fallback last — and run format negotiation.
 *
 * The order of candidates is load-bearing: ff_get_format() picks the first
 * acceptable entry.
 *
 * @param force_callback when 0 and the context's current pix_fmt is among
 *                       the candidates, keep it without re-invoking the
 *                       caller's get_format callback.
 * @return the negotiated pixel format, or AVERROR_INVALIDDATA for an
 *         unsupported luma bit depth.
 */
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
/* upper bound on the number of hwaccel entries added below */
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_D3D12VA_HWACCEL + \
                     CONFIG_H264_NVDEC_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL + \
                     CONFIG_H264_VULKAN_HWACCEL)
    /* +2: one software format and the AV_PIX_FMT_NONE terminator */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        /* 9-bit: software decoding only */
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else {
#if CONFIG_H264_VAAPI_HWACCEL
            // Just add as candidate. Whether VAProfileH264High10 usable or
            // not is decided by vaapi_decode_make_config() defined in FFmpeg
            // and vaQueryCodingProfile() defined in libva.
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            *fmt++ = AV_PIX_FMT_YUV420P10;
        }
        break;
    case 12:
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        /* 14-bit: software decoding only */
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
#if CONFIG_H264_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
            /* 8-bit 4:2:0 — the widest hwaccel coverage */
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_D3D12VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D12;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    /* keep the already-negotiated format if still valid and not forced */
    for (int i = 0; pix_fmts[i] != AV_PIX_FMT_NONE; i++)
        if (pix_fmts[i] == h->avctx->pix_fmt && !force_callback)
            return pix_fmts[i];
    return ff_get_format(h->avctx, pix_fmts);
}
911 
912 /* export coded and cropped frame dimensions to AVCodecContext */
914 {
915  const SPS *sps = h->ps.sps;
916  int cr = sps->crop_right;
917  int cl = sps->crop_left;
918  int ct = sps->crop_top;
919  int cb = sps->crop_bottom;
920  int width = h->width - (cr + cl);
921  int height = h->height - (ct + cb);
922  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
923  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
924 
925  /* handle container cropping */
926  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
927  !sps->crop_top && !sps->crop_left &&
928  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
929  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
930  h->width_from_caller <= width &&
931  h->height_from_caller <= height) {
932  width = h->width_from_caller;
933  height = h->height_from_caller;
934  cl = 0;
935  ct = 0;
936  cr = h->width - width;
937  cb = h->height - height;
938  } else {
939  h->width_from_caller = 0;
940  h->height_from_caller = 0;
941  }
942 
943  h->avctx->coded_width = h->width;
944  h->avctx->coded_height = h->height;
945  h->avctx->width = width;
946  h->avctx->height = height;
947  h->crop_right = cr;
948  h->crop_left = cl;
949  h->crop_top = ct;
950  h->crop_bottom = cb;
951 }
952 
/* One-time (re)initialization of the decoder context for the active SPS:
 * exports SAR/framerate, validates the bit depth, and initializes the DSP,
 * chroma, qpel, prediction and videodsp function tables plus the per-slice
 * contexts.  Returns 0 on success, a negative error code on failure.
 * NOTE(review): the signature line (orig. line 953, presumably
 * "static int h264_slice_header_init(H264Context *h)" — it is called as such
 * from h264_init_ps below) was lost in extraction. */
954 {
955  const SPS *sps = h->ps.sps;
956  int i, ret;
957 
958  if (!sps) {
 /* NOTE(review): orig. line 959 (presumably "ret = AVERROR_INVALIDDATA;")
  * was lost in extraction; without it ret would be used uninitialized at
  * the fail label — confirm against upstream. */
960  goto fail;
961  }
962 
963  ff_set_sar(h->avctx, sps->vui.sar);
964  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
965  &h->chroma_x_shift, &h->chroma_y_shift);
966 
967  if (sps->timing_info_present_flag) {
968  int64_t den = sps->time_scale;
 /* Old x264 builds (< 44) wrote time_scale halved; compensate. */
969  if (h->x264_build < 44U)
970  den *= 2;
971  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
972  sps->num_units_in_tick * 2, den, 1 << 30);
973  }
974 
 /* NOTE(review): orig. line 975 was lost in extraction. */
976 
977  h->first_field = 0;
978  h->prev_interlaced_frame = 1;
979 
 /* NOTE(review): orig. lines 980-981 (the allocation call whose result is
  * checked below, presumably ff_h264_alloc_tables()/context init) were lost
  * in extraction. */
982  if (ret < 0) {
983  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
984  goto fail;
985  }
986 
 /* Only bit depths 8, 9, 10, 12 and 14 are supported. */
987  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
988  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
989  ) {
990  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
991  sps->bit_depth_luma);
 /* NOTE(review): orig. line 992 (presumably "ret = AVERROR_INVALIDDATA;")
  * was lost in extraction. */
993  goto fail;
994  }
995 
 /* Remember the active format so later SPS changes can trigger a reinit. */
996  h->cur_bit_depth_luma =
997  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
998  h->cur_chroma_format_idc = sps->chroma_format_idc;
999  h->pixel_shift = sps->bit_depth_luma > 8;
1000  h->chroma_format_idc = sps->chroma_format_idc;
1001  h->bit_depth_luma = sps->bit_depth_luma;
1002 
 /* (Re)initialize all DSP function tables for the new bit depth / chroma. */
1003  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
1004  sps->chroma_format_idc);
1005  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
1006  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
1007  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
1008  sps->chroma_format_idc);
1009  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
1010 
 /* Without slice threading only slice_ctx[0] is used; otherwise give each
  * slice context its own sub-range of the shared per-MB tables. */
1011  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
1012  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
1013  } else {
1014  for (i = 0; i < h->nb_slice_ctx; i++) {
1015  H264SliceContext *sl = &h->slice_ctx[i];
1016 
1017  sl->h264 = h;
1018  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1019  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1020  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1021 
 /* NOTE(review): orig. line 1022 (presumably the per-context
  * ff_h264_slice_context_init(h, sl) call) was lost in extraction. */
1023  }
1024  }
1025 
1026  h->context_initialized = 1;
1027 
1028  return 0;
1029 fail:
 /* NOTE(review): orig. line 1030 (presumably ff_h264_free_tables(h)) was
  * lost in extraction. */
1031  h->context_initialized = 0;
1032  return ret;
1033 }
1034 
/* Map full-range "J" pixel formats to their limited-range equivalents so
 * that a pure range change is not treated as a pixel-format change by the
 * reinit logic in h264_init_ps().
 * NOTE(review): the signature line (orig. line 1035, presumably
 * "static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)" — it is
 * called as non_j_pixfmt(pix_fmt) below) and the case labels (orig. lines
 * 1038-1040, presumably the AV_PIX_FMT_YUVJ* -> AV_PIX_FMT_YUV* mappings)
 * were lost in extraction; confirm against upstream. */
1036 {
1037  switch (a) {
1041  default:
1042  return a;
1043  }
1044 }
1045 
/* Activate the parameter sets referenced by the current slice: selects the
 * PPS (and thereby SPS), decides whether the decoder context must be
 * reinitialized (dimension / bit depth / chroma format / pixel format / SAR
 * changes), exports stream properties to the AVCodecContext, and performs
 * the reinit when required.  Only the first slice of a picture may trigger a
 * reinit.  Returns 0 on success, a negative error code on failure. */
1046 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1047 {
1048  const SPS *sps;
1049  int needs_reinit = 0, must_reinit, ret;
1050 
1051  if (first_slice)
1052  ff_refstruct_replace(&h->ps.pps, h->ps.pps_list[sl->pps_id]);
1053 
 /* SPS changed through the new PPS: compare against the currently active
  * geometry/format and flag a reinit if anything relevant differs. */
1054  if (h->ps.sps != h->ps.pps->sps) {
1055  h->ps.sps = h->ps.pps->sps;
1056 
1057  if (h->mb_width != h->ps.sps->mb_width ||
1058  h->mb_height != h->ps.sps->mb_height ||
1059  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1060  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1061  )
1062  needs_reinit = 1;
1063 
1064  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1065  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1066  needs_reinit = 1;
1067  }
1068  sps = h->ps.sps;
1069 
 /* Independent check against what was actually exported earlier — catches
  * mismatches even when h->ps.sps pointer did not change. */
1070  must_reinit = (h->context_initialized &&
1071  ( 16*sps->mb_width != h->avctx->coded_width
1072  || 16*sps->mb_height != h->avctx->coded_height
1073  || h->cur_bit_depth_luma != sps->bit_depth_luma
1074  || h->cur_chroma_format_idc != sps->chroma_format_idc
1075  || h->mb_width != sps->mb_width
1076  || h->mb_height != sps->mb_height
1077  ));
 /* Pixel-format change forces a reinit; J/non-J variants are folded
  * together since a pure range change needs no reinit. */
1078  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1079  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1080  must_reinit = 1;
1081 
1082  if (first_slice && av_cmp_q(sps->vui.sar, h->avctx->sample_aspect_ratio))
1083  must_reinit = 1;
1084 
 /* Export stream properties unless a frame-threading setup already froze
  * the externally visible context. */
1085  if (!h->setup_finished) {
1086  h->avctx->profile = ff_h264_get_profile(sps);
1087  h->avctx->level = sps->level_idc;
1088  h->avctx->refs = sps->ref_frame_count;
1089 
1090  h->mb_width = sps->mb_width;
1091  h->mb_height = sps->mb_height;
1092  h->mb_num = h->mb_width * h->mb_height;
 /* +1 column of padding so neighbour accesses never wrap rows. */
1093  h->mb_stride = h->mb_width + 1;
1094 
1095  h->b_stride = h->mb_width * 4;
1096 
1097  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1098 
1099  h->width = 16 * h->mb_width;
1100  h->height = 16 * h->mb_height;
1101 
1102  init_dimensions(h);
1103 
 /* Color properties from VUI; a matrix-coefficients change also requires
  * a reinit so downstream conversion state is rebuilt. */
1104  if (sps->vui.video_signal_type_present_flag) {
1105  h->avctx->color_range = sps->vui.video_full_range_flag > 0 ? AVCOL_RANGE_JPEG
1106  : AVCOL_RANGE_MPEG;
1107  if (sps->vui.colour_description_present_flag) {
1108  if (h->avctx->colorspace != sps->vui.matrix_coeffs)
1109  needs_reinit = 1;
1110  h->avctx->color_primaries = sps->vui.colour_primaries;
1111  h->avctx->color_trc = sps->vui.transfer_characteristics;
1112  h->avctx->colorspace = sps->vui.matrix_coeffs;
1113  }
1114  }
1115 
 /* Alternative-transfer SEI overrides the VUI transfer characteristic. */
1116  if (h->sei.common.alternative_transfer.present &&
1117  av_color_transfer_name(h->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
1118  h->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1119  h->avctx->color_trc = h->sei.common.alternative_transfer.preferred_transfer_characteristics;
1120  }
1121  }
1122  h->avctx->chroma_sample_location = sps->vui.chroma_location;
1123 
1124  if (!h->context_initialized || must_reinit || needs_reinit) {
1125  int flush_changes = h->context_initialized;
1126  h->context_initialized = 0;
 /* Mid-picture parameter changes are invalid — only the first slice
  * (slice_ctx[0]) may reinitialize the context. */
1127  if (sl != h->slice_ctx) {
1128  av_log(h->avctx, AV_LOG_ERROR,
1129  "changing width %d -> %d / height %d -> %d on "
1130  "slice %d\n",
1131  h->width, h->avctx->coded_width,
1132  h->height, h->avctx->coded_height,
1133  h->current_slice + 1);
1134  return AVERROR_INVALIDDATA;
1135  }
1136 
1137  av_assert1(first_slice);
1138 
1139  if (flush_changes)
 /* NOTE(review): orig. line 1140 (the body of the if above, presumably
  * "ff_h264_flush_change(h);") was lost in extraction; confirm against
  * upstream. */
1141 
1142  if ((ret = get_pixel_format(h, 1)) < 0)
1143  return ret;
1144  h->avctx->pix_fmt = ret;
1145 
1146  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1147  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1148 
1149  if ((ret = h264_slice_header_init(h)) < 0) {
1150  av_log(h->avctx, AV_LOG_ERROR,
1151  "h264_slice_header_init() failed\n");
1152  return ret;
1153  }
1154  }
1155 
1156  return 0;
1157 }
1158 
/* Export per-frame properties to the output AVFrame: interlacing and
 * top-field-first flags (preferring picture-timing SEI over the decoding
 * process), repeat_pict for telecined content, common SEI side data, and
 * SMPTE timecode side data / metadata.  Returns 0 on success or a negative
 * error code.
 * NOTE(review): the signature line (orig. line 1159, presumably
 * "static int h264_export_frame_props(H264Context *h)") was lost in
 * extraction. */
1160 {
1161  const SPS *sps = h->ps.sps;
1162  H264Picture *cur = h->cur_pic_ptr;
1163  AVFrame *out = cur->f;
1164  int interlaced_frame = 0, top_field_first = 0;
1165  int ret;
1166 
1167  out->flags &= ~AV_FRAME_FLAG_INTERLACED;
1168  out->repeat_pict = 0;
1169 
1170  /* Signal interlacing information externally. */
1171  /* Prioritize picture timing SEI information over used
1172  * decoding process if it exists. */
1173  if (h->sei.picture_timing.present) {
1174  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1175  h->avctx);
1176  if (ret < 0) {
1177  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1178  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1179  return ret;
 /* Non-EXPLODE mode: drop the bad SEI and keep decoding. */
1180  h->sei.picture_timing.present = 0;
1181  }
1182  }
1183 
1184  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1185  const H264SEIPictureTiming *pt = &h->sei.picture_timing;
 /* NOTE(review): the case labels of this switch (orig. lines 1187,
  * 1189-1190, 1193-1195, 1201-1202, 1208, 1211 — the
  * H264_SEI_PIC_STRUCT_* values) were lost in extraction; the bodies
  * below correspond to frame / field / field+repeat / frame-doubling /
  * frame-tripling pic_struct values. Confirm against upstream. */
1186  switch (pt->pic_struct) {
1188  break;
1191  interlaced_frame = 1;
1192  break;
1196  interlaced_frame = 1;
1197  else
1198  // try to flag soft telecine progressive
1199  interlaced_frame = !!h->prev_interlaced_frame;
1200  break;
1203  /* Signal the possibility of telecined film externally
1204  * (pic_struct 5,6). From these hints, let the applications
1205  * decide if they apply deinterlacing. */
1206  out->repeat_pict = 1;
1207  break;
1209  out->repeat_pict = 2;
1210  break;
1212  out->repeat_pict = 4;
1213  break;
1214  }
1215 
 /* ct_type bit 1 set => interlaced source, for field pic_structs. */
1216  if ((pt->ct_type & 3) &&
1217  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1218  interlaced_frame = ((pt->ct_type & (1 << 1)) != 0);
1219  } else {
1220  /* Derive interlacing flag from used decoding process. */
1221  interlaced_frame = !!FIELD_OR_MBAFF_PICTURE(h);
1222  }
1223  h->prev_interlaced_frame = interlaced_frame;
1224 
1225  if (cur->field_poc[0] != cur->field_poc[1]) {
1226  /* Derive top_field_first from field pocs. */
1227  top_field_first = (cur->field_poc[0] < cur->field_poc[1]);
1228  } else {
1229  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1230  /* Use picture timing SEI information. Even if it is a
1231  * information of a past frame, better than nothing. */
1232  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1233  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1234  top_field_first = 1;
1235  } else if (interlaced_frame) {
1236  /* Default to top field first when pic_struct_present_flag
1237  * is not set but interlaced frame detected */
1238  top_field_first = 1;
1239  } // else
1240  /* Most likely progressive */
1241  }
1242 
1243  out->flags |= (AV_FRAME_FLAG_INTERLACED * interlaced_frame) |
1244  (AV_FRAME_FLAG_TOP_FIELD_FIRST * top_field_first);
1245 
 /* Attach common H.2645 SEI side data (HDR, A53 CC, etc.). */
1246  ret = ff_h2645_sei_to_frame(out, &h->sei.common, AV_CODEC_ID_H264, h->avctx,
1247  &sps->vui, sps->bit_depth_luma, sps->bit_depth_chroma,
1248  cur->poc + (unsigned)(h->poc_offset << 5));
1249  if (ret < 0)
1250  return ret;
1251 
1252  if (h->sei.picture_timing.timecode_cnt > 0) {
1253  uint32_t *tc_sd;
1254  char tcbuf[AV_TIMECODE_STR_SIZE];
1255  AVFrameSideData *tcside;
 /* NOTE(review): orig. line 1256 (the av_frame_side_data call creating
  * the AV_FRAME_DATA_S12M_TIMECODE side data whose result is checked
  * below) was lost in extraction; confirm against upstream. */
1257  sizeof(uint32_t)*4, &tcside);
1258  if (ret < 0)
1259  return ret;
1260 
1261  if (tcside) {
 /* S12M layout: tc_sd[0] = count, tc_sd[1..3] = packed timecodes. */
1262  tc_sd = (uint32_t*)tcside->data;
1263  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1264 
1265  for (int i = 0; i < tc_sd[0]; i++) {
1266  int drop = h->sei.picture_timing.timecode[i].dropframe;
1267  int hh = h->sei.picture_timing.timecode[i].hours;
1268  int mm = h->sei.picture_timing.timecode[i].minutes;
1269  int ss = h->sei.picture_timing.timecode[i].seconds;
1270  int ff = h->sei.picture_timing.timecode[i].frame;
1271 
1272  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1273  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1274  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1275  }
1276  }
1277  h->sei.picture_timing.timecode_cnt = 0;
1278  }
1279 
1280  return 0;
1281 }
1282 
/* Select which delayed picture (if any) to output next: maintains the
 * last-POC history to estimate the required reorder depth, inserts the
 * current picture into the delayed-picture list, picks the lowest-POC
 * candidate, and sets h->next_output_pic / h->next_outputed_poc accordingly.
 * Also propagates frame-recovery state and marks unrecovered output as
 * corrupt (or suppresses it).  Always returns 0.
 * NOTE(review): the signature line (orig. line 1283, presumably
 * "static int h264_select_output_frame(H264Context *h)") was lost in
 * extraction. */
1284 {
1285  const SPS *sps = h->ps.sps;
1286  H264Picture *out = h->cur_pic_ptr;
1287  H264Picture *cur = h->cur_pic_ptr;
1288  int i, pics, out_of_order, out_idx;
1289 
1290  cur->mmco_reset = h->mmco_reset;
1291  h->mmco_reset = 0;
1292 
1293  if (sps->bitstream_restriction_flag ||
1294  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1295  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1296  }
1297 
 /* Insert cur->poc into the sorted last_pocs history (shifting smaller
  * entries down); i ends up as the insertion position. */
1298  for (i = 0; 1; i++) {
1299  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1300  if(i)
1301  h->last_pocs[i-1] = cur->poc;
1302  break;
1303  } else if(i) {
1304  h->last_pocs[i-1]= h->last_pocs[i];
1305  }
1306  }
 /* Distance from the end of the history = observed reorder depth. */
1307  out_of_order = H264_MAX_DPB_FRAMES - i;
1308  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1309  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1310  out_of_order = FFMAX(out_of_order, 1);
1311  if (out_of_order == H264_MAX_DPB_FRAMES) {
 /* POC below the whole history: treat as a POC reset. */
1312  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1313  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1314  h->last_pocs[i] = INT_MIN;
1315  h->last_pocs[0] = cur->poc;
1316  cur->mmco_reset = 1;
1317  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1318  int loglevel = h->avctx->frame_num > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1319  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1320  h->avctx->has_b_frames = out_of_order;
1321  }
1322 
1323  pics = 0;
1324  while (h->delayed_pic[pics])
1325  pics++;
1326 
 /* NOTE(review): orig. line 1327 (presumably an assert bounding pics
  * against the delayed_pic array size) was lost in extraction. */
1328 
1329  h->delayed_pic[pics++] = cur;
 /* Keep the picture referenced while it waits in the output queue. */
1330  if (cur->reference == 0)
1331  cur->reference = DELAYED_PIC_REF;
1332 
 /* Pick the lowest-POC delayed picture, stopping at keyframes/MMCO
  * resets, which act as reorder barriers. */
1333  out = h->delayed_pic[0];
1334  out_idx = 0;
1335  for (i = 1; h->delayed_pic[i] &&
1336  !(h->delayed_pic[i]->f->flags & AV_FRAME_FLAG_KEY) &&
1337  !h->delayed_pic[i]->mmco_reset;
1338  i++)
1339  if (h->delayed_pic[i]->poc < out->poc) {
1340  out = h->delayed_pic[i];
1341  out_idx = i;
1342  }
1343  if (h->avctx->has_b_frames == 0 &&
1344  ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset))
1345  h->next_outputed_poc = INT_MIN;
1346  out_of_order = out->poc < h->next_outputed_poc;
1347 
1348  if (out_of_order || pics > h->avctx->has_b_frames) {
1349  out->reference &= ~DELAYED_PIC_REF;
 /* Remove the chosen picture from the queue (compact the array). */
1350  for (i = out_idx; h->delayed_pic[i]; i++)
1351  h->delayed_pic[i] = h->delayed_pic[i + 1];
1352  }
1353  if (!out_of_order && pics > h->avctx->has_b_frames) {
1354  h->next_output_pic = out;
1355  if (out_idx == 0 && h->delayed_pic[0] && ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset)) {
1356  h->next_outputed_poc = INT_MIN;
1357  } else
1358  h->next_outputed_poc = out->poc;
1359 
1360  // We have reached an recovery point and all frames after it in
1361  // display order are "recovered".
1362  h->frame_recovered |= out->recovered;
1363 
1364  out->recovered |= h->frame_recovered & FRAME_RECOVERED_SEI;
1365 
1366  if (!out->recovered) {
 /* Suppress (or mark corrupt) output decoded before a recovery point. */
1367  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1368  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1369  h->next_output_pic = NULL;
1370  } else {
1371  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1372  }
1373  }
1374  } else {
1375  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1376  }
1377 
1378  return 0;
1379 }
1380 
1381 /* This function is called right after decoding the slice header for a first
1382  * slice in a field (or a frame). It decides whether we are decoding a new frame
1383  * or a second field in a pair and does the necessary setup.
1384  */
/* NOTE(review): the first signature line (orig. line 1385, presumably
 * "static int h264_field_start(H264Context *h, const H264SliceContext *sl,")
 * was lost in extraction; the line below is its continuation. */
1386  const H2645NAL *nal, int first_slice)
1387 {
1388  int i;
1389  const SPS *sps;
1390 
1391  int last_pic_structure, last_pic_droppable, ret;
1392 
 /* Activate parameter sets (may reinit the whole context). */
1393  ret = h264_init_ps(h, sl, first_slice);
1394  if (ret < 0)
1395  return ret;
1396 
1397  sps = h->ps.sps;
1398 
1399  if (sps->bitstream_restriction_flag &&
1400  h->avctx->has_b_frames < sps->num_reorder_frames) {
1401  h->avctx->has_b_frames = sps->num_reorder_frames;
1402  }
1403 
 /* Remember the previous picture's structure/droppability so we can
  * restore them if this field turns out to be invalid. */
1404  last_pic_droppable = h->droppable;
1405  last_pic_structure = h->picture_structure;
1406  h->droppable = (nal->ref_idc == 0);
1407  h->picture_structure = sl->picture_structure;
1408 
 /* Latch the slice-header POC inputs into the global POC state. */
1409  h->poc.frame_num = sl->frame_num;
1410  h->poc.poc_lsb = sl->poc_lsb;
1411  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1412  h->poc.delta_poc[0] = sl->delta_poc[0];
1413  h->poc.delta_poc[1] = sl->delta_poc[1];
1414 
1415  if (nal->type == H264_NAL_IDR_SLICE)
1416  h->poc_offset = sl->idr_pic_id;
1417  else if (h->picture_intra_only)
1418  h->poc_offset = 0;
1419 
1420  /* Shorten frame num gaps so we don't have to allocate reference
1421  * frames just to throw them away */
1422  if (h->poc.frame_num != h->poc.prev_frame_num) {
1423  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1424  int max_frame_num = 1 << sps->log2_max_frame_num;
1425 
1426  if (unwrap_prev_frame_num > h->poc.frame_num)
1427  unwrap_prev_frame_num -= max_frame_num;
1428 
1429  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1430  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1431  if (unwrap_prev_frame_num < 0)
1432  unwrap_prev_frame_num += max_frame_num;
1433 
1434  h->poc.prev_frame_num = unwrap_prev_frame_num;
1435  }
1436  }
1437 
1438  /* See if we have a decoded first field looking for a pair...
1439  * Here, we're using that to see if we should mark previously
1440  * decode frames as "finished".
1441  * We have to do that before the "dummy" in-between frame allocation,
1442  * since that can modify h->cur_pic_ptr. */
1443  if (h->first_field) {
1444  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1445  av_assert0(h->cur_pic_ptr);
1446  av_assert0(h->cur_pic_ptr->f->buf[0]);
1447  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1448 
1449  /* Mark old field/frame as completed */
1450  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1451  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1452  }
1453 
1454  /* figure out if we have a complementary field pair */
1455  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1456  /* Previous field is unmatched. Don't display it, but let it
1457  * remain for reference if marked as such. */
1458  if (last_pic_structure != PICT_FRAME) {
1459  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1460  last_pic_structure == PICT_TOP_FIELD);
1461  }
1462  } else {
1463  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1464  /* This and previous field were reference, but had
1465  * different frame_nums. Consider this field first in
1466  * pair. Throw away previous field except for reference
1467  * purposes. */
1468  if (last_pic_structure != PICT_FRAME) {
1469  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1470  last_pic_structure == PICT_TOP_FIELD);
1471  }
1472  } else {
1473  /* Second field in complementary pair */
1474  if (!((last_pic_structure == PICT_TOP_FIELD &&
1475  h->picture_structure == PICT_BOTTOM_FIELD) ||
1476  (last_pic_structure == PICT_BOTTOM_FIELD &&
1477  h->picture_structure == PICT_TOP_FIELD))) {
1478  av_log(h->avctx, AV_LOG_ERROR,
1479  "Invalid field mode combination %d/%d\n",
1480  last_pic_structure, h->picture_structure);
1481  h->picture_structure = last_pic_structure;
1482  h->droppable = last_pic_droppable;
1483  return AVERROR_INVALIDDATA;
1484  } else if (last_pic_droppable != h->droppable) {
1485  avpriv_request_sample(h->avctx,
1486  "Found reference and non-reference fields in the same frame, which");
1487  h->picture_structure = last_pic_structure;
1488  h->droppable = last_pic_droppable;
1489  return AVERROR_PATCHWELCOME;
1490  }
1491  }
1492  }
1493  }
1494 
 /* Conceal frame_num gaps: synthesize "dummy" frames for each missing
  * frame_num so reference lists stay consistent. */
1495  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1496  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1497  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1498  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1499  h->poc.frame_num, h->poc.prev_frame_num);
1500  if (!sps->gaps_in_frame_num_allowed_flag)
1501  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1502  h->last_pocs[i] = INT_MIN;
1503  ret = h264_frame_start(h);
1504  if (ret < 0) {
1505  h->first_field = 0;
1506  return ret;
1507  }
1508 
1509  h->poc.prev_frame_num++;
1510  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1511  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1512  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
 /* Dummy frame is never actually decoded — mark both fields done. */
1513  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1514  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1515 
1516  h->explicit_ref_marking = 0;
 /* NOTE(review): orig. line 1517 (the ref-marking call whose result is
  * checked below, presumably "ret = ff_h264_execute_ref_pic_marking(h);")
  * was lost in extraction; confirm against upstream. */
1518  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1519  return ret;
1520  /* Error concealment: If a ref is missing, copy the previous ref
1521  * in its place.
1522  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1523  * many assumptions about there being no actual duplicates.
1524  * FIXME: This does not copy padding for out-of-frame motion
1525  * vectors. Given we are concealing a lost frame, this probably
1526  * is not noticeable by comparison, but it should be fixed. */
1527  if (h->short_ref_count) {
 /* Mid-grey fill values per plane (used when no previous ref exists). */
1528  int c[4] = {
1529  1<<(h->ps.sps->bit_depth_luma-1),
1530  1<<(h->ps.sps->bit_depth_chroma-1),
1531  1<<(h->ps.sps->bit_depth_chroma-1),
1532  -1
1533  };
1534 
1535  if (prev &&
1536  h->short_ref[0]->f->width == prev->f->width &&
1537  h->short_ref[0]->f->height == prev->f->height &&
1538  h->short_ref[0]->f->format == prev->f->format) {
1539  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1540  if (prev->field_picture)
1541  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1542  ff_thread_release_ext_buffer(&h->short_ref[0]->tf);
1543  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1544  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1545  if (ret < 0)
1546  return ret;
1547  h->short_ref[0]->poc = prev->poc + 2U;
1548  h->short_ref[0]->gray = prev->gray;
1549  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1550  if (h->short_ref[0]->field_picture)
1551  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1552  } else if (!h->frame_recovered) {
1553  if (!h->avctx->hwaccel)
1554  color_frame(h->short_ref[0]->f, c);
1555  h->short_ref[0]->gray = 1;
1556  }
1557  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1558  }
1559  }
1560 
1561  /* See if we have a decoded first field looking for a pair...
1562  * We're using that to see whether to continue decoding in that
1563  * frame, or to allocate a new one. */
1564  if (h->first_field) {
1565  av_assert0(h->cur_pic_ptr);
1566  av_assert0(h->cur_pic_ptr->f->buf[0]);
1567  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1568 
1569  /* figure out if we have a complementary field pair */
1570  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1571  /* Previous field is unmatched. Don't display it, but let it
1572  * remain for reference if marked as such. */
1573  h->missing_fields ++;
1574  h->cur_pic_ptr = NULL;
1575  h->first_field = FIELD_PICTURE(h);
1576  } else {
1577  h->missing_fields = 0;
1578  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1579  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1580  h->picture_structure==PICT_BOTTOM_FIELD);
1581  /* This and the previous field had different frame_nums.
1582  * Consider this field first in pair. Throw away previous
1583  * one except for reference purposes. */
1584  h->first_field = 1;
1585  h->cur_pic_ptr = NULL;
1586  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1587  /* This frame was already output, we cannot draw into it
1588  * anymore.
1589  */
1590  h->first_field = 1;
1591  h->cur_pic_ptr = NULL;
1592  } else {
1593  /* Second field in complementary pair */
1594  h->first_field = 0;
1595  }
1596  }
1597  } else {
1598  /* Frame or first field in a potentially complementary pair */
1599  h->first_field = FIELD_PICTURE(h);
1600  }
1601 
1602  if (!FIELD_PICTURE(h) || h->first_field) {
1603  if (h264_frame_start(h) < 0) {
1604  h->first_field = 0;
1605  return AVERROR_INVALIDDATA;
1606  }
1607  } else {
 /* Second field: continue in the existing picture, take ownership of
  * this field's decode progress. */
1608  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): orig. line 1609 (presumably the
  * ff_thread_report_progress(..., -1, field) reset for the new field)
  * was lost in extraction; confirm against upstream. */
1610  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1611  }
1612  /* Some macroblocks can be accessed before they're available in case
1613  * of lost slices, MBAFF or threading. */
1614  if (FIELD_PICTURE(h)) {
1615  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1616  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1617  } else {
1618  memset(h->slice_table, -1,
1619  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1620  }
1621 
1622  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1623  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1624  if (ret < 0)
1625  return ret;
1626 
 /* Copy slice-level memory-management control operations to the context. */
1627  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1628  h->nb_mmco = sl->nb_mmco;
1629  h->explicit_ref_marking = sl->explicit_ref_marking;
1630 
1631  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1632 
1633  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1634  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1635 
1636  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1637  h->valid_recovery_point = 1;
1638 
1639  if ( h->recovery_frame < 0
1640  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1641  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1642 
1643  if (!h->valid_recovery_point)
1644  h->recovery_frame = h->poc.frame_num;
1645  }
1646  }
1647 
1648  h->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY * !!(nal->type == H264_NAL_IDR_SLICE);
1649 
1650  if (nal->type == H264_NAL_IDR_SLICE) {
1651  h->cur_pic_ptr->recovered |= FRAME_RECOVERED_IDR;
1652  // If we have an IDR, all frames after it in decoded order are
1653  // "recovered".
1654  h->frame_recovered |= FRAME_RECOVERED_IDR;
1655  }
1656 
1657  if (h->recovery_frame == h->poc.frame_num && nal->ref_idc) {
1658  h->recovery_frame = -1;
1659  h->cur_pic_ptr->recovered |= FRAME_RECOVERED_SEI;
1660  }
1661 
1662 #if 1
1663  h->cur_pic_ptr->recovered |= h->frame_recovered;
1664 #else
1665  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1666 #endif
1667 
1668  /* Set the frame properties/side data. Only done for the second field in
1669  * field coded frames, since some SEI information is present for each field
1670  * and is merged by the SEI parsing code. */
1671  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): orig. line 1672 (the call whose result is checked below,
  * presumably "ret = h264_export_frame_props(h);") was lost in
  * extraction. */
1673  if (ret < 0)
1674  return ret;
1675 
 /* NOTE(review): orig. line 1676 (the call whose result is checked below,
  * presumably "ret = h264_select_output_frame(h);") was lost in
  * extraction. */
1677  if (ret < 0)
1678  return ret;
1679  }
1680 
1681  return 0;
1682 }
1683 
/* Parse one slice header from sl->gb into the slice context: slice type,
 * PPS id, frame_num, field/frame structure, IDR pic id, POC inputs,
 * reference counts, prediction weights, ref-pic marking, CABAC init, QP and
 * deblocking parameters.  Validates each syntax element and returns 0 on
 * success or a negative AVERROR on invalid data.
 * NOTE(review): the first signature line (orig. line 1684, presumably
 * "static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,")
 * was lost in extraction; the line below is its continuation. */
1685  const H2645NAL *nal)
1686 {
1687  const SPS *sps;
1688  const PPS *pps;
1689  int ret;
1690  unsigned int slice_type, tmp, i;
1691  int field_pic_flag, bottom_field_flag;
1692  int first_slice = sl == h->slice_ctx && !h->current_slice;
1693  int picture_structure;
1694 
1695  if (first_slice)
1696  av_assert0(!h->setup_finished);
1697 
1698  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1699 
1700  slice_type = get_ue_golomb_31(&sl->gb);
1701  if (slice_type > 9) {
1702  av_log(h->avctx, AV_LOG_ERROR,
1703  "slice type %d too large at %d\n",
1704  slice_type, sl->first_mb_addr);
1705  return AVERROR_INVALIDDATA;
1706  }
 /* Types 5-9 mean "all slices in this picture have this type". */
1707  if (slice_type > 4) {
1708  slice_type -= 5;
1709  sl->slice_type_fixed = 1;
1710  } else
1711  sl->slice_type_fixed = 0;
1712 
 /* Map bitstream slice-type order (P,B,I,SP,SI) to AV_PICTURE_TYPE_*. */
1713  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1714  sl->slice_type = slice_type;
1715  sl->slice_type_nos = slice_type & 3;
1716 
 /* NOTE(review): orig. line 1718 (the second condition of this if,
  * presumably "sl->slice_type_nos != AV_PICTURE_TYPE_I) {") was lost in
  * extraction; confirm against upstream. */
1717  if (nal->type == H264_NAL_IDR_SLICE &&
1719  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1720  return AVERROR_INVALIDDATA;
1721  }
1722 
1723  sl->pps_id = get_ue_golomb(&sl->gb);
1724  if (sl->pps_id >= MAX_PPS_COUNT) {
1725  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1726  return AVERROR_INVALIDDATA;
1727  }
1728  if (!h->ps.pps_list[sl->pps_id]) {
1729  av_log(h->avctx, AV_LOG_ERROR,
1730  "non-existing PPS %u referenced\n",
1731  sl->pps_id);
1732  return AVERROR_INVALIDDATA;
1733  }
1734  pps = h->ps.pps_list[sl->pps_id];
1735  sps = pps->sps;
1736 
1737  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must share the same frame_num. */
1738  if (!first_slice) {
1739  if (h->poc.frame_num != sl->frame_num) {
1740  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1741  h->poc.frame_num, sl->frame_num);
1742  return AVERROR_INVALIDDATA;
1743  }
1744  }
1745 
1746  sl->mb_mbaff = 0;
1747 
1748  if (sps->frame_mbs_only_flag) {
1749  picture_structure = PICT_FRAME;
1750  } else {
1751  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1752  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1753  return -1;
1754  }
1755  field_pic_flag = get_bits1(&sl->gb);
1756  if (field_pic_flag) {
1757  bottom_field_flag = get_bits1(&sl->gb);
1758  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1759  } else {
1760  picture_structure = PICT_FRAME;
1761  }
1762  }
1763  sl->picture_structure = picture_structure;
1764  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1765 
 /* Field pictures use 2*frame_num+1 numbering with one extra bit. */
1766  if (picture_structure == PICT_FRAME) {
1767  sl->curr_pic_num = sl->frame_num;
1768  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1769  } else {
1770  sl->curr_pic_num = 2 * sl->frame_num + 1;
1771  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1772  }
1773 
1774  if (nal->type == H264_NAL_IDR_SLICE) {
1775  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1776  if (idr_pic_id < 65536) {
1777  sl->idr_pic_id = idr_pic_id;
1778  } else
1779  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1780  }
1781 
 /* POC syntax elements depend on the SPS poc_type. */
1782  sl->poc_lsb = 0;
1783  sl->delta_poc_bottom = 0;
1784  if (sps->poc_type == 0) {
1785  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1786 
1787  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1788  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1789  }
1790 
1791  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1792  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1793  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1794 
1795  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1796  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1797  }
1798 
1799  sl->redundant_pic_count = 0;
1800  if (pps->redundant_pic_cnt_present)
1801  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1802 
1803  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1804  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1805 
 /* NOTE(review): orig. line 1806 (the call start, presumably
  * "ret = h264_parse_ref_count(&sl->list_count, sl->ref_count,") was lost
  * in extraction; the two lines below are its arguments. */
1807  &sl->gb, pps, sl->slice_type_nos,
1808  picture_structure, h->avctx);
1809  if (ret < 0)
1810  return ret;
1811 
1812  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): orig. line 1813 (presumably
  * "ret = ff_h264_decode_ref_pic_list_reordering(sl, h->avctx);") was
  * lost in extraction. */
1814  if (ret < 0) {
1815  sl->ref_count[1] = sl->ref_count[0] = 0;
1816  return ret;
1817  }
1818  }
1819 
1820  sl->pwt.use_weight = 0;
1821  for (i = 0; i < 2; i++) {
1822  sl->pwt.luma_weight_flag[i] = 0;
1823  sl->pwt.chroma_weight_flag[i] = 0;
1824  }
 /* NOTE(review): orig. lines 1827-1828 (the rest of this condition and
  * the ff_h264_pred_weight_table(...) call start) were lost in
  * extraction; the two lines below are that call's arguments. */
1825  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1826  (pps->weighted_bipred_idc == 1 &&
1829  sl->slice_type_nos, &sl->pwt,
1830  picture_structure, h->avctx);
1831  if (ret < 0)
1832  return ret;
1833  }
1834 
1835  sl->explicit_ref_marking = 0;
1836  if (nal->ref_idc) {
1837  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1838  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1839  return AVERROR_INVALIDDATA;
1840  }
1841 
1842  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1843  tmp = get_ue_golomb_31(&sl->gb);
1844  if (tmp > 2) {
1845  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1846  return AVERROR_INVALIDDATA;
1847  }
1848  sl->cabac_init_idc = tmp;
1849  }
1850 
1851  sl->last_qscale_diff = 0;
 /* slice_qp = pic_init_qp + slice_qp_delta; range check per bit depth. */
1852  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1853  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1854  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1855  return AVERROR_INVALIDDATA;
1856  }
1857  sl->qscale = tmp;
1858  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1859  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1860  // FIXME qscale / qp ... stuff
1861  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1862  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): orig. line 1864 (the second condition, presumably
  * "sl->slice_type == AV_PICTURE_TYPE_SI)") was lost in extraction. */
1863  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1865  get_se_golomb(&sl->gb); /* slice_qs_delta */
1866 
1867  sl->deblocking_filter = 1;
1868  sl->slice_alpha_c0_offset = 0;
1869  sl->slice_beta_offset = 0;
1870  if (pps->deblocking_filter_parameters_present) {
1871  tmp = get_ue_golomb_31(&sl->gb);
1872  if (tmp > 2) {
1873  av_log(h->avctx, AV_LOG_ERROR,
1874  "deblocking_filter_idc %u out of range\n", tmp);
1875  return AVERROR_INVALIDDATA;
1876  }
 /* Bitstream: 0 = on, 1 = off; internal: 1 = on, 0 = off. */
1877  sl->deblocking_filter = tmp;
1878  if (sl->deblocking_filter < 2)
1879  sl->deblocking_filter ^= 1; // 1<->0
1880 
1881  if (sl->deblocking_filter) {
1882  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1883  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1884  if (slice_alpha_c0_offset_div2 > 6 ||
1885  slice_alpha_c0_offset_div2 < -6 ||
1886  slice_beta_offset_div2 > 6 ||
1887  slice_beta_offset_div2 < -6) {
1888  av_log(h->avctx, AV_LOG_ERROR,
1889  "deblocking filter parameters %d %d out of range\n",
1890  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1891  return AVERROR_INVALIDDATA;
1892  }
1893  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1894  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1895  }
1896  }
1897 
1898  return 0;
1899 }
1900 
1901 /* do all the per-slice initialization needed before we can start decoding the
1902  * actual MBs */
 /* NOTE(review): the signature line (original 1903) is missing from this
  * extract; from the body, this takes (h, sl, nal) and returns 0 on success
  * or a negative AVERROR code. */
1904  const H2645NAL *nal)
1905 {
1906  int i, j, ret = 0;
1907 
 /* An IDR access unit must consist only of IDR slices. */
1908  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1909  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1910  return AVERROR_INVALIDDATA;
1911  }
1912 
1913  av_assert1(h->mb_num == h->mb_width * h->mb_height);
 /* Validate first_mb_in_slice against the picture MB count; for field or
  * MBAFF pictures one slice address covers a vertical MB pair, hence the
  * shift before the comparison. */
1914  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1915  sl->first_mb_addr >= h->mb_num) {
1916  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1917  return AVERROR_INVALIDDATA;
1918  }
 /* Convert the linear slice start address into resync x/y coordinates.
  * (original line 1921, the shift applied to mb_y for field/MBAFF
  * pictures, is missing from this extract) */
1919  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1920  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
 /* Bottom-field MB rows sit one row below their top-field counterpart. */
1922  if (h->picture_structure == PICT_BOTTOM_FIELD)
1923  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1924  av_assert1(sl->mb_y < h->mb_height);
1925 
1926  ret = ff_h264_build_ref_list(h, sl);
1927  if (ret < 0)
1928  return ret;
1929 
 /* Implicit weighted prediction tables (weighted_bipred_idc == 2).
  * NOTE(review): the second half of this condition (original line 1931,
  * presumably the B-slice check) is missing from this extract. */
1930  if (h->ps.pps->weighted_bipred_idc == 2 &&
1932  implicit_weight_table(h, sl, -1);
1933  if (FRAME_MBAFF(h)) {
 /* MBAFF needs a separate implicit table per field parity. */
1934  implicit_weight_table(h, sl, 0);
1935  implicit_weight_table(h, sl, 1);
1936  }
1937  }
1938 
 /* NOTE(review): original lines 1939-1940 and 1942-1943 are missing from
  * this extract; only the setup_finished guard is visible here. */
1941  if (!h->setup_finished)
1943 
 /* Honour the user's skip_loop_filter policy by disabling deblocking for
  * the requested frame classes.  NOTE(review): the slice-type halves of
  * the NONINTRA/BIDIR conditions (original lines 1948 and 1950) are
  * missing from this extract. */
1944  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1945  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1946  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1947  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1949  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1951  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1952  nal->ref_idc == 0))
1953  sl->deblocking_filter = 0;
1954 
 /* deblocking_filter == 1 filters across slice boundaries, which cannot
  * run while other slice contexts decode in parallel. */
1955  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1956  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1957  /* Cheat slightly for speed:
1958  * Do not bother to deblock across slices. */
1959  sl->deblocking_filter = 2;
1960  } else {
 /* Run the loop filter after all slices of the picture finished. */
1961  h->postpone_filter = 1;
1962  }
1963  }
 /* QP threshold below which deblocking provably does nothing; consumed by
  * fill_filter_caches().  NOTE(review): original line 1965 is missing
  * from this extract. */
1964  sl->qp_thresh = 15 -
1966  FFMAX3(0,
1967  h->ps.pps->chroma_qp_index_offset[0],
1968  h->ps.pps->chroma_qp_index_offset[1]) +
1969  6 * (h->ps.sps->bit_depth_luma - 8);
1970 
1971  sl->slice_num = ++h->current_slice;
1972 
 /* Heuristic warning when slice_num wraps modulo MAX_SLICES and two
  * distinct slices would alias the same slice_row entry. */
1973  if (sl->slice_num)
1974  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
1975  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
1976  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
1977  && sl->slice_num >= MAX_SLICES) {
1978  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
1979  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
1980  }
1981 
 /* Build the per-slice ref2frm table used by the loop filter: map each
  * (list, ref index) to a stable per-frame id — short-term refs first,
  * then long-term — packed with the 2-bit reference-field flags.
  * id 60 marks "no usable reference". */
1982  for (j = 0; j < 2; j++) {
1983  int id_list[16];
1984  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
1985  for (i = 0; i < 16; i++) {
1986  id_list[i] = 60;
1987  if (j < sl->list_count && i < sl->ref_count[j] &&
1988  sl->ref_list[j][i].parent->f->buf[0]) {
1989  int k;
 /* Identify each reference by its underlying AVBuffer, which is
  * unique per decoded frame. */
1990  const AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
1991  for (k = 0; k < h->short_ref_count; k++)
1992  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
1993  id_list[i] = k;
1994  break;
1995  }
1996  for (k = 0; k < h->long_ref_count; k++)
1997  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
1998  id_list[i] = h->short_ref_count + k;
1999  break;
2000  }
2001  }
2002  }
2003 
 /* Entries [0..1] and [18..19] are -1 sentinels ("intra/unavailable");
  * [2..17] cover frame-coded refs, [20..] the MBAFF field-coded variant
  * with each frame reference duplicated per field. */
2004  ref2frm[0] =
2005  ref2frm[1] = -1;
2006  for (i = 0; i < 16; i++)
2007  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2008  ref2frm[18 + 0] =
2009  ref2frm[18 + 1] = -1;
2010  for (i = 16; i < 48; i++)
2011  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2012  (sl->ref_list[j][i].reference & 3);
2013  }
2014 
 /* Track "gray" (concealed/never-reconstructed) propagation: an I slice
  * clears the flag, otherwise it is inherited from any gray reference. */
2015  if (sl->slice_type_nos == AV_PICTURE_TYPE_I) {
2016  h->cur_pic_ptr->gray = 0;
2017  h->non_gray = 1;
2018  } else {
2019  int gray = 0;
2020  for (j = 0; j < sl->list_count; j++) {
2021  for (i = 0; i < sl->ref_count[j]; i++) {
2022  gray |= sl->ref_list[j][i].parent->gray;
2023  }
2024  }
2025  h->cur_pic_ptr->gray = gray;
2026  }
2027 
 /* Verbose per-slice summary when FF_DEBUG_PICT_INFO is enabled.
  * NOTE(review): original lines 2034 and 2043 (two of the format
  * arguments) are missing from this extract. */
2028  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2029  av_log(h->avctx, AV_LOG_DEBUG,
2030  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2031  sl->slice_num,
2032  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2033  sl->mb_y * h->mb_width + sl->mb_x,
2035  sl->slice_type_fixed ? " fix" : "",
2036  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2037  h->poc.frame_num,
2038  h->cur_pic_ptr->field_poc[0],
2039  h->cur_pic_ptr->field_poc[1],
2040  sl->ref_count[0], sl->ref_count[1],
2041  sl->qscale,
2042  sl->deblocking_filter,
2044  sl->pwt.use_weight,
2045  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2046  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2047  }
2048 
2049  return 0;
2050 }
2051 
2053 {
 /* Parse one slice NAL, handle field/frame boundary transitions and
  * redundant slices, then queue the slice context for decoding.
  * NOTE(review): the signature line (original 2052, presumably
  * ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)) is
  * missing from this extract.  Returns 0 on success or skip, negative
  * AVERROR on failure. */
2054  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2055  int first_slice = sl == h->slice_ctx && !h->current_slice;
2056  int ret;
2057 
2058  sl->gb = nal->gb;
2059 
2060  ret = h264_slice_header_parse(h, sl, nal);
2061  if (ret < 0)
2062  return ret;
2063 
2064  // discard redundant pictures
2065  if (sl->redundant_pic_count > 0) {
2066  sl->ref_count[0] = sl->ref_count[1] = 0;
2067  return 0;
2068  }
2069 
 /* A new picture/field must not begin after frame-level setup has been
  * finalized. */
2070  if (sl->first_mb_addr == 0 || !h->current_slice) {
2071  if (h->setup_finished) {
2072  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2073  return AVERROR_INVALIDDATA;
2074  }
2075  }
2076 
2077  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2078  if (h->current_slice) {
2079  // this slice starts a new field
2080  // first decode any pending queued slices
2081  if (h->nb_slice_ctx_queued) {
2082  H264SliceContext tmp_ctx;
2083 
 /* NOTE(review): original line 2084, which runs the queued slices
  * and assigns ret, is missing from this extract. */
2085  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2086  return ret;
2087 
 /* Swap the freshly parsed slice into context slot 0 so decoding
  * of the new field continues with a single context. */
2088  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2089  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2090  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2091  sl = h->slice_ctx;
2092  }
2093 
 /* Close the previous field, or recover from a broken frame split
  * across packets when an IDR slice arrives mid-frame. */
2094  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2095  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2096  if (ret < 0)
2097  return ret;
2098  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2099  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2100  ret = ff_h264_field_end(h, h->slice_ctx, 1);
 /* Unblock any frame-threading consumer waiting on this picture. */
2101  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2102  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2103  h->cur_pic_ptr = NULL;
2104  if (ret < 0)
2105  return ret;
2106  } else
2107  return AVERROR_INVALIDDATA;
2108  }
2109 
2110  if (!h->first_field) {
2111  if (h->cur_pic_ptr && !h->droppable) {
2112  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2113  h->picture_structure == PICT_BOTTOM_FIELD);
2114  }
2115  h->cur_pic_ptr = NULL;
2116  }
2117  }
2118 
2119  if (!h->current_slice)
2120  av_assert0(sl == h->slice_ctx);
2121 
 /* Apply the skip_frame discard policy before any picture-level setup. */
2122  if (h->current_slice == 0 && !h->first_field) {
2123  if (
2124  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2125  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2126  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2127  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2128  h->avctx->skip_frame >= AVDISCARD_ALL) {
2129  return 0;
2130  }
2131  }
2132 
 /* Parameter sets must stay consistent across all slices of a picture. */
2133  if (!first_slice) {
2134  const PPS *pps = h->ps.pps_list[sl->pps_id];
2135 
2136  if (h->ps.pps->sps_id != pps->sps_id ||
2137  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2138  (h->setup_finished && h->ps.pps != pps)*/) {
2139  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2140  return AVERROR_INVALIDDATA;
2141  }
2142  if (h->ps.sps != pps->sps) {
2143  av_log(h->avctx, AV_LOG_ERROR,
2144  "SPS changed in the middle of the frame\n");
2145  return AVERROR_INVALIDDATA;
2146  }
2147  }
2148 
 /* First slice of a field/frame starts the picture; later slices must
  * match the established picture structure and droppability. */
2149  if (h->current_slice == 0) {
2150  ret = h264_field_start(h, sl, nal, first_slice);
2151  if (ret < 0)
2152  return ret;
2153  } else {
2154  if (h->picture_structure != sl->picture_structure ||
2155  h->droppable != (nal->ref_idc == 0)) {
2156  av_log(h->avctx, AV_LOG_ERROR,
2157  "Changing field mode (%d -> %d) between slices is not allowed\n",
2158  h->picture_structure, sl->picture_structure);
2159  return AVERROR_INVALIDDATA;
2160  } else if (!h->cur_pic_ptr) {
2161  av_log(h->avctx, AV_LOG_ERROR,
2162  "unset cur_pic_ptr on slice %d\n",
2163  h->current_slice + 1);
2164  return AVERROR_INVALIDDATA;
2165  }
2166  }
2167 
2168  ret = h264_slice_init(h, sl, nal);
2169  if (ret < 0)
2170  return ret;
2171 
2172  h->nb_slice_ctx_queued++;
2173 
2174  return 0;
2175 }
2176 
2178 {
 /* Map the slice type to the numeric value used in the bitstream
  * (P=0, B=1, I=2, SP=3, SI=4), or AVERROR_INVALIDDATA for anything
  * else.  NOTE(review): the signature line (original 2177) is missing
  * from this extract. */
2179  switch (sl->slice_type) {
2180  case AV_PICTURE_TYPE_P:
2181  return 0;
2182  case AV_PICTURE_TYPE_B:
2183  return 1;
2184  case AV_PICTURE_TYPE_I:
2185  return 2;
2186  case AV_PICTURE_TYPE_SP:
2187  return 3;
2188  case AV_PICTURE_TYPE_SI:
2189  return 4;
2190  default:
2191  return AVERROR_INVALIDDATA;
2192  }
2193 }
2194 
 /* Fill the motion-vector and reference caches for one prediction list,
  * pulling in the top and left neighbours, for use by the loop filter.
  * NOTE(review): the first signature line (original 2195, presumably
  * static void fill_filter_caches_inter(const H264Context *h, ...)) is
  * missing from this extract. */
2196  H264SliceContext *sl,
2197  int mb_type, int top_xy,
2198  const int left_xy[LEFT_MBS],
2199  int top_type,
2200  const int left_type[LEFT_MBS],
2201  int mb_xy, int list)
2202 {
2203  int b_stride = h->b_stride;
 /* mv_cache/ref_cache are laid out on an 8-wide grid; scan8[0] is the
  * top-left position of the current MB, negative offsets address the
  * neighbour rows/columns. */
2204  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2205  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2206  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
 /* Top neighbour: copy its bottom row of MVs and remap its ref
  * indices through the *neighbour slice's* ref2frm table. */
2207  if (USES_LIST(top_type, list)) {
2208  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2209  const int b8_xy = 4 * top_xy + 2;
2210  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2211  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2212  ref_cache[0 - 1 * 8] =
2213  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2214  ref_cache[2 - 1 * 8] =
2215  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2216  } else {
2217  AV_ZERO128(mv_dst - 1 * 8);
2218  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2219  }
2220 
 /* Left neighbour: only filled when same interlacing as current MB. */
2221  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2222  if (USES_LIST(left_type[LTOP], list)) {
2223  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2224  const int b8_xy = 4 * left_xy[LTOP] + 1;
2225  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2226  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2227  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2228  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2229  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2230  ref_cache[-1 + 0] =
2231  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2232  ref_cache[-1 + 16] =
2233  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2234  } else {
2235  AV_ZERO32(mv_dst - 1 + 0);
2236  AV_ZERO32(mv_dst - 1 + 8);
2237  AV_ZERO32(mv_dst - 1 + 16);
2238  AV_ZERO32(mv_dst - 1 + 24);
2239  ref_cache[-1 + 0] =
2240  ref_cache[-1 + 8] =
2241  ref_cache[-1 + 16] =
2242  ref_cache[-1 + 24] = LIST_NOT_USED;
2243  }
2244  }
2245  }
2246 
 /* Current MB does not use this list: blank the whole 4x4 cache area. */
2247  if (!USES_LIST(mb_type, list)) {
2248  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2249  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2250  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2251  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2252  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2253  return;
2254  }
2255 
 /* Current MB refs: remap the four 8x8 ref indices and broadcast each
  * one across its pair of cache rows with byte replication. */
2256  {
2257  const int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2258  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2259  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2260  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2261  AV_WN32A(&ref_cache[0 * 8], ref01);
2262  AV_WN32A(&ref_cache[1 * 8], ref01);
2263  AV_WN32A(&ref_cache[2 * 8], ref23);
2264  AV_WN32A(&ref_cache[3 * 8], ref23);
2265  }
2266 
 /* Current MB motion vectors: copy all four 4-MV rows into the cache. */
2267  {
2268  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2269  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2270  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2271  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2272  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2273  }
2274 }
2275 
2276 /**
2277  * @return non zero if the loop filter can be skipped
2278  */
2279 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2280 {
 /* Prepare all per-MB caches (neighbour types, MVs, refs, non-zero-count)
  * that ff_h264_filter_mb*() needs, and detect the cases where filtering
  * provably does nothing so the caller can skip it. */
2281  const int mb_xy = sl->mb_xy;
2282  int top_xy, left_xy[LEFT_MBS];
2283  int top_type, left_type[LEFT_MBS];
2284  const uint8_t *nnz;
2285  uint8_t *nnz_cache;
2286 
2287  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2288 
 /* In MBAFF, the effective top/left neighbours depend on the field
  * flags of the current and neighbouring MB pairs. */
2289  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2290  if (FRAME_MBAFF(h)) {
2291  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2292  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2293  if (sl->mb_y & 1) {
2294  if (left_mb_field_flag != curr_mb_field_flag)
2295  left_xy[LTOP] -= h->mb_stride;
2296  } else {
2297  if (curr_mb_field_flag)
 /* Branch-free: subtract a stride only when the top MB pair is
  * frame-coded (bit 7 of mb_type is the interlaced flag). */
2298  top_xy += h->mb_stride &
2299  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2300  if (left_mb_field_flag != curr_mb_field_flag)
2301  left_xy[LBOT] += h->mb_stride;
2302  }
2303  }
2304 
2305  sl->top_mb_xy = top_xy;
2306  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2307  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2308  {
2309  /* For sufficiently low qp, filtering wouldn't do anything.
2310  * This is a conservative estimate: could also check beta_offset
2311  * and more accurate chroma_qp. */
2312  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2313  int qp = h->cur_pic.qscale_table[mb_xy];
2314  if (qp <= qp_thresh &&
2315  (left_xy[LTOP] < 0 ||
2316  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2317  (top_xy < 0 ||
2318  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2319  if (!FRAME_MBAFF(h))
2320  return 1;
 /* MBAFF: also check the alternate-field neighbours before
  * declaring the filter skippable. */
2321  if ((left_xy[LTOP] < 0 ||
2322  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2323  (top_xy < h->mb_stride ||
2324  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2325  return 1;
2326  }
2327  }
2328 
 /* Zero a neighbour type when it must not be filtered against:
  * deblocking_filter == 2 disables filtering across slice boundaries,
  * otherwise only unavailable (0xFFFF) neighbours are excluded. */
2329  top_type = h->cur_pic.mb_type[top_xy];
2330  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2331  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2332  if (sl->deblocking_filter == 2) {
2333  if (h->slice_table[top_xy] != sl->slice_num)
2334  top_type = 0;
2335  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2336  left_type[LTOP] = left_type[LBOT] = 0;
2337  } else {
2338  if (h->slice_table[top_xy] == 0xFFFF)
2339  top_type = 0;
2340  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2341  left_type[LTOP] = left_type[LBOT] = 0;
2342  }
2343  sl->top_type = top_type;
2344  sl->left_type[LTOP] = left_type[LTOP];
2345  sl->left_type[LBOT] = left_type[LBOT];
2346 
 /* Intra MBs never need the inter MV/ref caches below. */
2347  if (IS_INTRA(mb_type))
2348  return 0;
2349 
2350  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2351  top_type, left_type, mb_xy, 0);
2352  if (sl->list_count == 2)
2353  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2354  top_type, left_type, mb_xy, 1);
2355 
 /* Fill the non-zero-count cache for the current MB and the bordering
  * rows/columns of the top and left neighbours. */
2356  nnz = h->non_zero_count[mb_xy];
2357  nnz_cache = sl->non_zero_count_cache;
2358  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2359  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2360  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2361  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2362  sl->cbp = h->cbp_table[mb_xy];
2363 
2364  if (top_type) {
2365  nnz = h->non_zero_count[top_xy];
2366  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2367  }
2368 
2369  if (left_type[LTOP]) {
2370  nnz = h->non_zero_count[left_xy[LTOP]];
2371  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2372  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2373  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2374  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2375  }
2376 
2377  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2378  * from what the loop filter needs */
2379  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
 /* Derive filter-view NNZ from the cbp bits of 8x8-transform MBs. */
2380  if (IS_8x8DCT(top_type)) {
2381  nnz_cache[4 + 8 * 0] =
2382  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2383  nnz_cache[6 + 8 * 0] =
2384  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2385  }
2386  if (IS_8x8DCT(left_type[LTOP])) {
2387  nnz_cache[3 + 8 * 1] =
2388  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2389  }
2390  if (IS_8x8DCT(left_type[LBOT])) {
2391  nnz_cache[3 + 8 * 3] =
2392  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2393  }
2394 
2395  if (IS_8x8DCT(mb_type)) {
2396  nnz_cache[scan8[0]] =
2397  nnz_cache[scan8[1]] =
2398  nnz_cache[scan8[2]] =
2399  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2400 
2401  nnz_cache[scan8[0 + 4]] =
2402  nnz_cache[scan8[1 + 4]] =
2403  nnz_cache[scan8[2 + 4]] =
2404  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2405 
2406  nnz_cache[scan8[0 + 8]] =
2407  nnz_cache[scan8[1 + 8]] =
2408  nnz_cache[scan8[2 + 8]] =
2409  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2410 
2411  nnz_cache[scan8[0 + 12]] =
2412  nnz_cache[scan8[1 + 12]] =
2413  nnz_cache[scan8[2 + 12]] =
2414  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2415  }
2416  }
2417 
2418  return 0;
2419 }
2420 
2421 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2422 {
 /* Run the deblocking filter over MB columns [start_x, end_x) of the
  * current MB row (both rows of an MBAFF pair).  No-op when filtering
  * is postponed to after all slices or disabled for this slice. */
2423  uint8_t *dest_y, *dest_cb, *dest_cr;
2424  int linesize, uvlinesize, mb_x, mb_y;
2425  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2426  const int old_slice_type = sl->slice_type;
2427  const int pixel_shift = h->pixel_shift;
 /* Chroma MB height in samples (16 for 4:4:4, 8 for 4:2:0/4:2:2). */
2428  const int block_h = 16 >> h->chroma_y_shift;
2429 
2430  if (h->postpone_filter)
2431  return;
2432 
2433  if (sl->deblocking_filter) {
2434  for (mb_x = start_x; mb_x < end_x; mb_x++)
 /* For MBAFF iterate over both rows of the MB pair. */
2435  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2436  int mb_xy, mb_type;
2437  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2438  mb_type = h->cur_pic.mb_type[mb_xy];
2439 
2440  if (FRAME_MBAFF(h))
2441  sl->mb_mbaff =
2442  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2443 
2444  sl->mb_x = mb_x;
2445  sl->mb_y = mb_y;
 /* Compute the luma/chroma destinations of this MB in the frame. */
2446  dest_y = h->cur_pic.f->data[0] +
2447  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2448  dest_cb = h->cur_pic.f->data[1] +
2449  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2450  mb_y * sl->uvlinesize * block_h;
2451  dest_cr = h->cur_pic.f->data[2] +
2452  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2453  mb_y * sl->uvlinesize * block_h;
2454  // FIXME simplify above
2455 
 /* Field MBs use doubled strides; odd field rows start one line up. */
2456  if (MB_FIELD(sl)) {
2457  linesize = sl->mb_linesize = sl->linesize * 2;
2458  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2459  if (mb_y & 1) { // FIXME move out of this function?
2460  dest_y -= sl->linesize * 15;
2461  dest_cb -= sl->uvlinesize * (block_h - 1);
2462  dest_cr -= sl->uvlinesize * (block_h - 1);
2463  }
2464  } else {
2465  linesize = sl->mb_linesize = sl->linesize;
2466  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2467  }
2468  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2469  uvlinesize, 0);
 /* fill_filter_caches() returns non-zero when filtering this MB
  * would provably change nothing. */
2470  if (fill_filter_caches(h, sl, mb_type))
2471  continue;
2472  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2473  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2474 
2475  if (FRAME_MBAFF(h)) {
2476  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2477  linesize, uvlinesize);
2478  } else {
2479  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2480  dest_cr, linesize, uvlinesize);
2481  }
2482  }
2483  }
 /* Restore the slice-context state the loop clobbered. */
2484  sl->slice_type = old_slice_type;
2485  sl->mb_x = end_x;
2486  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2487  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2488  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2489 }
2490 
2492 {
 /* Predict the MBAFF field-decoding flag for the current MB pair from
  * the left (preferred) or top neighbour belonging to the same slice.
  * NOTE(review): the signature line (original 2491, presumably
  * predict_field_decoding_flag()) is missing from this extract. */
2493  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2494  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2495  h->cur_pic.mb_type[mb_xy - 1] :
2496  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2497  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2498  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2499 }
2500 
2501 /**
2502  * Draw edges and report progress for the last MB row.
2503  */
 /* NOTE(review): the signature line (original 2504, presumably
  * static void decode_finish_row(const H264Context *h, H264SliceContext *sl))
  * is missing from this extract. */
2505 {
2506  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2507  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2508  int height = 16 << FRAME_MBAFF(h);
2509  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2510 
 /* Deblocking may still modify rows above; hold back a border of rows
  * unless this is the last band of the picture. */
2511  if (sl->deblocking_filter) {
2512  if ((top + height) >= pic_height)
2513  height += deblock_border;
2514  top -= deblock_border;
2515  }
2516 
2517  if (top >= pic_height || (top + height) < 0)
2518  return;
2519 
 /* Clamp the band to the picture. */
2520  height = FFMIN(height, pic_height - top);
2521  if (top < 0) {
2522  height = top + height;
2523  top = 0;
2524  }
2525 
2526  ff_h264_draw_horiz_band(h, sl, top, height);
2527 
 /* No progress reporting for droppable frames or after decode errors. */
2528  if (h->droppable || h->er.error_occurred)
2529  return;
2530 
2531  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2532  h->picture_structure == PICT_BOTTOM_FIELD);
2533 }
2534 
 /* Thin wrapper around ff_er_add_slice() that is a no-op when error
  * resilience is disabled.  NOTE(review): the first signature line
  * (original 2535, static void er_add_slice(H264SliceContext *sl,) per
  * the cross-reference listing below) is missing from this extract. */
2536  int startx, int starty,
2537  int endx, int endy, int status)
2538 {
2539  if (!sl->h264->enable_er)
2540  return;
2541 
2542  if (CONFIG_ERROR_RESILIENCE) {
2543  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2544  }
2545 }
2546 
2547 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2548 {
 /* Decode all macroblocks of one slice (CABAC or CAVLC path), running the
  * loop filter per completed row unless postponed.  Used directly and as
  * an avctx->execute() callback for slice threading.  Returns 0 on
  * success or a negative AVERROR. */
2549  H264SliceContext *sl = arg;
2550  const H264Context *h = sl->h264;
2551  int lf_x_start = sl->mb_x;
2552  int orig_deblock = sl->deblocking_filter;
2553  int ret;
2554 
2555  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2556  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2557 
2558  ret = alloc_scratch_buffers(sl, sl->linesize);
2559  if (ret < 0)
2560  return ret;
2561 
2562  sl->mb_skip_run = -1;
2563 
2564  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2565 
 /* With postponed filtering the per-row loop_filter() calls must no-op. */
2566  if (h->postpone_filter)
2567  sl->deblocking_filter = 0;
2568 
2569  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2570  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2571 
 /* Frame decoding with ER: if the area before this slice was not fully
  * decoded, flag an error so concealment can run later. */
2572  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2573  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2574  if (start_i) {
2575  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2576  prev_status &= ~ VP_START;
2577  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2578  sl->er->error_occurred = 1;
2579  }
2580  }
2581 
2582  if (h->ps.pps->cabac) {
2583  /* realign */
2584  align_get_bits(&sl->gb);
2585 
2586  /* init cabac */
 /* NOTE(review): original line 2587, the call initializing the CABAC
  * decoder that assigns ret, is missing from this extract. */
2588  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2589  (get_bits_left(&sl->gb) + 7) / 8);
2590  if (ret < 0)
2591  return ret;
2592 
 /* NOTE(review): original line 2593, initializing the CABAC state
  * tables, is missing from this extract. */
2594 
2595  for (;;) {
2596  int ret, eos;
 /* Guard against a slice running into the area of the next one. */
2597  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2598  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2599  sl->next_slice_idx);
2600  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2601  sl->mb_y, ER_MB_ERROR);
2602  return AVERROR_INVALIDDATA;
2603  }
2604 
2605  ret = ff_h264_decode_mb_cabac(h, sl);
2606 
2607  if (ret >= 0)
2608  ff_h264_hl_decode_mb(h, sl);
2609 
2610  // FIXME optimal? or let mb_decode decode 16x32 ?
 /* MBAFF: decode the bottom MB of the pair right after the top. */
2611  if (ret >= 0 && FRAME_MBAFF(h)) {
2612  sl->mb_y++;
2613 
2614  ret = ff_h264_decode_mb_cabac(h, sl);
2615 
2616  if (ret >= 0)
2617  ff_h264_hl_decode_mb(h, sl);
2618  sl->mb_y--;
2619  }
2620  eos = get_cabac_terminate(&sl->cabac);
2621 
 /* Workaround for encoders that truncate the final CABAC bytes. */
2622  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2623  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2624  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2625  sl->mb_y, ER_MB_END);
2626  if (sl->mb_x >= lf_x_start)
2627  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2628  goto finish;
2629  }
2630  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2631  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2632  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2633  av_log(h->avctx, AV_LOG_ERROR,
2634  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2635  sl->mb_x, sl->mb_y,
2636  sl->cabac.bytestream_end - sl->cabac.bytestream);
2637  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2638  sl->mb_y, ER_MB_ERROR);
2639  return AVERROR_INVALIDDATA;
2640  }
2641 
 /* End of row: deblock it, report progress, advance to next row. */
2642  if (++sl->mb_x >= h->mb_width) {
2643  loop_filter(h, sl, lf_x_start, sl->mb_x);
2644  sl->mb_x = lf_x_start = 0;
2645  decode_finish_row(h, sl);
2646  ++sl->mb_y;
2647  if (FIELD_OR_MBAFF_PICTURE(h)) {
2648  ++sl->mb_y;
 /* NOTE(review): original line 2650, presumably calling
  * predict_field_decoding_flag(), is missing from this extract. */
2649  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2651  }
2652  }
2653 
2654  if (eos || sl->mb_y >= h->mb_height) {
2655  ff_tlog(h->avctx, "slice end %d %d\n",
2656  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2657  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2658  sl->mb_y, ER_MB_END);
2659  if (sl->mb_x > lf_x_start)
2660  loop_filter(h, sl, lf_x_start, sl->mb_x);
2661  goto finish;
2662  }
2663  }
2664  } else {
 /* CAVLC path: same structure as above without a terminate symbol;
  * slice end is detected via bit-count exhaustion. */
2665  for (;;) {
2666  int ret;
2667 
2668  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2669  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2670  sl->next_slice_idx);
2671  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2672  sl->mb_y, ER_MB_ERROR);
2673  return AVERROR_INVALIDDATA;
2674  }
2675 
2676  ret = ff_h264_decode_mb_cavlc(h, sl);
2677 
2678  if (ret >= 0)
2679  ff_h264_hl_decode_mb(h, sl);
2680 
2681  // FIXME optimal? or let mb_decode decode 16x32 ?
2682  if (ret >= 0 && FRAME_MBAFF(h)) {
2683  sl->mb_y++;
2684  ret = ff_h264_decode_mb_cavlc(h, sl);
2685 
2686  if (ret >= 0)
2687  ff_h264_hl_decode_mb(h, sl);
2688  sl->mb_y--;
2689  }
2690 
2691  if (ret < 0) {
2692  av_log(h->avctx, AV_LOG_ERROR,
2693  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2694  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2695  sl->mb_y, ER_MB_ERROR);
2696  return ret;
2697  }
2698 
2699  if (++sl->mb_x >= h->mb_width) {
2700  loop_filter(h, sl, lf_x_start, sl->mb_x);
2701  sl->mb_x = lf_x_start = 0;
2702  decode_finish_row(h, sl);
2703  ++sl->mb_y;
2704  if (FIELD_OR_MBAFF_PICTURE(h)) {
2705  ++sl->mb_y;
 /* NOTE(review): original line 2707, presumably calling
  * predict_field_decoding_flag(), is missing from this extract. */
2706  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
2708  }
2709  if (sl->mb_y >= h->mb_height) {
2710  ff_tlog(h->avctx, "slice end %d %d\n",
2711  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2712 
 /* Exactly consumed (or tolerated leftover bits): clean end. */
2713  if ( get_bits_left(&sl->gb) == 0
2714  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2715  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2716  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2717 
2718  goto finish;
2719  } else {
2720  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2721  sl->mb_x, sl->mb_y, ER_MB_END);
2722 
2723  return AVERROR_INVALIDDATA;
2724  }
2725  }
2726  }
2727 
 /* Bit budget exhausted mid-picture: clean end only if consumed
  * exactly and no skip run remains; otherwise mark an error. */
2728  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2729  ff_tlog(h->avctx, "slice end %d %d\n",
2730  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2731 
2732  if (get_bits_left(&sl->gb) == 0) {
2733  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2734  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2735  if (sl->mb_x > lf_x_start)
2736  loop_filter(h, sl, lf_x_start, sl->mb_x);
2737 
2738  goto finish;
2739  } else {
2740  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2741  sl->mb_y, ER_MB_ERROR);
2742 
2743  return AVERROR_INVALIDDATA;
2744  }
2745  }
2746  }
2747  }
2748 
2749 finish:
2750  sl->deblocking_filter = orig_deblock;
2751  return 0;
2752 }
2753 
2754 /**
2755  * Call decode_slice() for each context.
2756  *
2757  * @param h h264 master context
2758  */
 /* NOTE(review): the signature line (original 2759, presumably
  * int ff_h264_execute_decode_slices(H264Context *h)) is missing from
  * this extract. */
2760 {
2761  AVCodecContext *const avctx = h->avctx;
2762  H264SliceContext *sl;
2763  int context_count = h->nb_slice_ctx_queued;
2764  int ret = 0;
2765  int i, j;
2766 
2767  h->slice_ctx[0].next_slice_idx = INT_MAX;
2768 
 /* Hardware acceleration decodes elsewhere; nothing queued means
  * nothing to do. */
2769  if (h->avctx->hwaccel || context_count < 1)
2770  return 0;
2771 
2772  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2773 
2774  if (context_count == 1) {
2775 
 /* Single context: decode directly on the calling thread. */
2776  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2777  h->postpone_filter = 0;
2778 
2779  ret = decode_slice(avctx, &h->slice_ctx[0]);
2780  h->mb_y = h->slice_ctx[0].mb_y;
2781  if (ret < 0)
2782  goto finish;
2783  } else {
2784  av_assert0(context_count > 0);
 /* Compute each slice's end boundary (the smallest start index of a
  * later slice) so overlapping slices can be detected while decoding. */
2785  for (i = 0; i < context_count; i++) {
2786  int next_slice_idx = h->mb_width * h->mb_height;
2787  int slice_idx;
2788 
2789  sl = &h->slice_ctx[i];
2790 
2791  /* make sure none of those slices overlap */
2792  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2793  for (j = 0; j < context_count; j++) {
2794  H264SliceContext *sl2 = &h->slice_ctx[j];
2795  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2796 
2797  if (i == j || slice_idx2 < slice_idx)
2798  continue;
2799  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2800  }
2801  sl->next_slice_idx = next_slice_idx;
2802  }
2803 
 /* Decode all queued slices in parallel. */
2804  avctx->execute(avctx, decode_slice, h->slice_ctx,
2805  NULL, context_count, sizeof(h->slice_ctx[0]));
2806 
2807  /* pull back stuff from slices to master context */
2808  sl = &h->slice_ctx[context_count - 1];
2809  h->mb_y = sl->mb_y;
2810 
 /* Postponed deblocking: filter every slice's rows now that all
  * slices of the picture are decoded. */
2811  if (h->postpone_filter) {
2812  h->postpone_filter = 0;
2813 
2814  for (i = 0; i < context_count; i++) {
2815  int y_end, x_end;
2816 
2817  sl = &h->slice_ctx[i];
2818  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2819  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2820 
2821  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2822  sl->mb_y = j;
2823  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2824  j == y_end - 1 ? x_end : h->mb_width);
2825  }
2826  }
2827  }
2828  }
2829 
2830 finish:
2831  h->nb_slice_ctx_queued = 0;
2832  return ret;
2833 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:33
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2535
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:953
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:680
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:226
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:260
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:294
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:141
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
av_clip
#define av_clip
Definition: common.h:99
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1046
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:326
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:317
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:91
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1363
AVERROR
Convert a standard POSIX error code into a negative FFmpeg error code; for example, a function may return AVERROR(EAGAIN) to indicate that the operation should be retried later.
cabac.h
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:127
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1159
ff_h264_sei_ctx_replace
static int ff_h264_sei_ctx_replace(H264SEIContext *dst, const H264SEIContext *src)
Definition: h264_sei.h:132
H264Picture::f
AVFrame * f
Definition: h264dec.h:107
ff_get_format
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1241
ff_refstruct_pool_alloc
FFRefStructPool * ff_refstruct_pool_alloc(size_t size, unsigned flags)
Equivalent to ff_refstruct_pool_alloc(size, flags, NULL, NULL, NULL, NULL, NULL)
Definition: refstruct.c:335
out
FILE * out
Definition: movenc.c:55
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:246
av_clip_int8
#define av_clip_int8
Definition: common.h:108
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:98
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2965
ff_h264_ref_picture
int ff_h264_ref_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:108
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:950
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:672
H264Picture::ref_index
int8_t * ref_index[2]
RefStruct reference.
Definition: h264dec.h:124
HWACCEL_MAX
#define HWACCEL_MAX
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:64
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:307
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:266
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:35
color_frame
static void color_frame(AVFrame *frame, const int c[4])
Definition: h264_slice.c:300
H264Picture::pps
const PPS * pps
Definition: h264dec.h:150
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:126
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:111
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:265
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:58
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2491
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:446
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:330
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:130
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:583
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:517
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2504
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:262
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:786
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:113
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:206
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:254
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:32
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:610
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:536
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:465
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:821
find_unused_picture
static int find_unused_picture(const H264Context *h)
Definition: h264_slice.c:268
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:646
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:30
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:522
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:233
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:34
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:427
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:587
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:146
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
thread.h
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:379
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1397
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:225
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:638
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:128
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:231
H264SliceContext
Definition: h264dec.h:172
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:65
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:335
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:293
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:232
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:496
finish
static void finish(void)
Definition: movenc.c:373
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:667
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:129
fail
#define fail()
Definition: checkasm.h:183
ff_refstruct_pool_uninit
static void ff_refstruct_pool_uninit(FFRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1283
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:476
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:87
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2993
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:153
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:280
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:458
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
MAX_SLICES
#define MAX_SLICES
Definition: d3d12va_hevc.c:33
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:65
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:477
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:188
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:110
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:235
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:117
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:246
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:177
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
refstruct.h
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1035
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:481
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:800
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_refstruct_ref_c
const void * ff_refstruct_ref_c(const void *obj)
Analog of ff_refstruct_ref(), but for constant objects.
Definition: refstruct.c:149
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:625
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:577
av_memcpy_backptr
void av_memcpy_backptr(uint8_t *dst, int back, int cnt)
Overlapping memcpy() implementation.
Definition: mem.c:447
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
H264Picture::qscale_table_base
int8_t * qscale_table_base
RefStruct reference.
Definition: h264dec.h:112
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2052
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:341
ff_hwaccel_frame_priv_alloc
int ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, void **hwaccel_picture_private)
Allocate a hwaccel frame private data if the provided avctx uses a hwaccel method that needs it.
Definition: decode.c:2111
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:134
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:59
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:625
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:425
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:67
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:986
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:606
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:828
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1684
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:475
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:31
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:78
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:189
IS_INTRA
#define IS_INTRA(x, y)
field
It is the only field you need to keep, assuming you have a context; there is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:754
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, const uint8_t *src_y, const uint8_t *src_cb, const uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:578
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:178
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:227
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:147
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:216
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
H264Context::enable_er
int enable_er
Definition: h264dec.h:559
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:325
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:850
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:73
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:109
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:188
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:128
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:605
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:601
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:278
H264Picture::mb_type_base
uint32_t * mb_type_base
RefStruct reference.
Definition: h264dec.h:118
ff_thread_await_progress
void ff_thread_await_progress(const ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
Definition: pthread_frame.c:600
SPS
Sequence parameter set.
Definition: h264_ps.h:44
H264Ref::parent
const H264Picture * parent
Definition: h264dec.h:169
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:37
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:283
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:183
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:357
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:279
PPS
Picture parameter set.
Definition: h264_ps.h:110
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:562
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1020
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:109
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:71
H264Picture::mb_height
int mb_height
Definition: h264dec.h:152
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_D3D12
@ AV_PIX_FMT_D3D12
Hardware surfaces for Direct3D 12.
Definition: pixfmt.h:440
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
H264SliceContext::qscale
int qscale
Definition: h264dec.h:182
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:778
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2279
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:633
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:744
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:493
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:282
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:284
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:84
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:232
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:913
c
In C, some operations — such as signed integer overflow, dereferencing freed memory, or accessing outside allocated bounds — are undefined behavior and must not occur; it is not safe even if the output of the undefined operation is unused. Optimizing compilers assume no undefined behavior occurs, and optimizing on that wrong assumption can (and has) led to effects beyond the output of the computation. Signed integer overflow is a particular problem in speed-critical code.
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:209
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:752
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:228
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:38
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:69
H264SEIPictureTiming
Definition: h264_sei.h:54
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:312
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:239
AVFrame::crop_left
size_t crop_left
Definition: frame.h:753
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:218
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:476
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
H264Picture::reference
int reference
Definition: h264dec.h:144
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:322
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:68
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:223
VP_START
#define VP_START
Current MB is the first after a resync marker.
Definition: error_resilience.h:29
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:483
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:192
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:108
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:119
H264Picture::decode_error_flags
atomic_int * decode_error_flags
RefStruct reference; its pointee is shared between decoding threads.
Definition: h264dec.h:156
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:665
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:36
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:145
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:204
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:184
ff_frame_new_side_data
int ff_frame_new_side_data(const AVCodecContext *avctx, AVFrame *frame, enum AVFrameSideDataType type, size_t size, AVFrameSideData **psd)
Wrapper around av_frame_new_side_data, which rejects side data overridden by the demuxer.
Definition: decode.c:1959
H264Picture::gray
int gray
Definition: h264dec.h:158
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:485
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:279
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1595
H264SliceContext::cbp
int cbp
Definition: h264dec.h:250
gray
In swscale, output is produced from consecutive non-overlapping rectangles (slices): the vertical scaler pulls lines from a ring buffer; when a wanted line is not present, it is generated from the input slice through the input converter and horizontal scaler, and stored back in the ring buffer to serve future requests. When the input to the main path is not planar 8-bit-per-component YUV or 8-bit gray, it passes through the input converter first.
Definition: swscale.txt:52
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:211
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:225
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2547
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:318
a
If code is later changed — for example by adding a division or another operation — the signedness of an expression can easily be mistaken by the reader. To avoid this confusion, the SUINT type was added: it is the C unsigned type, but it holds a signed int value.
Definition: undefined.txt:41
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:221
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:336
H264SliceContext::slice_type_nos
int slice_type_nos
Slice type without switching types (SI and SP are remapped to I and P, respectively).
Definition: h264dec.h:179
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:323
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture *const *from, int count, H264Context *new_base, const H264Context *old_base)
Definition: h264_slice.c:287
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:126
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:66
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:72
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:32
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:40
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:52
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2421
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:237
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:59
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:194
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:305
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:390
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:137
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:322
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:229
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:148
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264Context
H264Context.
Definition: h264dec.h:332
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:217
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:380
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:621
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:39
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:320
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:495
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2759
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:222
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:597
ff_h264_replace_picture
int ff_h264_replace_picture(H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:135
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:222
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:186
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:994
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:669
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:477
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:263
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:173
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:280
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:532
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1385
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:185
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:561
ff_refstruct_replace
void ff_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:482
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:487
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:273
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:221
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:190
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:446
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
status
ov_status_e status
Definition: dnn_backend_openvino.c:101
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:294
H264Picture::motion_val_base
int16_t(*[2] motion_val_base)[2]
RefStruct reference.
Definition: h264dec.h:115
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1613
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:277
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:126
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:316
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
H264Picture::mb_width
int mb_width
Definition: h264dec.h:152
ff_h264_unref_picture
void ff_h264_unref_picture(H264Picture *pic)
Definition: h264_picture.c:39
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
H264Picture
Definition: h264dec.h:106
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:66
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
AV_PIX_FMT_FLAG_PLANAR
#define AV_PIX_FMT_FLAG_PLANAR
At least one pixel component is not in the first data plane.
Definition: pixdesc.h:132
pps
uint64_t pps
Definition: dovi_rpuenc.c:35
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1903
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:189
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:162
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:264
LBOT
#define LBOT
Definition: h264dec.h:70
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:288
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
desc
const char * desc
Definition: libsvtav1.c:79
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:280
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:67
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:450
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:38
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
H264SliceContext::er
ERContext * er
Definition: h264dec.h:175
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:34
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
RefStruct reference for hardware accelerator private data.
Definition: h264dec.h:122
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:35
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:321
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, const int left_xy[LEFT_MBS], int top_type, const int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2195
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:141
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:247
AVFrame::crop_top
size_t crop_top
Definition: frame.h:751
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:174
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:201
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:69
h264.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:419
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:281
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:282
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:92
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2177
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:314
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:488
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:49
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:33
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:280
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:215
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:180
H264Ref::poc
int poc
Definition: h264dec.h:166
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:95
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:33
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:324
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3320
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:133
H264Ref::reference
int reference
Definition: h264dec.h:165
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:116
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:420
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:486
ff_refstruct_pool_get
void * ff_refstruct_pool_get(FFRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:236
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, const H264Picture *src)
Definition: h264_picture.c:166
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:449