/* FFmpeg — libavcodec/h264_slice.c (doxygen-extracted listing) */
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "config_components.h"
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/display.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/timecode.h"
35 #include "internal.h"
36 #include "decode.h"
37 #include "cabac.h"
38 #include "cabac_functions.h"
39 #include "decode.h"
40 #include "error_resilience.h"
41 #include "avcodec.h"
42 #include "h264.h"
43 #include "h264dec.h"
44 #include "h264data.h"
45 #include "h264chroma.h"
46 #include "h264_ps.h"
47 #include "golomb.h"
48 #include "mathops.h"
49 #include "mpegutils.h"
50 #include "rectangle.h"
51 #include "thread.h"
52 #include "threadframe.h"
53 
/* 4x4 coefficient scan order used for field (interlaced) macroblocks;
 * each entry encodes a position as x + y * 4.
 * NOTE(review): sized 16+1 — the extra zero element appears to be
 * deliberate overread padding; confirm against the users of the table. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
60 
/* 8x8 coefficient scan order for field (interlaced) macroblocks;
 * each entry encodes a position as x + y * 8 (sized 64+1 like field_scan). */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
79 
/* field_scan8x8 reordered for CAVLC coefficient parsing
 * (same interleaving relationship as zigzag_scan8x8_cavlc below). */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
98 
/* Progressive 8x8 zigzag reordered for CAVLC parsing:
 * zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)] */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
118 
119 static void release_unused_pictures(H264Context *h, int remove_current)
120 {
121  int i;
122 
123  /* release non reference frames */
124  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
125  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
126  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
127  ff_h264_unref_picture(h, &h->DPB[i]);
128  }
129  }
130 }
131 
132 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
133 {
134  const H264Context *h = sl->h264;
135  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
136 
137  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
138  // edge emu needs blocksize + filter length - 1
139  // (= 21x21 for H.264)
140  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
141 
143  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
145  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
146 
147  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
148  !sl->top_borders[0] || !sl->top_borders[1]) {
151  av_freep(&sl->top_borders[0]);
152  av_freep(&sl->top_borders[1]);
153 
156  sl->top_borders_allocated[0] = 0;
157  sl->top_borders_allocated[1] = 0;
158  return AVERROR(ENOMEM);
159  }
160 
161  return 0;
162 }
163 
165 {
166  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
167  const int mb_array_size = h->mb_stride * h->mb_height;
168  const int b4_stride = h->mb_width * 4 + 1;
169  const int b4_array_size = b4_stride * h->mb_height * 4;
170 
171  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
173  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
174  sizeof(uint32_t), av_buffer_allocz);
175  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
176  sizeof(int16_t), av_buffer_allocz);
177  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
178 
179  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
180  !h->ref_index_pool) {
181  av_buffer_pool_uninit(&h->qscale_table_pool);
182  av_buffer_pool_uninit(&h->mb_type_pool);
183  av_buffer_pool_uninit(&h->motion_val_pool);
184  av_buffer_pool_uninit(&h->ref_index_pool);
185  return AVERROR(ENOMEM);
186  }
187 
188  return 0;
189 }
190 
192 {
193  int i, ret = 0;
194 
195  av_assert0(!pic->f->data[0]);
196 
197  pic->tf.f = pic->f;
198  ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
199  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
200  if (ret < 0)
201  goto fail;
202 
203  if (pic->needs_fg) {
204  pic->f_grain->format = pic->f->format;
205  pic->f_grain->width = pic->f->width;
206  pic->f_grain->height = pic->f->height;
207  ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
208  if (ret < 0)
209  goto fail;
210  }
211 
212  if (h->avctx->hwaccel) {
213  const AVHWAccel *hwaccel = h->avctx->hwaccel;
215  if (hwaccel->frame_priv_data_size) {
216  pic->hwaccel_priv_buf = ff_hwaccel_frame_priv_alloc(h->avctx, hwaccel);
217  if (!pic->hwaccel_priv_buf)
218  return AVERROR(ENOMEM);
220  }
221  }
222  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
223  int h_chroma_shift, v_chroma_shift;
225  &h_chroma_shift, &v_chroma_shift);
226 
227  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
228  memset(pic->f->data[1] + pic->f->linesize[1]*i,
229  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
230  memset(pic->f->data[2] + pic->f->linesize[2]*i,
231  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
232  }
233  }
234 
235  if (!h->qscale_table_pool) {
237  if (ret < 0)
238  goto fail;
239  }
240 
241  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
242  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
243  if (!pic->qscale_table_buf || !pic->mb_type_buf)
244  goto fail;
245 
246  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
247  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
248 
249  for (i = 0; i < 2; i++) {
250  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
251  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
252  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
253  goto fail;
254 
255  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
256  pic->ref_index[i] = pic->ref_index_buf[i]->data;
257  }
258 
259  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
260  if (!pic->pps_buf)
261  goto fail;
262  pic->pps = (const PPS*)pic->pps_buf->data;
263 
264  pic->mb_width = h->mb_width;
265  pic->mb_height = h->mb_height;
266  pic->mb_stride = h->mb_stride;
267 
268  return 0;
269 fail:
270  ff_h264_unref_picture(h, pic);
271  return (ret < 0) ? ret : AVERROR(ENOMEM);
272 }
273 
275 {
276  int i;
277 
278  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
279  if (!h->DPB[i].f->buf[0])
280  return i;
281  }
282  return AVERROR_INVALIDDATA;
283 }
284 
285 
/* True when pointer a lies inside the object of the given size starting at b. */
#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))

/* Translate a picture pointer from old_ctx's DPB into the same slot of
 * new_ctx's DPB; NULL when pic does not point into old_ctx's DPB. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    (((pic) && (pic) >= (old_ctx)->DPB &&                 \
      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?  \
     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
292 
293 static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
294  H264Context *new_base,
295  H264Context *old_base)
296 {
297  int i;
298 
299  for (i = 0; i < count; i++) {
300  av_assert1(!from[i] ||
301  IN_RANGE(from[i], old_base, 1) ||
302  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
303  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
304  }
305 }
306 
308 
310  const AVCodecContext *src)
311 {
312  H264Context *h = dst->priv_data, *h1 = src->priv_data;
313  int inited = h->context_initialized, err = 0;
314  int need_reinit = 0;
315  int i, ret;
316 
317  if (dst == src)
318  return 0;
319 
320  if (inited && !h1->ps.sps)
321  return AVERROR_INVALIDDATA;
322 
323  if (inited &&
324  (h->width != h1->width ||
325  h->height != h1->height ||
326  h->mb_width != h1->mb_width ||
327  h->mb_height != h1->mb_height ||
328  !h->ps.sps ||
329  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
330  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
331  h->ps.sps->vui.matrix_coeffs != h1->ps.sps->vui.matrix_coeffs)) {
332  need_reinit = 1;
333  }
334 
335  /* copy block_offset since frame_start may not be called */
336  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
337 
338  // SPS/PPS
339  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
340  ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
341  if (ret < 0)
342  return ret;
343  }
344  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
345  ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
346  if (ret < 0)
347  return ret;
348  }
349 
350  ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
351  if (ret < 0)
352  return ret;
353  h->ps.pps = NULL;
354  h->ps.sps = NULL;
355  if (h1->ps.pps_ref) {
356  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
357  h->ps.sps = h->ps.pps->sps;
358  }
359 
360  if (need_reinit || !inited) {
361  h->width = h1->width;
362  h->height = h1->height;
363  h->mb_height = h1->mb_height;
364  h->mb_width = h1->mb_width;
365  h->mb_num = h1->mb_num;
366  h->mb_stride = h1->mb_stride;
367  h->b_stride = h1->b_stride;
368  h->x264_build = h1->x264_build;
369 
370  if (h->context_initialized || h1->context_initialized) {
371  if ((err = h264_slice_header_init(h)) < 0) {
372  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
373  return err;
374  }
375  }
376 
377  /* copy block_offset since frame_start may not be called */
378  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
379  }
380 
381  h->avctx->coded_height = h1->avctx->coded_height;
382  h->avctx->coded_width = h1->avctx->coded_width;
383  h->avctx->width = h1->avctx->width;
384  h->avctx->height = h1->avctx->height;
385  h->width_from_caller = h1->width_from_caller;
386  h->height_from_caller = h1->height_from_caller;
387  h->coded_picture_number = h1->coded_picture_number;
388  h->first_field = h1->first_field;
389  h->picture_structure = h1->picture_structure;
390  h->mb_aff_frame = h1->mb_aff_frame;
391  h->droppable = h1->droppable;
392 
393  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
394  ret = ff_h264_replace_picture(h, &h->DPB[i], &h1->DPB[i]);
395  if (ret < 0)
396  return ret;
397  }
398 
399  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
400  ret = ff_h264_replace_picture(h, &h->cur_pic, &h1->cur_pic);
401  if (ret < 0)
402  return ret;
403 
404  h->enable_er = h1->enable_er;
405  h->workaround_bugs = h1->workaround_bugs;
406  h->droppable = h1->droppable;
407 
408  // extradata/NAL handling
409  h->is_avc = h1->is_avc;
410  h->nal_length_size = h1->nal_length_size;
411 
412  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
413 
414  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
415  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
416  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
417  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
418 
419  h->next_output_pic = h1->next_output_pic;
420  h->next_outputed_poc = h1->next_outputed_poc;
421  h->poc_offset = h1->poc_offset;
422 
423  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
424  h->nb_mmco = h1->nb_mmco;
425  h->mmco_reset = h1->mmco_reset;
426  h->explicit_ref_marking = h1->explicit_ref_marking;
427  h->long_ref_count = h1->long_ref_count;
428  h->short_ref_count = h1->short_ref_count;
429 
430  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
431  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
432  copy_picture_range(h->delayed_pic, h1->delayed_pic,
433  FF_ARRAY_ELEMS(h->delayed_pic), h, h1);
434 
435  h->frame_recovered = h1->frame_recovered;
436 
437  ret = ff_h264_sei_ctx_replace(&h->sei, &h1->sei);
438  if (ret < 0)
439  return ret;
440 
441  h->sei.common.unregistered.x264_build = h1->sei.common.unregistered.x264_build;
442 
443  if (!h->cur_pic_ptr)
444  return 0;
445 
446  if (!h->droppable) {
448  h->poc.prev_poc_msb = h->poc.poc_msb;
449  h->poc.prev_poc_lsb = h->poc.poc_lsb;
450  }
451  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
452  h->poc.prev_frame_num = h->poc.frame_num;
453 
454  h->recovery_frame = h1->recovery_frame;
455 
456  return err;
457 }
458 
460  const AVCodecContext *src)
461 {
462  H264Context *h = dst->priv_data;
463  const H264Context *h1 = src->priv_data;
464 
465  h->is_avc = h1->is_avc;
466  h->nal_length_size = h1->nal_length_size;
467 
468  return 0;
469 }
470 
472 {
473  H264Picture *pic;
474  int i, ret;
475  const int pixel_shift = h->pixel_shift;
476 
477  if (!ff_thread_can_start_frame(h->avctx)) {
478  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
479  return -1;
480  }
481 
483  h->cur_pic_ptr = NULL;
484 
486  if (i < 0) {
487  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
488  return i;
489  }
490  pic = &h->DPB[i];
491 
492  pic->reference = h->droppable ? 0 : h->picture_structure;
493 #if FF_API_FRAME_PICTURE_NUMBER
495  pic->f->coded_picture_number = h->coded_picture_number++;
497 #endif
498  pic->field_picture = h->picture_structure != PICT_FRAME;
499  pic->frame_num = h->poc.frame_num;
500  /*
501  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
502  * in later.
503  * See decode_nal_units().
504  */
505  pic->f->flags &= ~AV_FRAME_FLAG_KEY;
506  pic->mmco_reset = 0;
507  pic->recovered = 0;
508  pic->invalid_gap = 0;
509  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
510 
511  pic->f->pict_type = h->slice_ctx[0].slice_type;
512 
513  pic->f->crop_left = h->crop_left;
514  pic->f->crop_right = h->crop_right;
515  pic->f->crop_top = h->crop_top;
516  pic->f->crop_bottom = h->crop_bottom;
517 
518  pic->needs_fg = h->sei.common.film_grain_characteristics.present && !h->avctx->hwaccel &&
519  !(h->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN);
520 
521  if ((ret = alloc_picture(h, pic)) < 0)
522  return ret;
523 
524  h->cur_pic_ptr = pic;
525  ff_h264_unref_picture(h, &h->cur_pic);
526  if (CONFIG_ERROR_RESILIENCE) {
527  ff_h264_set_erpic(&h->er.cur_pic, NULL);
528  }
529 
530  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
531  return ret;
532 
533  for (i = 0; i < h->nb_slice_ctx; i++) {
534  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
535  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
536  }
537 
538  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
539  ff_er_frame_start(&h->er);
540  ff_h264_set_erpic(&h->er.last_pic, NULL);
541  ff_h264_set_erpic(&h->er.next_pic, NULL);
542  }
543 
544  for (i = 0; i < 16; i++) {
545  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
546  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
547  }
548  for (i = 0; i < 16; i++) {
549  h->block_offset[16 + i] =
550  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
551  h->block_offset[48 + 16 + i] =
552  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
553  }
554 
555  /* We mark the current picture as non-reference after allocating it, so
556  * that if we break out due to an error it can be released automatically
557  * in the next ff_mpv_frame_start().
558  */
559  h->cur_pic_ptr->reference = 0;
560 
561  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
562 
563  h->next_output_pic = NULL;
564 
565  h->postpone_filter = 0;
566 
567  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
568 
569  if (h->sei.common.unregistered.x264_build >= 0)
570  h->x264_build = h->sei.common.unregistered.x264_build;
571 
572  assert(h->cur_pic_ptr->long_ref == 0);
573 
574  return 0;
575 }
576 
578  uint8_t *src_y,
579  uint8_t *src_cb, uint8_t *src_cr,
580  int linesize, int uvlinesize,
581  int simple)
582 {
583  uint8_t *top_border;
584  int top_idx = 1;
585  const int pixel_shift = h->pixel_shift;
586  int chroma444 = CHROMA444(h);
587  int chroma422 = CHROMA422(h);
588 
589  src_y -= linesize;
590  src_cb -= uvlinesize;
591  src_cr -= uvlinesize;
592 
593  if (!simple && FRAME_MBAFF(h)) {
594  if (sl->mb_y & 1) {
595  if (!MB_MBAFF(sl)) {
596  top_border = sl->top_borders[0][sl->mb_x];
597  AV_COPY128(top_border, src_y + 15 * linesize);
598  if (pixel_shift)
599  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
600  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
601  if (chroma444) {
602  if (pixel_shift) {
603  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
604  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
605  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
606  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
607  } else {
608  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
609  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
610  }
611  } else if (chroma422) {
612  if (pixel_shift) {
613  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
614  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
615  } else {
616  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
617  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
618  }
619  } else {
620  if (pixel_shift) {
621  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
622  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
623  } else {
624  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
625  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
626  }
627  }
628  }
629  }
630  } else if (MB_MBAFF(sl)) {
631  top_idx = 0;
632  } else
633  return;
634  }
635 
636  top_border = sl->top_borders[top_idx][sl->mb_x];
637  /* There are two lines saved, the line above the top macroblock
638  * of a pair, and the line above the bottom macroblock. */
639  AV_COPY128(top_border, src_y + 16 * linesize);
640  if (pixel_shift)
641  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
642 
643  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
644  if (chroma444) {
645  if (pixel_shift) {
646  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
647  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
648  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
649  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
650  } else {
651  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
652  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
653  }
654  } else if (chroma422) {
655  if (pixel_shift) {
656  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
657  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
658  } else {
659  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
660  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
661  }
662  } else {
663  if (pixel_shift) {
664  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
665  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
666  } else {
667  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
668  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
669  }
670  }
671  }
672 }
673 
674 /**
675  * Initialize implicit_weight table.
676  * @param field 0/1 initialize the weight for interlaced MBAFF
677  * -1 initializes the rest
678  */
680 {
681  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
682 
683  for (i = 0; i < 2; i++) {
684  sl->pwt.luma_weight_flag[i] = 0;
685  sl->pwt.chroma_weight_flag[i] = 0;
686  }
687 
688  if (field < 0) {
689  if (h->picture_structure == PICT_FRAME) {
690  cur_poc = h->cur_pic_ptr->poc;
691  } else {
692  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
693  }
694  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
695  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
696  sl->pwt.use_weight = 0;
697  sl->pwt.use_weight_chroma = 0;
698  return;
699  }
700  ref_start = 0;
701  ref_count0 = sl->ref_count[0];
702  ref_count1 = sl->ref_count[1];
703  } else {
704  cur_poc = h->cur_pic_ptr->field_poc[field];
705  ref_start = 16;
706  ref_count0 = 16 + 2 * sl->ref_count[0];
707  ref_count1 = 16 + 2 * sl->ref_count[1];
708  }
709 
710  sl->pwt.use_weight = 2;
711  sl->pwt.use_weight_chroma = 2;
712  sl->pwt.luma_log2_weight_denom = 5;
714 
715  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
716  int64_t poc0 = sl->ref_list[0][ref0].poc;
717  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
718  int w = 32;
719  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
720  int poc1 = sl->ref_list[1][ref1].poc;
721  int td = av_clip_int8(poc1 - poc0);
722  if (td) {
723  int tb = av_clip_int8(cur_poc - poc0);
724  int tx = (16384 + (FFABS(td) >> 1)) / td;
725  int dist_scale_factor = (tb * tx + 32) >> 8;
726  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
727  w = 64 - dist_scale_factor;
728  }
729  }
730  if (field < 0) {
731  sl->pwt.implicit_weight[ref0][ref1][0] =
732  sl->pwt.implicit_weight[ref0][ref1][1] = w;
733  } else {
734  sl->pwt.implicit_weight[ref0][ref1][field] = w;
735  }
736  }
737  }
738 }
739 
740 /**
741  * initialize scan tables
742  */
744 {
745  int i;
746  for (i = 0; i < 16; i++) {
747 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
748  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
749  h->field_scan[i] = TRANSPOSE(field_scan[i]);
750 #undef TRANSPOSE
751  }
752  for (i = 0; i < 64; i++) {
753 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
754  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
755  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
756  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
757  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
758 #undef TRANSPOSE
759  }
760  if (h->ps.sps->transform_bypass) { // FIXME same ugly
761  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
762  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
763  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
764  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
765  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
766  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
767  } else {
768  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
769  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
770  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
771  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
772  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
773  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
774  }
775 }
776 
/* Build the candidate output pixel-format list for the current SPS
 * (bit depth + chroma sampling), hwaccel formats first, then let
 * ff_thread_get_format() pick one — unless the currently negotiated
 * format is already in the list and no re-negotiation was forced.
 * Returns the chosen format or AVERROR_INVALIDDATA for unsupported
 * bit depths. */
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
{
/* Worst-case number of hwaccel formats that can be appended below
 * (D3D11VA contributes two entries). */
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
                     (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
                     CONFIG_H264_NVDEC_HWACCEL + \
                     CONFIG_H264_VAAPI_HWACCEL + \
                     CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_H264_VDPAU_HWACCEL + \
                     CONFIG_H264_VULKAN_HWACCEL)
    /* +2: one software format and the AV_PIX_FMT_NONE terminator. */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
    const enum AVPixelFormat *choices = pix_fmts;
    int i;

    switch (h->ps.sps->bit_depth_luma) {
    case 9:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP9;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P9;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P9;
        else
            *fmt++ = AV_PIX_FMT_YUV420P9;
        break;
    case 10:
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP10;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P10;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P10;
        else {
#if CONFIG_H264_VAAPI_HWACCEL
            // Just add as candidate. Whether VAProfileH264High10 usable or
            // not is decided by vaapi_decode_make_config() defined in FFmpeg
            // and vaQueryCodingProfile() defined in libva.
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            *fmt++ = AV_PIX_FMT_YUV420P10;
        }
        break;
    case 12:
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP12;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P12;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P12;
        else
            *fmt++ = AV_PIX_FMT_YUV420P12;
        break;
    case 14:
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                *fmt++ = AV_PIX_FMT_GBRP14;
            } else
                *fmt++ = AV_PIX_FMT_YUV444P14;
        } else if (CHROMA422(h))
            *fmt++ = AV_PIX_FMT_YUV422P14;
        else
            *fmt++ = AV_PIX_FMT_YUV420P14;
        break;
    case 8:
#if CONFIG_H264_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_H264_VULKAN_HWACCEL
        *fmt++ = AV_PIX_FMT_VULKAN;
#endif
#if CONFIG_H264_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
        if (h->avctx->colorspace != AVCOL_SPC_RGB)
            *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        if (CHROMA444(h)) {
            if (h->avctx->colorspace == AVCOL_SPC_RGB)
                *fmt++ = AV_PIX_FMT_GBRP;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ444P;
            else
                *fmt++ = AV_PIX_FMT_YUV444P;
        } else if (CHROMA422(h)) {
            if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ422P;
            else
                *fmt++ = AV_PIX_FMT_YUV422P;
        } else {
#if CONFIG_H264_DXVA2_HWACCEL
            *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
            *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_H264_VAAPI_HWACCEL
            *fmt++ = AV_PIX_FMT_VAAPI;
#endif
            /* A codec-provided list (e.g. a wrapper decoder) overrides ours. */
            if (h->avctx->codec->pix_fmts)
                choices = h->avctx->codec->pix_fmts;
            else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
                *fmt++ = AV_PIX_FMT_YUVJ420P;
            else
                *fmt++ = AV_PIX_FMT_YUV420P;
        }
        break;
    default:
        av_log(h->avctx, AV_LOG_ERROR,
               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
        return AVERROR_INVALIDDATA;
    }

    *fmt = AV_PIX_FMT_NONE;

    /* Keep the already-negotiated format when possible to avoid a
     * gratuitous get_format() callback. */
    for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
        if (choices[i] == h->avctx->pix_fmt && !force_callback)
            return choices[i];
    return ff_thread_get_format(h->avctx, choices);
}
910 
911 /* export coded and cropped frame dimensions to AVCodecContext */
913 {
914  const SPS *sps = (const SPS*)h->ps.sps;
915  int cr = sps->crop_right;
916  int cl = sps->crop_left;
917  int ct = sps->crop_top;
918  int cb = sps->crop_bottom;
919  int width = h->width - (cr + cl);
920  int height = h->height - (ct + cb);
921  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
922  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
923 
924  /* handle container cropping */
925  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
926  !sps->crop_top && !sps->crop_left &&
927  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
928  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
929  h->width_from_caller <= width &&
930  h->height_from_caller <= height) {
931  width = h->width_from_caller;
932  height = h->height_from_caller;
933  cl = 0;
934  ct = 0;
935  cr = h->width - width;
936  cb = h->height - height;
937  } else {
938  h->width_from_caller = 0;
939  h->height_from_caller = 0;
940  }
941 
942  h->avctx->coded_width = h->width;
943  h->avctx->coded_height = h->height;
944  h->avctx->width = width;
945  h->avctx->height = height;
946  h->crop_right = cr;
947  h->crop_left = cl;
948  h->crop_top = ct;
949  h->crop_bottom = cb;
950 }
951 
/* Initialize the decoder context from the currently active SPS: sample
 * aspect ratio, chroma subsampling shifts, frame rate, bit depth, DSP
 * function tables and the per-slice contexts.
 * NOTE(review): the extraction dropped several hyperlinked lines in this
 * block — the function signature (upstream: `static int
 * h264_slice_header_init(H264Context *h)`), the error-code assignments
 * before the `goto fail`s, and the table alloc/free calls — TODO restore
 * from the upstream file. The remaining lines are unmodified. */
953 {
954  const SPS *sps = h->ps.sps;
955  int i, ret;
956 
 /* No SPS available: nothing to initialize from. */
957  if (!sps) {
959  goto fail;
960  }
961 
962  ff_set_sar(h->avctx, sps->vui.sar);
963  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
964  &h->chroma_x_shift, &h->chroma_y_shift);
965 
966  if (sps->timing_info_present_flag) {
967  int64_t den = sps->time_scale;
 /* Old x264 versions (< build 44) wrote half the correct time_scale. */
968  if (h->x264_build < 44U)
969  den *= 2;
970  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
971  sps->num_units_in_tick * 2, den, 1 << 30);
972  }
973 
975 
976  h->first_field = 0;
977  h->prev_interlaced_frame = 1;
978 
 /* NOTE(review): the call whose result `ret` checks below was dropped by
  * the extraction (upstream allocates the decoder tables here). */
981  if (ret < 0) {
982  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
983  goto fail;
984  }
985 
 /* H.264 supports luma bit depths 8..14, excluding 11 and 13. */
986  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
987  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
988  ) {
989  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
990  sps->bit_depth_luma);
992  goto fail;
993  }
994 
995  h->cur_bit_depth_luma =
996  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
997  h->cur_chroma_format_idc = sps->chroma_format_idc;
998  h->pixel_shift = sps->bit_depth_luma > 8;
999  h->chroma_format_idc = sps->chroma_format_idc;
1000  h->bit_depth_luma = sps->bit_depth_luma;
1001 
 /* (Re)initialize all DSP/prediction function tables for the new
  * bit depth and chroma format. */
1002  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
1003  sps->chroma_format_idc);
1004  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
1005  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
1006  ff_h264_pred_init(&h->hpc, AV_CODEC_ID_H264, sps->bit_depth_luma,
1007  sps->chroma_format_idc);
1008  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
1009 
1010  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
1011  ff_h264_slice_context_init(h, &h->slice_ctx[0]);
1012  } else {
 /* Slice threading: give each slice context its own sub-arrays,
  * offset into the shared per-frame tables. */
1013  for (i = 0; i < h->nb_slice_ctx; i++) {
1014  H264SliceContext *sl = &h->slice_ctx[i];
1015 
1016  sl->h264 = h;
1017  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
1018  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
1019  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
1020 
 /* NOTE(review): per-slice-context init call dropped by extraction. */
1022  }
1023  }
1024 
1025  h->context_initialized = 1;
1026 
1027  return 0;
1028 fail:
 /* NOTE(review): cleanup call dropped by extraction; upstream frees the
  * decoder tables before clearing the initialized flag. */
1030  h->context_initialized = 0;
1031  return ret;
1032 }
1033 
1035 {
1036  switch (a) {
1040  default:
1041  return a;
1042  }
1043 }
1044 
1045 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
1046 {
1047  const SPS *sps;
1048  int needs_reinit = 0, must_reinit, ret;
1049 
1050  if (first_slice) {
1051  av_buffer_unref(&h->ps.pps_ref);
1052  h->ps.pps = NULL;
1053  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
1054  if (!h->ps.pps_ref)
1055  return AVERROR(ENOMEM);
1056  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
1057  }
1058 
1059  if (h->ps.sps != h->ps.pps->sps) {
1060  h->ps.sps = (const SPS*)h->ps.pps->sps;
1061 
1062  if (h->mb_width != h->ps.sps->mb_width ||
1063  h->mb_height != h->ps.sps->mb_height ||
1064  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
1065  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
1066  )
1067  needs_reinit = 1;
1068 
1069  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
1070  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
1071  needs_reinit = 1;
1072  }
1073  sps = h->ps.sps;
1074 
1075  must_reinit = (h->context_initialized &&
1076  ( 16*sps->mb_width != h->avctx->coded_width
1077  || 16*sps->mb_height != h->avctx->coded_height
1078  || h->cur_bit_depth_luma != sps->bit_depth_luma
1079  || h->cur_chroma_format_idc != sps->chroma_format_idc
1080  || h->mb_width != sps->mb_width
1081  || h->mb_height != sps->mb_height
1082  ));
1083  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
1084  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
1085  must_reinit = 1;
1086 
1087  if (first_slice && av_cmp_q(sps->vui.sar, h->avctx->sample_aspect_ratio))
1088  must_reinit = 1;
1089 
1090  if (!h->setup_finished) {
1091  h->avctx->profile = ff_h264_get_profile(sps);
1092  h->avctx->level = sps->level_idc;
1093  h->avctx->refs = sps->ref_frame_count;
1094 
1095  h->mb_width = sps->mb_width;
1096  h->mb_height = sps->mb_height;
1097  h->mb_num = h->mb_width * h->mb_height;
1098  h->mb_stride = h->mb_width + 1;
1099 
1100  h->b_stride = h->mb_width * 4;
1101 
1102  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
1103 
1104  h->width = 16 * h->mb_width;
1105  h->height = 16 * h->mb_height;
1106 
1107  init_dimensions(h);
1108 
1109  if (sps->vui.video_signal_type_present_flag) {
1110  h->avctx->color_range = sps->vui.video_full_range_flag > 0 ? AVCOL_RANGE_JPEG
1111  : AVCOL_RANGE_MPEG;
1112  if (sps->vui.colour_description_present_flag) {
1113  if (h->avctx->colorspace != sps->vui.matrix_coeffs)
1114  needs_reinit = 1;
1115  h->avctx->color_primaries = sps->vui.colour_primaries;
1116  h->avctx->color_trc = sps->vui.transfer_characteristics;
1117  h->avctx->colorspace = sps->vui.matrix_coeffs;
1118  }
1119  }
1120 
1121  if (h->sei.common.alternative_transfer.present &&
1122  av_color_transfer_name(h->sei.common.alternative_transfer.preferred_transfer_characteristics) &&
1123  h->sei.common.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
1124  h->avctx->color_trc = h->sei.common.alternative_transfer.preferred_transfer_characteristics;
1125  }
1126  }
1127  h->avctx->chroma_sample_location = sps->vui.chroma_location;
1128 
1129  if (!h->context_initialized || must_reinit || needs_reinit) {
1130  int flush_changes = h->context_initialized;
1131  h->context_initialized = 0;
1132  if (sl != h->slice_ctx) {
1133  av_log(h->avctx, AV_LOG_ERROR,
1134  "changing width %d -> %d / height %d -> %d on "
1135  "slice %d\n",
1136  h->width, h->avctx->coded_width,
1137  h->height, h->avctx->coded_height,
1138  h->current_slice + 1);
1139  return AVERROR_INVALIDDATA;
1140  }
1141 
1142  av_assert1(first_slice);
1143 
1144  if (flush_changes)
1146 
1147  if ((ret = get_pixel_format(h, 1)) < 0)
1148  return ret;
1149  h->avctx->pix_fmt = ret;
1150 
1151  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
1152  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
1153 
1154  if ((ret = h264_slice_header_init(h)) < 0) {
1155  av_log(h->avctx, AV_LOG_ERROR,
1156  "h264_slice_header_init() failed\n");
1157  return ret;
1158  }
1159  }
1160 
1161  return 0;
1162 }
1163 
/* Export frame-level properties (interlacing, field order, repeat count,
 * SEI side data, SMPTE timecodes) from the decoder state onto the output
 * AVFrame of the current picture.
 * NOTE(review): the extraction dropped hyperlinked lines here — the
 * signature (upstream: `static int h264_export_frame_props(H264Context *h)`),
 * the `case H264_SEI_PIC_STRUCT_*:` labels of the switch below, and the
 * `av_frame_new_side_data(...)` call in the timecode block — TODO restore
 * from the upstream file. */
1165 {
1166  const SPS *sps = h->ps.sps;
1167  H264Picture *cur = h->cur_pic_ptr;
1168  AVFrame *out = cur->f;
1169  int interlaced_frame = 0, top_field_first = 0;
1170  int ret;
1171 
1172  out->flags &= ~AV_FRAME_FLAG_INTERLACED;
1173  out->repeat_pict = 0;
1174 
1175  /* Signal interlacing information externally. */
1176  /* Prioritize picture timing SEI information over used
1177  * decoding process if it exists. */
1178  if (h->sei.picture_timing.present) {
1179  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
1180  h->avctx);
1181  if (ret < 0) {
1182  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
1183  if (h->avctx->err_recognition & AV_EF_EXPLODE)
1184  return ret;
 /* Best effort: drop the broken SEI and continue decoding. */
1185  h->sei.picture_timing.present = 0;
1186  }
1187  }
1188 
1189  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1190  H264SEIPictureTiming *pt = &h->sei.picture_timing;
 /* NOTE(review): case labels of this switch were dropped; the bodies
  * below map pic_struct values to interlaced/repeat_pict settings. */
1191  switch (pt->pic_struct) {
1193  break;
1196  interlaced_frame = 1;
1197  break;
1201  interlaced_frame = 1;
1202  else
1203  // try to flag soft telecine progressive
1204  interlaced_frame = !!h->prev_interlaced_frame;
1205  break;
1208  /* Signal the possibility of telecined film externally
1209  * (pic_struct 5,6). From these hints, let the applications
1210  * decide if they apply deinterlacing. */
1211  out->repeat_pict = 1;
1212  break;
1214  out->repeat_pict = 2;
1215  break;
1217  out->repeat_pict = 4;
1218  break;
1219  }
1220 
 /* ct_type bit 1 distinguishes interlaced from progressive source
  * material for frame/field pic_structs. */
1221  if ((pt->ct_type & 3) &&
1222  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
1223  interlaced_frame = ((pt->ct_type & (1 << 1)) != 0);
1224  } else {
1225  /* Derive interlacing flag from used decoding process. */
1226  interlaced_frame = !!FIELD_OR_MBAFF_PICTURE(h);
1227  }
1228  h->prev_interlaced_frame = interlaced_frame;
1229 
1230  if (cur->field_poc[0] != cur->field_poc[1]) {
1231  /* Derive top_field_first from field pocs. */
1232  top_field_first = (cur->field_poc[0] < cur->field_poc[1]);
1233  } else {
1234  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
1235  /* Use picture timing SEI information. Even if it is a
1236  * information of a past frame, better than nothing. */
1237  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
1238  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
1239  top_field_first = 1;
1240  } else if (interlaced_frame) {
1241  /* Default to top field first when pic_struct_present_flag
1242  * is not set but interlaced frame detected */
1243  top_field_first = 1;
1244  } // else
1245  /* Most likely progressive */
1246  }
1247 
1248  out->flags |= (AV_FRAME_FLAG_INTERLACED * interlaced_frame) |
1249  (AV_FRAME_FLAG_TOP_FIELD_FIRST * top_field_first);
1250 
1251  ret = ff_h2645_sei_to_frame(out, &h->sei.common, AV_CODEC_ID_H264, h->avctx,
1252  &sps->vui, sps->bit_depth_luma, sps->bit_depth_chroma,
1253  cur->poc + (unsigned)(h->poc_offset << 5));
1254  if (ret < 0)
1255  return ret;
1256 
 /* Attach SMPTE 12M timecodes from the picture timing SEI as frame side
  * data and as a "timecode" metadata string. */
1257  if (h->sei.picture_timing.timecode_cnt > 0) {
1258  uint32_t *tc_sd;
1259  char tcbuf[AV_TIMECODE_STR_SIZE];
1260 
 /* NOTE(review): the side-data allocation (upstream:
  * `AVFrameSideData *tcside = av_frame_new_side_data(out,
  * AV_FRAME_DATA_S12M_TIMECODE, ...)`) was dropped by the extraction. */
1263  sizeof(uint32_t)*4);
1264  if (!tcside)
1265  return AVERROR(ENOMEM);
1266 
1267  tc_sd = (uint32_t*)tcside->data;
1268  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
1269 
1270  for (int i = 0; i < tc_sd[0]; i++) {
1271  int drop = h->sei.picture_timing.timecode[i].dropframe;
1272  int hh = h->sei.picture_timing.timecode[i].hours;
1273  int mm = h->sei.picture_timing.timecode[i].minutes;
1274  int ss = h->sei.picture_timing.timecode[i].seconds;
1275  int ff = h->sei.picture_timing.timecode[i].frame;
1276 
1277  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
1278  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
1279  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
1280  }
 /* Consume the timecodes so they are not re-attached to later frames. */
1281  h->sei.picture_timing.timecode_cnt = 0;
1282  }
1283 
1284  return 0;
1285 }
1286 
/* Select which delayed picture (if any) should be output next, emulating
 * the DPB reordering: track recent POCs, grow the reorder buffer when the
 * stream needs it, and pick the lowest-POC delayed picture for output.
 * NOTE(review): the extraction dropped the signature line (upstream:
 * `static int h264_select_output_frame(H264Context *h)`) and the overflow
 * check before `h->delayed_pic[pics++] = cur;` — TODO restore from the
 * upstream file. */
1288 {
1289  const SPS *sps = h->ps.sps;
1290  H264Picture *out = h->cur_pic_ptr;
1291  H264Picture *cur = h->cur_pic_ptr;
1292  int i, pics, out_of_order, out_idx;
1293 
1294  cur->mmco_reset = h->mmco_reset;
1295  h->mmco_reset = 0;
1296 
1297  if (sps->bitstream_restriction_flag ||
1298  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
1299  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
1300  }
1301 
 /* Insert cur->poc into the sorted last_pocs history; `i` ends up as the
  * insertion position, from which the reorder depth is derived. */
1302  for (i = 0; 1; i++) {
1303  if(i == H264_MAX_DPB_FRAMES || cur->poc < h->last_pocs[i]){
1304  if(i)
1305  h->last_pocs[i-1] = cur->poc;
1306  break;
1307  } else if(i) {
1308  h->last_pocs[i-1]= h->last_pocs[i];
1309  }
1310  }
1311  out_of_order = H264_MAX_DPB_FRAMES - i;
1312  if( cur->f->pict_type == AV_PICTURE_TYPE_B
1313  || (h->last_pocs[H264_MAX_DPB_FRAMES-2] > INT_MIN && h->last_pocs[H264_MAX_DPB_FRAMES-1] - (int64_t)h->last_pocs[H264_MAX_DPB_FRAMES-2] > 2))
1314  out_of_order = FFMAX(out_of_order, 1);
1315  if (out_of_order == H264_MAX_DPB_FRAMES) {
 /* POC went backwards past the whole history: treat as an MMCO reset. */
1316  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
1317  for (i = 1; i < H264_MAX_DPB_FRAMES; i++)
1318  h->last_pocs[i] = INT_MIN;
1319  h->last_pocs[0] = cur->poc;
1320  cur->mmco_reset = 1;
1321  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
1322  int loglevel = h->avctx->frame_num > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
1323  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
1324  h->avctx->has_b_frames = out_of_order;
1325  }
1326 
1327  pics = 0;
1328  while (h->delayed_pic[pics])
1329  pics++;
1330 
 /* NOTE(review): an assertion/bounds check on `pics` was dropped here. */
1332 
1333  h->delayed_pic[pics++] = cur;
1334  if (cur->reference == 0)
1335  cur->reference = DELAYED_PIC_REF;
1336 
 /* Pick the delayed picture with the smallest POC, stopping at the first
  * keyframe or MMCO reset (those act as reorder barriers). */
1337  out = h->delayed_pic[0];
1338  out_idx = 0;
1339  for (i = 1; h->delayed_pic[i] &&
1340  !(h->delayed_pic[i]->f->flags & AV_FRAME_FLAG_KEY) &&
1341  !h->delayed_pic[i]->mmco_reset;
1342  i++)
1343  if (h->delayed_pic[i]->poc < out->poc) {
1344  out = h->delayed_pic[i];
1345  out_idx = i;
1346  }
1347  if (h->avctx->has_b_frames == 0 &&
1348  ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset))
1349  h->next_outputed_poc = INT_MIN;
1350  out_of_order = out->poc < h->next_outputed_poc;
1351 
1352  if (out_of_order || pics > h->avctx->has_b_frames) {
1353  out->reference &= ~DELAYED_PIC_REF;
1354  for (i = out_idx; h->delayed_pic[i]; i++)
1355  h->delayed_pic[i] = h->delayed_pic[i + 1];
1356  }
1357  if (!out_of_order && pics > h->avctx->has_b_frames) {
1358  h->next_output_pic = out;
1359  if (out_idx == 0 && h->delayed_pic[0] && ((h->delayed_pic[0]->f->flags & AV_FRAME_FLAG_KEY) || h->delayed_pic[0]->mmco_reset)) {
1360  h->next_outputed_poc = INT_MIN;
1361  } else
1362  h->next_outputed_poc = out->poc;
1363 
1364  if (out->recovered) {
1365  // We have reached an recovery point and all frames after it in
1366  // display order are "recovered".
1367  h->frame_recovered |= FRAME_RECOVERED_SEI;
1368  }
1369  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
1370 
1371  if (!out->recovered) {
 /* Suppress unrecovered output unless the user asked for it. */
1372  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
1373  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
1374  h->next_output_pic = NULL;
1375  } else {
1376  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
1377  }
1378  }
1379  } else {
1380  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
1381  }
1382 
1383  return 0;
1384 }
1385 
1386 /* This function is called right after decoding the slice header for a first
1387  * slice in a field (or a frame). It decides whether we are decoding a new frame
1388  * or a second field in a pair and does the necessary setup.
1389  */
/* Per-field/frame start handling: activates parameter sets, pairs fields,
 * conceals frame_num gaps, starts a new frame when needed, initializes POCs
 * and recovery-point state, then exports frame properties.
 * NOTE(review): the extraction dropped the first signature line (upstream:
 * `static int h264_field_start(H264Context *h, H264SliceContext *sl,`)
 * and a few hyperlinked call lines marked below — TODO restore from the
 * upstream file. The remaining lines are unmodified. */
1391  const H2645NAL *nal, int first_slice)
1392 {
1393  int i;
1394  const SPS *sps;
1395 
1396  int last_pic_structure, last_pic_droppable, ret;
1397 
1398  ret = h264_init_ps(h, sl, first_slice);
1399  if (ret < 0)
1400  return ret;
1401 
1402  sps = h->ps.sps;
1403 
1404  if (sps && sps->bitstream_restriction_flag &&
1405  h->avctx->has_b_frames < sps->num_reorder_frames) {
1406  h->avctx->has_b_frames = sps->num_reorder_frames;
1407  }
1408 
 /* Remember the previous field's structure/droppability so it can be
  * restored if this slice turns out to be invalid. */
1409  last_pic_droppable = h->droppable;
1410  last_pic_structure = h->picture_structure;
1411  h->droppable = (nal->ref_idc == 0);
1412  h->picture_structure = sl->picture_structure;
1413 
1414  h->poc.frame_num = sl->frame_num;
1415  h->poc.poc_lsb = sl->poc_lsb;
1416  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
1417  h->poc.delta_poc[0] = sl->delta_poc[0];
1418  h->poc.delta_poc[1] = sl->delta_poc[1];
1419 
1420  if (nal->type == H264_NAL_IDR_SLICE)
1421  h->poc_offset = sl->idr_pic_id;
1422  else if (h->picture_intra_only)
1423  h->poc_offset = 0;
1424 
1425  /* Shorten frame num gaps so we don't have to allocate reference
1426  * frames just to throw them away */
1427  if (h->poc.frame_num != h->poc.prev_frame_num) {
1428  int unwrap_prev_frame_num = h->poc.prev_frame_num;
1429  int max_frame_num = 1 << sps->log2_max_frame_num;
1430 
1431  if (unwrap_prev_frame_num > h->poc.frame_num)
1432  unwrap_prev_frame_num -= max_frame_num;
1433 
1434  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
1435  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
1436  if (unwrap_prev_frame_num < 0)
1437  unwrap_prev_frame_num += max_frame_num;
1438 
1439  h->poc.prev_frame_num = unwrap_prev_frame_num;
1440  }
1441  }
1442 
1443  /* See if we have a decoded first field looking for a pair...
1444  * Here, we're using that to see if we should mark previously
1445  * decode frames as "finished".
1446  * We have to do that before the "dummy" in-between frame allocation,
1447  * since that can modify h->cur_pic_ptr. */
1448  if (h->first_field) {
1449  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
1450  av_assert0(h->cur_pic_ptr);
1451  av_assert0(h->cur_pic_ptr->f->buf[0]);
1452  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1453 
1454  /* Mark old field/frame as completed */
1455  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
1456  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
1457  }
1458 
1459  /* figure out if we have a complementary field pair */
1460  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1461  /* Previous field is unmatched. Don't display it, but let it
1462  * remain for reference if marked as such. */
1463  if (last_pic_structure != PICT_FRAME) {
1464  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1465  last_pic_structure == PICT_TOP_FIELD);
1466  }
1467  } else {
1468  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1469  /* This and previous field were reference, but had
1470  * different frame_nums. Consider this field first in
1471  * pair. Throw away previous field except for reference
1472  * purposes. */
1473  if (last_pic_structure != PICT_FRAME) {
1474  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1475  last_pic_structure == PICT_TOP_FIELD);
1476  }
1477  } else {
1478  /* Second field in complementary pair */
1479  if (!((last_pic_structure == PICT_TOP_FIELD &&
1480  h->picture_structure == PICT_BOTTOM_FIELD) ||
1481  (last_pic_structure == PICT_BOTTOM_FIELD &&
1482  h->picture_structure == PICT_TOP_FIELD))) {
1483  av_log(h->avctx, AV_LOG_ERROR,
1484  "Invalid field mode combination %d/%d\n",
1485  last_pic_structure, h->picture_structure);
1486  h->picture_structure = last_pic_structure;
1487  h->droppable = last_pic_droppable;
1488  return AVERROR_INVALIDDATA;
1489  } else if (last_pic_droppable != h->droppable) {
1490  avpriv_request_sample(h->avctx,
1491  "Found reference and non-reference fields in the same frame, which");
1492  h->picture_structure = last_pic_structure;
1493  h->droppable = last_pic_droppable;
1494  return AVERROR_PATCHWELCOME;
1495  }
1496  }
1497  }
1498  }
1499 
 /* Synthesize dummy frames for each missing frame_num so that later
  * reference-list handling sees a contiguous sequence. */
1500  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
1501  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
1502  const H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
1503  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
1504  h->poc.frame_num, h->poc.prev_frame_num);
1505  if (!sps->gaps_in_frame_num_allowed_flag)
1506  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
1507  h->last_pocs[i] = INT_MIN;
1508  ret = h264_frame_start(h);
1509  if (ret < 0) {
1510  h->first_field = 0;
1511  return ret;
1512  }
1513 
1514  h->poc.prev_frame_num++;
1515  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
1516  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
1517  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
1518  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
1519  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
1520 
1521  h->explicit_ref_marking = 0;
 /* NOTE(review): the ref-marking call whose result `ret` checks below
  * (upstream: `ret = ff_h264_execute_ref_pic_marking(h);`) was dropped
  * by the extraction. */
1523  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1524  return ret;
1525  /* Error concealment: If a ref is missing, copy the previous ref
1526  * in its place.
1527  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
1528  * many assumptions about there being no actual duplicates.
1529  * FIXME: This does not copy padding for out-of-frame motion
1530  * vectors. Given we are concealing a lost frame, this probably
1531  * is not noticeable by comparison, but it should be fixed. */
1532  if (h->short_ref_count) {
 /* Mid-gray fill values for luma/chroma at the current bit depth. */
1533  int c[4] = {
1534  1<<(h->ps.sps->bit_depth_luma-1),
1535  1<<(h->ps.sps->bit_depth_chroma-1),
1536  1<<(h->ps.sps->bit_depth_chroma-1),
1537  -1
1538  };
1539 
1540  if (prev &&
1541  h->short_ref[0]->f->width == prev->f->width &&
1542  h->short_ref[0]->f->height == prev->f->height &&
1543  h->short_ref[0]->f->format == prev->f->format) {
1544  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
1545  if (prev->field_picture)
1546  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
1547  ff_thread_release_ext_buffer(h->avctx, &h->short_ref[0]->tf);
1548  h->short_ref[0]->tf.f = h->short_ref[0]->f;
1549  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
1550  if (ret < 0)
1551  return ret;
1552  h->short_ref[0]->poc = prev->poc + 2U;
1553  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
1554  if (h->short_ref[0]->field_picture)
1555  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
1556  } else if (!h->frame_recovered && !h->avctx->hwaccel)
1557  ff_color_frame(h->short_ref[0]->f, c);
1558  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
1559  }
1560  }
1561 
1562  /* See if we have a decoded first field looking for a pair...
1563  * We're using that to see whether to continue decoding in that
1564  * frame, or to allocate a new one. */
1565  if (h->first_field) {
1566  av_assert0(h->cur_pic_ptr);
1567  av_assert0(h->cur_pic_ptr->f->buf[0]);
1568  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
1569 
1570  /* figure out if we have a complementary field pair */
1571  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
1572  /* Previous field is unmatched. Don't display it, but let it
1573  * remain for reference if marked as such. */
1574  h->missing_fields ++;
1575  h->cur_pic_ptr = NULL;
1576  h->first_field = FIELD_PICTURE(h);
1577  } else {
1578  h->missing_fields = 0;
1579  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
1580  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
1581  h->picture_structure==PICT_BOTTOM_FIELD);
1582  /* This and the previous field had different frame_nums.
1583  * Consider this field first in pair. Throw away previous
1584  * one except for reference purposes. */
1585  h->first_field = 1;
1586  h->cur_pic_ptr = NULL;
1587  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
1588  /* This frame was already output, we cannot draw into it
1589  * anymore.
1590  */
1591  h->first_field = 1;
1592  h->cur_pic_ptr = NULL;
1593  } else {
1594  /* Second field in complementary pair */
1595  h->first_field = 0;
1596  }
1597  }
1598  } else {
1599  /* Frame or first field in a potentially complementary pair */
1600  h->first_field = FIELD_PICTURE(h);
1601  }
1602 
1603  if (!FIELD_PICTURE(h) || h->first_field) {
1604  if (h264_frame_start(h) < 0) {
1605  h->first_field = 0;
1606  return AVERROR_INVALIDDATA;
1607  }
1608  } else {
1609  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): a progress-reset call for the second field was dropped
  * here by the extraction. */
1611  h->cur_pic_ptr->tf.owner[field] = h->avctx;
1612  }
1613  /* Some macroblocks can be accessed before they're available in case
1614  * of lost slices, MBAFF or threading. */
1615  if (FIELD_PICTURE(h)) {
1616  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
1617  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
1618  } else {
1619  memset(h->slice_table, -1,
1620  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
1621  }
1622 
1623  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
1624  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
1625  if (ret < 0)
1626  return ret;
1627 
1628  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
1629  h->nb_mmco = sl->nb_mmco;
1630  h->explicit_ref_marking = sl->explicit_ref_marking;
1631 
1632  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
1633 
 /* Track SEI recovery points so frames can be flagged "recovered". */
1634  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
1635  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
1636 
1637  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
1638  h->valid_recovery_point = 1;
1639 
1640  if ( h->recovery_frame < 0
1641  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
1642  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
1643 
1644  if (!h->valid_recovery_point)
1645  h->recovery_frame = h->poc.frame_num;
1646  }
1647  }
1648 
1649  h->cur_pic_ptr->f->flags |= AV_FRAME_FLAG_KEY * !!(nal->type == H264_NAL_IDR_SLICE);
1650 
1651  if (nal->type == H264_NAL_IDR_SLICE ||
1652  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
1653  h->recovery_frame = -1;
1654  h->cur_pic_ptr->recovered = 1;
1655  }
1656  // If we have an IDR, all frames after it in decoded order are
1657  // "recovered".
1658  if (nal->type == H264_NAL_IDR_SLICE)
1659  h->frame_recovered |= FRAME_RECOVERED_IDR;
1660 #if 1
1661  h->cur_pic_ptr->recovered |= h->frame_recovered;
1662 #else
1663  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
1664 #endif
1665 
1666  /* Set the frame properties/side data. Only done for the second field in
1667  * field coded frames, since some SEI information is present for each field
1668  * and is merged by the SEI parsing code. */
1669  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): the two calls whose results are checked below
  * (upstream: h264_export_frame_props() and h264_select_output_frame())
  * were dropped by the extraction. */
1671  if (ret < 0)
1672  return ret;
1673 
1675  if (ret < 0)
1676  return ret;
1677  }
1678 
1679  return 0;
1680 }
1681 
/* Parse one H.264 slice header from sl->gb into the slice context:
 * slice type, PPS id, frame_num, field flags, POC fields, reference
 * counts, prediction weights, ref-pic marking, QP and deblocking
 * parameters. Bitstream read order must match the spec exactly.
 * NOTE(review): the extraction dropped the first signature line
 * (upstream: `static int h264_slice_header_parse(const H264Context *h,
 * H264SliceContext *sl,`) and a few hyperlinked lines marked below —
 * TODO restore from the upstream file. */
1683  const H2645NAL *nal)
1684 {
1685  const SPS *sps;
1686  const PPS *pps;
1687  int ret;
1688  unsigned int slice_type, tmp, i;
1689  int field_pic_flag, bottom_field_flag;
1690  int first_slice = sl == h->slice_ctx && !h->current_slice;
1691  int picture_structure;
1692 
1693  if (first_slice)
1694  av_assert0(!h->setup_finished);
1695 
1696  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
1697 
1698  slice_type = get_ue_golomb_31(&sl->gb);
1699  if (slice_type > 9) {
1700  av_log(h->avctx, AV_LOG_ERROR,
1701  "slice type %d too large at %d\n",
1702  slice_type, sl->first_mb_addr);
1703  return AVERROR_INVALIDDATA;
1704  }
 /* Slice types 5..9 are the "fixed" variants: same type for the whole
  * picture. Map them down to 0..4. */
1705  if (slice_type > 4) {
1706  slice_type -= 5;
1707  sl->slice_type_fixed = 1;
1708  } else
1709  sl->slice_type_fixed = 0;
1710 
1711  slice_type = ff_h264_golomb_to_pict_type[slice_type];
1712  sl->slice_type = slice_type;
1713  sl->slice_type_nos = slice_type & 3;
1714 
 /* NOTE(review): the second half of this condition (upstream:
  * `slice_type_nos != AV_PICTURE_TYPE_I`) was dropped by the extraction. */
1715  if (nal->type == H264_NAL_IDR_SLICE &&
1717  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
1718  return AVERROR_INVALIDDATA;
1719  }
1720 
1721  sl->pps_id = get_ue_golomb(&sl->gb);
1722  if (sl->pps_id >= MAX_PPS_COUNT) {
1723  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
1724  return AVERROR_INVALIDDATA;
1725  }
1726  if (!h->ps.pps_list[sl->pps_id]) {
1727  av_log(h->avctx, AV_LOG_ERROR,
1728  "non-existing PPS %u referenced\n",
1729  sl->pps_id);
1730  return AVERROR_INVALIDDATA;
1731  }
1732  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
1733  sps = pps->sps;
1734 
1735  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
1736  if (!first_slice) {
1737  if (h->poc.frame_num != sl->frame_num) {
1738  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
1739  h->poc.frame_num, sl->frame_num);
1740  return AVERROR_INVALIDDATA;
1741  }
1742  }
1743 
1744  sl->mb_mbaff = 0;
1745 
1746  if (sps->frame_mbs_only_flag) {
1747  picture_structure = PICT_FRAME;
1748  } else {
1749  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
1750  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
1751  return -1;
1752  }
1753  field_pic_flag = get_bits1(&sl->gb);
1754  if (field_pic_flag) {
1755  bottom_field_flag = get_bits1(&sl->gb);
1756  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
1757  } else {
1758  picture_structure = PICT_FRAME;
1759  }
1760  }
1761  sl->picture_structure = picture_structure;
1762  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
1763 
 /* Field pictures use 2*frame_num(+1) pic numbers and one extra bit of
  * max_pic_num (spec 8.2.4.1). */
1764  if (picture_structure == PICT_FRAME) {
1765  sl->curr_pic_num = sl->frame_num;
1766  sl->max_pic_num = 1 << sps->log2_max_frame_num;
1767  } else {
1768  sl->curr_pic_num = 2 * sl->frame_num + 1;
1769  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
1770  }
1771 
1772  if (nal->type == H264_NAL_IDR_SLICE) {
1773  unsigned idr_pic_id = get_ue_golomb_long(&sl->gb);
1774  if (idr_pic_id < 65536) {
1775  sl->idr_pic_id = idr_pic_id;
1776  } else
1777  av_log(h->avctx, AV_LOG_WARNING, "idr_pic_id is invalid\n");
1778  }
1779 
1780  sl->poc_lsb = 0;
1781  sl->delta_poc_bottom = 0;
1782  if (sps->poc_type == 0) {
1783  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
1784 
1785  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1786  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
1787  }
1788 
1789  sl->delta_poc[0] = sl->delta_poc[1] = 0;
1790  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
1791  sl->delta_poc[0] = get_se_golomb(&sl->gb);
1792 
1793  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
1794  sl->delta_poc[1] = get_se_golomb(&sl->gb);
1795  }
1796 
1797  sl->redundant_pic_count = 0;
1798  if (pps->redundant_pic_cnt_present)
1799  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
1800 
1801  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
1802  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
1803 
 /* NOTE(review): the call assigning `ret` here (upstream:
  * `ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,`)
  * was dropped by the extraction. */
1805  &sl->gb, pps, sl->slice_type_nos,
1806  picture_structure, h->avctx);
1807  if (ret < 0)
1808  return ret;
1809 
1810  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): the ref-list-modification parse call was dropped by
  * the extraction. */
1812  if (ret < 0) {
1813  sl->ref_count[1] = sl->ref_count[0] = 0;
1814  return ret;
1815  }
1816  }
1817 
1818  sl->pwt.use_weight = 0;
1819  for (i = 0; i < 2; i++) {
1820  sl->pwt.luma_weight_flag[i] = 0;
1821  sl->pwt.chroma_weight_flag[i] = 0;
1822  }
 /* NOTE(review): the B-slice condition and the pred-weight-table parse
  * call were partially dropped by the extraction here. */
1823  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
1824  (pps->weighted_bipred_idc == 1 &&
1827  sl->slice_type_nos, &sl->pwt,
1828  picture_structure, h->avctx);
1829  if (ret < 0)
1830  return ret;
1831  }
1832 
1833  sl->explicit_ref_marking = 0;
1834  if (nal->ref_idc) {
1835  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
1836  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
1837  return AVERROR_INVALIDDATA;
1838  }
1839 
1840  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
1841  tmp = get_ue_golomb_31(&sl->gb);
1842  if (tmp > 2) {
1843  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
1844  return AVERROR_INVALIDDATA;
1845  }
1846  sl->cabac_init_idc = tmp;
1847  }
1848 
1849  sl->last_qscale_diff = 0;
1850  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
1851  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
1852  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
1853  return AVERROR_INVALIDDATA;
1854  }
1855  sl->qscale = tmp;
1856  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
1857  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
1858  // FIXME qscale / qp ... stuff
1859  if (sl->slice_type == AV_PICTURE_TYPE_SP)
1860  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): the SI-slice half of this condition was dropped by the
  * extraction. */
1861  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
1863  get_se_golomb(&sl->gb); /* slice_qs_delta */
1864 
1865  sl->deblocking_filter = 1;
1866  sl->slice_alpha_c0_offset = 0;
1867  sl->slice_beta_offset = 0;
1868  if (pps->deblocking_filter_parameters_present) {
1869  tmp = get_ue_golomb_31(&sl->gb);
1870  if (tmp > 2) {
1871  av_log(h->avctx, AV_LOG_ERROR,
1872  "deblocking_filter_idc %u out of range\n", tmp);
1873  return AVERROR_INVALIDDATA;
1874  }
 /* Bitstream idc 0 = enabled, 1 = disabled; swap to internal meaning. */
1875  sl->deblocking_filter = tmp;
1876  if (sl->deblocking_filter < 2)
1877  sl->deblocking_filter ^= 1; // 1<->0
1878 
1879  if (sl->deblocking_filter) {
1880  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
1881  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
1882  if (slice_alpha_c0_offset_div2 > 6 ||
1883  slice_alpha_c0_offset_div2 < -6 ||
1884  slice_beta_offset_div2 > 6 ||
1885  slice_beta_offset_div2 < -6) {
1886  av_log(h->avctx, AV_LOG_ERROR,
1887  "deblocking filter parameters %d %d out of range\n",
1888  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
1889  return AVERROR_INVALIDDATA;
1890  }
1891  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
1892  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
1893  }
1894  }
1895 
1896  return 0;
1897 }
1898 
1899 /* do all the per-slice initialization needed before we can start decoding the
1900  * actual MBs */
/* NOTE(review): this is h264_slice_init(); the opening signature line (doc
 * line 1901) was dropped by the documentation extraction — only the trailing
 * parameter is visible. Several other hyperlinked statements inside the body
 * are missing too (doc lines 1919, 1929, 1937-1940, 1946, 1948, 1963, 2019,
 * 2028); verify against the upstream source before editing.
 * Performs the per-slice setup that must happen after the slice header has
 * been parsed but before macroblock decoding starts: position/ref-list setup,
 * deblocking decisions, slice numbering and the ref2frm mapping tables. */
1902  const H2645NAL *nal)
1903 {
1904  int i, j, ret = 0;
1905 
/* An IDR picture must consist exclusively of IDR slices. */
1906  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1907  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1908  return AVERROR_INVALIDDATA;
1909  }
1910 
/* Validate first_mb_in_slice against the picture's macroblock count
 * (shifted for field/MBAFF coding where a row covers two MB rows). */
1911  av_assert1(h->mb_num == h->mb_width * h->mb_height);
1912  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1913  sl->first_mb_addr >= h->mb_num) {
1914  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1915  return AVERROR_INVALIDDATA;
1916  }
/* Derive the slice's starting MB coordinates; resync_mb_* mark the
 * error-resilience resynchronization point. */
1917  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1918  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
/* NOTE(review): the shift amount on the line above (doc line 1919) was lost
 * in extraction — presumably FIELD_OR_MBAFF_PICTURE(h); confirm upstream. */
1920  if (h->picture_structure == PICT_BOTTOM_FIELD)
1921  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1922  av_assert1(sl->mb_y < h->mb_height);
1923 
1924  ret = ff_h264_build_ref_list(h, sl);
1925  if (ret < 0)
1926  return ret;
1927 
/* Implicit weighted bi-prediction: build the weight tables here
 * (condition continuation at doc line 1929 is missing from this dump). */
1928  if (h->ps.pps->weighted_bipred_idc == 2 &&
1930  implicit_weight_table(h, sl, -1);
1931  if (FRAME_MBAFF(h)) {
1932  implicit_weight_table(h, sl, 0);
1933  implicit_weight_table(h, sl, 1);
1934  }
1935  }
1936 
/* NOTE(review): doc lines 1937-1938 and the statement guarded by
 * !h->setup_finished (doc line 1940) are missing from this dump. */
1939  if (!h->setup_finished)
1941 
/* Honor the user's skip_loop_filter policy; the slice-type conditions at
 * doc lines 1946 and 1948 were dropped by the extraction. */
1942  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
1943  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
1944  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
1945  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
1947  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
1949  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
1950  nal->ref_idc == 0))
1951  sl->deblocking_filter = 0;
1952 
/* With multiple slice contexts, full deblocking (mode 1) crosses slice
 * boundaries and would serialize decoding; either restrict it to within
 * the slice (mode 2, FAST flag) or postpone filtering until all slices
 * of the picture are decoded. */
1953  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
1954  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1955  /* Cheat slightly for speed:
1956  * Do not bother to deblock across slices. */
1957  sl->deblocking_filter = 2;
1958  } else {
1959  h->postpone_filter = 1;
1960  }
1961  }
/* QP threshold under which the loop filter provably does nothing (the
 * subtracted term at doc line 1963 is missing from this dump). */
1962  sl->qp_thresh = 15 -
1964  FFMAX3(0,
1965  h->ps.pps->chroma_qp_index_offset[0],
1966  h->ps.pps->chroma_qp_index_offset[1]) +
1967  6 * (h->ps.sps->bit_depth_luma - 8);
1968 
1969  sl->slice_num = ++h->current_slice;
1970 
/* Track the starting MB row per slice number and warn when slice numbers
 * wrap around MAX_SLICES onto rows close enough to cause artifacts. */
1971  if (sl->slice_num)
1972  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
1973  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
1974  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
1975  && sl->slice_num >= MAX_SLICES) {
1976  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
1977  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
1978  }
1979 
/* Build ref2frm for both reference lists: map each reference-list index to
 * a compact frame id (4 * id + reference-field bits). id_list defaults to
 * 60, an out-of-range sentinel used when the entry matches no short/long
 * term reference. */
1980  for (j = 0; j < 2; j++) {
1981  int id_list[16];
1982  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
1983  for (i = 0; i < 16; i++) {
1984  id_list[i] = 60;
1985  if (j < sl->list_count && i < sl->ref_count[j] &&
1986  sl->ref_list[j][i].parent->f->buf[0]) {
1987  int k;
/* Identify the reference by its underlying AVBuffer so two H264Picture
 * views of the same frame compare equal. */
1988  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
1989  for (k = 0; k < h->short_ref_count; k++)
1990  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
1991  id_list[i] = k;
1992  break;
1993  }
1994  for (k = 0; k < h->long_ref_count; k++)
1995  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
1996  id_list[i] = h->short_ref_count + k;
1997  break;
1998  }
1999  }
2000  }
2001 
/* Entries [0]/[1] (and [18]/[19] for the MBAFF half) act as the
 * "unused/intra" sentinels addressed via negative ref indices. */
2002  ref2frm[0] =
2003  ref2frm[1] = -1;
2004  for (i = 0; i < 16; i++)
2005  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2006  ref2frm[18 + 0] =
2007  ref2frm[18 + 1] = -1;
2008  for (i = 16; i < 48; i++)
2009  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2010  (sl->ref_list[j][i].reference & 3);
2011  }
2012 
/* Optional per-slice debug dump (one format argument at doc line 2019 and
 * one at doc line 2028 are missing from this extraction). */
2013  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2014  av_log(h->avctx, AV_LOG_DEBUG,
2015  "slice:%d %c mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2016  sl->slice_num,
2017  (h->picture_structure == PICT_FRAME ? 'F' : h->picture_structure == PICT_TOP_FIELD ? 'T' : 'B'),
2018  sl->mb_y * h->mb_width + sl->mb_x,
2020  sl->slice_type_fixed ? " fix" : "",
2021  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2022  h->poc.frame_num,
2023  h->cur_pic_ptr->field_poc[0],
2024  h->cur_pic_ptr->field_poc[1],
2025  sl->ref_count[0], sl->ref_count[1],
2026  sl->qscale,
2027  sl->deblocking_filter,
2029  sl->pwt.use_weight,
2030  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2031  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2032  }
2033 
2034  return 0;
2035 }
2036 
/* NOTE(review): the signature line (doc line 2037) was dropped by the
 * extraction; by its behavior this is the slice-queueing entry point
 * (ff_h264_queue_decode_slice in upstream FFmpeg — confirm before editing).
 * Parses a slice header, handles field/frame boundary transitions, validates
 * parameter-set consistency across slices of one picture, and queues the
 * slice context for later batch decoding. Returns 0 or a negative AVERROR. */
2038 {
2039  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2040  int first_slice = sl == h->slice_ctx && !h->current_slice;
2041  int ret;
2042 
2043  sl->gb = nal->gb;
2044 
2045  ret = h264_slice_header_parse(h, sl, nal);
2046  if (ret < 0)
2047  return ret;
2048 
2049  // discard redundant pictures
2050  if (sl->redundant_pic_count > 0) {
2051  sl->ref_count[0] = sl->ref_count[1] = 0;
2052  return 0;
2053  }
2054 
2055  if (sl->first_mb_addr == 0 || !h->current_slice) {
2056  if (h->setup_finished) {
2057  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2058  return AVERROR_INVALIDDATA;
2059  }
2060  }
2061 
2062  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2063  if (h->current_slice) {
2064  // this slice starts a new field
2065  // first decode any pending queued slices
2066  if (h->nb_slice_ctx_queued) {
2067  H264SliceContext tmp_ctx;
2068 
/* NOTE(review): the call whose result feeds ret here (doc line 2069,
 * presumably ff_h264_execute_decode_slices(h)) is missing from this dump. */
2070  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2071  return ret;
2072 
/* Swap this freshly-parsed context into slot 0 so the new field starts
 * decoding from the first slice context. */
2073  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2074  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2075  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2076  sl = h->slice_ctx;
2077  }
2078 
2079  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2080  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2081  if (ret < 0)
2082  return ret;
2083  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
/* An IDR slice arriving mid-frame means the previous frame was never
 * completed; close it out and release waiters on its progress. */
2084  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2085  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2086  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2087  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2088  h->cur_pic_ptr = NULL;
2089  if (ret < 0)
2090  return ret;
2091  } else
2092  return AVERROR_INVALIDDATA;
2093  }
2094 
2095  if (!h->first_field) {
2096  if (h->cur_pic_ptr && !h->droppable) {
2097  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2098  h->picture_structure == PICT_BOTTOM_FIELD);
2099  }
2100  h->cur_pic_ptr = NULL;
2101  }
2102  }
2103 
2104  if (!h->current_slice)
2105  av_assert0(sl == h->slice_ctx);
2106 
/* Apply the user's skip_frame policy only at the start of a new picture. */
2107  if (h->current_slice == 0 && !h->first_field) {
2108  if (
2109  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2110  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2111  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2112  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2113  h->avctx->skip_frame >= AVDISCARD_ALL) {
2114  return 0;
2115  }
2116  }
2117 
/* Parameter sets must not change between slices of the same picture. */
2118  if (!first_slice) {
2119  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2120 
2121  if (h->ps.pps->sps_id != pps->sps_id ||
2122  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2123  (h->setup_finished && h->ps.pps != pps)*/) {
2124  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2125  return AVERROR_INVALIDDATA;
2126  }
2127  if (h->ps.sps != pps->sps) {
2128  av_log(h->avctx, AV_LOG_ERROR,
2129  "SPS changed in the middle of the frame\n");
2130  return AVERROR_INVALIDDATA;
2131  }
2132  }
2133 
2134  if (h->current_slice == 0) {
2135  ret = h264_field_start(h, sl, nal, first_slice);
2136  if (ret < 0)
2137  return ret;
2138  } else {
/* Subsequent slices must keep the field/frame structure and droppability
 * of the picture that was started by the first slice. */
2139  if (h->picture_structure != sl->picture_structure ||
2140  h->droppable != (nal->ref_idc == 0)) {
2141  av_log(h->avctx, AV_LOG_ERROR,
2142  "Changing field mode (%d -> %d) between slices is not allowed\n",
2143  h->picture_structure, sl->picture_structure);
2144  return AVERROR_INVALIDDATA;
2145  } else if (!h->cur_pic_ptr) {
2146  av_log(h->avctx, AV_LOG_ERROR,
2147  "unset cur_pic_ptr on slice %d\n",
2148  h->current_slice + 1);
2149  return AVERROR_INVALIDDATA;
2150  }
2151  }
2152 
2153  ret = h264_slice_init(h, sl, nal);
2154  if (ret < 0)
2155  return ret;
2156 
2157  h->nb_slice_ctx_queued++;
2158 
2159  return 0;
2160 }
2161 
/* NOTE(review): the signature line (doc line 2162) was dropped by the
 * extraction — in upstream FFmpeg this is
 * int ff_h264_get_slice_type(const H264SliceContext *sl); confirm upstream.
 * Maps the decoder's slice type to the numeric slice_type value used in the
 * bitstream / by hardware accelerators (P=0, B=1, I=2, SP=3, SI=4), or a
 * negative AVERROR for an unknown type. */
2163 {
2164  switch (sl->slice_type) {
2165  case AV_PICTURE_TYPE_P:
2166  return 0;
2167  case AV_PICTURE_TYPE_B:
2168  return 1;
2169  case AV_PICTURE_TYPE_I:
2170  return 2;
2171  case AV_PICTURE_TYPE_SP:
2172  return 3;
2173  case AV_PICTURE_TYPE_SI:
2174  return 4;
2175  default:
2176  return AVERROR_INVALIDDATA;
2177  }
2178 }
2179 
/* NOTE(review): the first signature line (doc line 2180, carrying the
 * function name — fill_filter_caches_inter per the call sites below) was
 * dropped by the extraction.
 * Fills the per-MB motion-vector and reference caches for one reference list
 * so the loop filter can compute boundary strengths: top/left neighbor rows
 * of mv_cache/ref_cache, then the current MB's own 4x4 mv/ref grid. Entries
 * are LIST_NOT_USED where the neighbor or the MB does not use this list. */
2181  H264SliceContext *sl,
2182  int mb_type, int top_xy,
2183  int left_xy[LEFT_MBS],
2184  int top_type,
2185  int left_type[LEFT_MBS],
2186  int mb_xy, int list)
2187 {
2188  int b_stride = h->b_stride;
/* mv_dst/ref_cache point at the current MB's origin inside the scan8-laid-out
 * caches; negative offsets below address the neighbor rows/columns. */
2189  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2190  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2191  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
/* Top neighbor: copy its bottom row of MVs and refs (remapped through the
 * owning slice's ref2frm table so refs compare across slices). */
2192  if (USES_LIST(top_type, list)) {
2193  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2194  const int b8_xy = 4 * top_xy + 2;
2195  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2196  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2197  ref_cache[0 - 1 * 8] =
2198  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2199  ref_cache[2 - 1 * 8] =
2200  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2201  } else {
2202  AV_ZERO128(mv_dst - 1 * 8);
2203  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2204  }
2205 
/* Left neighbor: only meaningful when current and left MB share the same
 * interlacing; copy its right column of MVs and refs. */
2206  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2207  if (USES_LIST(left_type[LTOP], list)) {
2208  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2209  const int b8_xy = 4 * left_xy[LTOP] + 1;
2210  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2211  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2212  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2213  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2214  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2215  ref_cache[-1 + 0] =
2216  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2217  ref_cache[-1 + 16] =
2218  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2219  } else {
2220  AV_ZERO32(mv_dst - 1 + 0);
2221  AV_ZERO32(mv_dst - 1 + 8);
2222  AV_ZERO32(mv_dst - 1 + 16);
2223  AV_ZERO32(mv_dst - 1 + 24);
2224  ref_cache[-1 + 0] =
2225  ref_cache[-1 + 8] =
2226  ref_cache[-1 + 16] =
2227  ref_cache[-1 + 24] = LIST_NOT_USED;
2228  }
2229  }
2230  }
2231 
/* Current MB does not use this list: zero its MV grid and mark all refs
 * unused, nothing further to copy. */
2232  if (!USES_LIST(mb_type, list)) {
2233  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2234  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2235  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2236  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2237  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2238  return;
2239  }
2240 
/* Broadcast the MB's four 8x8-block refs into the 4x4 ref cache, two rows
 * per 8x8 pair, via byte replication. */
2241  {
2242  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2243  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2244  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2245  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2246  AV_WN32A(&ref_cache[0 * 8], ref01);
2247  AV_WN32A(&ref_cache[1 * 8], ref01);
2248  AV_WN32A(&ref_cache[2 * 8], ref23);
2249  AV_WN32A(&ref_cache[3 * 8], ref23);
2250  }
2251 
/* Copy the MB's 4x4 motion vectors row by row from the picture-wide array. */
2252  {
2253  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2254  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2255  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2256  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2257  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2258  }
2259 }
2260 
2261 /**
2262  * @return non zero if the loop filter can be skipped
2263  */
/* Prepares all per-MB caches needed by the loop filter: neighbor MB
 * coordinates and types (with MBAFF adjustments), an early-out when QP is low
 * enough that filtering is a provable no-op, then the non-zero-count cache
 * and (for inter MBs) the MV/ref caches via fill_filter_caches_inter(). */
2264 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2265 {
2266  const int mb_xy = sl->mb_xy;
2267  int top_xy, left_xy[LEFT_MBS];
2268  int top_type, left_type[LEFT_MBS];
2269  uint8_t *nnz;
2270  uint8_t *nnz_cache;
2271 
2272  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2273 
/* In MBAFF frames the relevant top/left neighbors depend on the field flags
 * of the current vs. adjacent MB pairs. */
2274  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2275  if (FRAME_MBAFF(h)) {
2276  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2277  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2278  if (sl->mb_y & 1) {
2279  if (left_mb_field_flag != curr_mb_field_flag)
2280  left_xy[LTOP] -= h->mb_stride;
2281  } else {
2282  if (curr_mb_field_flag)
2283  top_xy += h->mb_stride &
2284  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2285  if (left_mb_field_flag != curr_mb_field_flag)
2286  left_xy[LBOT] += h->mb_stride;
2287  }
2288  }
2289 
2290  sl->top_mb_xy = top_xy;
2291  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2292  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2293  {
2294  /* For sufficiently low qp, filtering wouldn't do anything.
2295  * This is a conservative estimate: could also check beta_offset
2296  * and more accurate chroma_qp. */
2297  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2298  int qp = h->cur_pic.qscale_table[mb_xy];
2299  if (qp <= qp_thresh &&
2300  (left_xy[LTOP] < 0 ||
2301  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2302  (top_xy < 0 ||
2303  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2304  if (!FRAME_MBAFF(h))
2305  return 1;
2306  if ((left_xy[LTOP] < 0 ||
2307  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2308  (top_xy < h->mb_stride ||
2309  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2310  return 1;
2311  }
2312  }
2313 
/* Neighbor MB types: zeroed when the neighbor must not be filtered against
 * (other slice in mode 2, or unallocated slice-table entry otherwise). */
2314  top_type = h->cur_pic.mb_type[top_xy];
2315  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2316  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2317  if (sl->deblocking_filter == 2) {
2318  if (h->slice_table[top_xy] != sl->slice_num)
2319  top_type = 0;
2320  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2321  left_type[LTOP] = left_type[LBOT] = 0;
2322  } else {
2323  if (h->slice_table[top_xy] == 0xFFFF)
2324  top_type = 0;
2325  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2326  left_type[LTOP] = left_type[LBOT] = 0;
2327  }
2328  sl->top_type = top_type;
2329  sl->left_type[LTOP] = left_type[LTOP];
2330  sl->left_type[LBOT] = left_type[LBOT];
2331 
/* Intra MBs need no MV/ref/nnz caches for filtering. */
2332  if (IS_INTRA(mb_type))
2333  return 0;
2334 
2335  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2336  top_type, left_type, mb_xy, 0);
2337  if (sl->list_count == 2)
2338  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2339  top_type, left_type, mb_xy, 1);
2340 
/* Copy the current MB's non-zero-count rows, then the adjacent edges from
 * the top and left neighbors. */
2341  nnz = h->non_zero_count[mb_xy];
2342  nnz_cache = sl->non_zero_count_cache;
2343  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2344  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2345  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2346  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2347  sl->cbp = h->cbp_table[mb_xy];
2348 
2349  if (top_type) {
2350  nnz = h->non_zero_count[top_xy];
2351  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2352  }
2353 
2354  if (left_type[LTOP]) {
2355  nnz = h->non_zero_count[left_xy[LTOP]];
2356  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2357  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2358  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2359  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2360  }
2361 
2362  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2363  * from what the loop filter needs */
2364  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
/* Reconstruct loop-filter nnz from the CBP bits for 8x8-transform MBs. */
2365  if (IS_8x8DCT(top_type)) {
2366  nnz_cache[4 + 8 * 0] =
2367  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2368  nnz_cache[6 + 8 * 0] =
2369  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2370  }
2371  if (IS_8x8DCT(left_type[LTOP])) {
2372  nnz_cache[3 + 8 * 1] =
2373  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2374  }
2375  if (IS_8x8DCT(left_type[LBOT])) {
2376  nnz_cache[3 + 8 * 3] =
2377  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2378  }
2379 
2380  if (IS_8x8DCT(mb_type)) {
2381  nnz_cache[scan8[0]] =
2382  nnz_cache[scan8[1]] =
2383  nnz_cache[scan8[2]] =
2384  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2385 
2386  nnz_cache[scan8[0 + 4]] =
2387  nnz_cache[scan8[1 + 4]] =
2388  nnz_cache[scan8[2 + 4]] =
2389  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2390 
2391  nnz_cache[scan8[0 + 8]] =
2392  nnz_cache[scan8[1 + 8]] =
2393  nnz_cache[scan8[2 + 8]] =
2394  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2395 
2396  nnz_cache[scan8[0 + 12]] =
2397  nnz_cache[scan8[1 + 12]] =
2398  nnz_cache[scan8[2 + 12]] =
2399  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2400  }
2401  }
2402 
2403  return 0;
2404 }
2405 
/* Runs the deblocking loop filter over macroblock columns [start_x, end_x)
 * of the current row (two rows in MBAFF frames). Temporarily repurposes the
 * slice context's mb_x/mb_y/chroma_qp fields and restores the decode-time
 * slice type and QPs afterwards. Does nothing when filtering is postponed
 * (multi-slice-context mode) or disabled for this slice. */
2406 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2407 {
2408  uint8_t *dest_y, *dest_cb, *dest_cr;
2409  int linesize, uvlinesize, mb_x, mb_y;
2410  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2411  const int old_slice_type = sl->slice_type;
2412  const int pixel_shift = h->pixel_shift;
2413  const int block_h = 16 >> h->chroma_y_shift;
2414 
2415  if (h->postpone_filter)
2416  return;
2417 
2418  if (sl->deblocking_filter) {
2419  for (mb_x = start_x; mb_x < end_x; mb_x++)
/* In MBAFF frames filter both MBs of the vertical pair. */
2420  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2421  int mb_xy, mb_type;
2422  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2423  mb_type = h->cur_pic.mb_type[mb_xy];
2424 
2425  if (FRAME_MBAFF(h))
2426  sl->mb_mbaff =
2427  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2428 
2429  sl->mb_x = mb_x;
2430  sl->mb_y = mb_y;
/* Compute the luma/chroma destination pointers for this MB. */
2431  dest_y = h->cur_pic.f->data[0] +
2432  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2433  dest_cb = h->cur_pic.f->data[1] +
2434  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2435  mb_y * sl->uvlinesize * block_h;
2436  dest_cr = h->cur_pic.f->data[2] +
2437  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2438  mb_y * sl->uvlinesize * block_h;
2439  // FIXME simplify above
2440 
/* Field MBs use doubled strides; odd rows start one line up within the
 * interleaved field pair. */
2441  if (MB_FIELD(sl)) {
2442  linesize = sl->mb_linesize = sl->linesize * 2;
2443  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2444  if (mb_y & 1) { // FIXME move out of this function?
2445  dest_y -= sl->linesize * 15;
2446  dest_cb -= sl->uvlinesize * (block_h - 1);
2447  dest_cr -= sl->uvlinesize * (block_h - 1);
2448  }
2449  } else {
2450  linesize = sl->mb_linesize = sl->linesize;
2451  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2452  }
2453  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2454  uvlinesize, 0);
/* fill_filter_caches() returns non-zero when filtering this MB is a no-op. */
2455  if (fill_filter_caches(h, sl, mb_type))
2456  continue;
2457  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2458  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2459 
2460  if (FRAME_MBAFF(h)) {
2461  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2462  linesize, uvlinesize);
2463  } else {
2464  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2465  dest_cr, linesize, uvlinesize);
2466  }
2467  }
2468  }
/* Restore slice-context state that the loop above clobbered. */
2469  sl->slice_type = old_slice_type;
2470  sl->mb_x = end_x;
2471  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2472  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2473  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2474 }
2475 
/* NOTE(review): the signature line (doc line 2476) was dropped by the
 * extraction — in upstream FFmpeg this is
 * static void predict_field_decoding_flag(const H264Context *h,
 * H264SliceContext *sl); confirm upstream.
 * Predicts the MBAFF field decoding flag for the current MB pair from the
 * left neighbor (if in the same slice), else from the top neighbor, else 0. */
2477 {
2478  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2479  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2480  h->cur_pic.mb_type[mb_xy - 1] :
2481  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2482  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2483  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2484 }
2485 
2486 /**
2487  * Draw edges and report progress for the last MB row.
2488  */
/* NOTE(review): the signature line (doc line 2489) was dropped by the
 * extraction — this is decode_finish_row(const H264Context *h,
 * H264SliceContext *sl) per its use after each finished MB row in
 * decode_slice(); confirm upstream. */
2490 {
2491  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2492  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2493  int height = 16 << FRAME_MBAFF(h);
2494  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2495 
/* When deblocking is active the row just decoded may still be modified by
 * the filter, so shift the reported/drawn region up by the filter border. */
2496  if (sl->deblocking_filter) {
2497  if ((top + height) >= pic_height)
2498  height += deblock_border;
2499  top -= deblock_border;
2500  }
2501 
2502  if (top >= pic_height || (top + height) < 0)
2503  return;
2504 
/* Clamp the band to the picture. */
2505  height = FFMIN(height, pic_height - top);
2506  if (top < 0) {
2507  height = top + height;
2508  top = 0;
2509  }
2510 
2511  ff_h264_draw_horiz_band(h, sl, top, height);
2512 
/* Do not report progress on droppable frames or after decode errors. */
2513  if (h->droppable || h->er.error_occurred)
2514  return;
2515 
2516  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2517  h->picture_structure == PICT_BOTTOM_FIELD);
2518 }
2519 
/* NOTE(review): the first signature line (doc line 2520,
 * "static void er_add_slice(H264SliceContext *sl," per the file's own
 * cross-reference index) was dropped by the extraction.
 * Thin wrapper around ff_er_add_slice(): records a decoded/errored MB region
 * with the error-resilience module, but only when ER is enabled for this
 * decoder and compiled in. */
2521  int startx, int starty,
2522  int endx, int endy, int status)
2523 {
2524  if (!sl->h264->enable_er)
2525  return;
2526 
2527  if (CONFIG_ERROR_RESILIENCE) {
2528  ff_er_add_slice(sl->er, startx, starty, endx, endy, status);
2529  }
2530 }
2531 
/* Worker entry point (run via avctx->execute) that decodes one slice's
 * macroblocks: sets up strides and scratch buffers, then runs either the
 * CABAC or the CAVLC macroblock loop until the slice ends, invoking the loop
 * filter per finished row and reporting decoded regions to the
 * error-resilience module.
 * NOTE(review): a few hyperlinked statements were dropped by the doc
 * extraction (doc lines 2572, 2578, 2635, 2692) — flagged inline below;
 * verify against the upstream source before editing. */
2532 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2533 {
2534  H264SliceContext *sl = arg;
2535  const H264Context *h = sl->h264;
2536  int lf_x_start = sl->mb_x;
2537  int orig_deblock = sl->deblocking_filter;
2538  int ret;
2539 
2540  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2541  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2542 
2543  ret = alloc_scratch_buffers(sl, sl->linesize);
2544  if (ret < 0)
2545  return ret;
2546 
2547  sl->mb_skip_run = -1;
2548 
2549  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2550 
/* Filtering is done later as a separate pass when postponed. */
2551  if (h->postpone_filter)
2552  sl->deblocking_filter = 0;
2553 
2554  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2555  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2556 
/* If the MB right before this slice's resync point was not cleanly
 * finished, flag an error so ER can conceal the gap. */
2557  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && sl->er->error_status_table) {
2558  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2559  if (start_i) {
2560  int prev_status = sl->er->error_status_table[sl->er->mb_index2xy[start_i - 1]];
2561  prev_status &= ~ VP_START;
2562  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2563  sl->er->error_occurred = 1;
2564  }
2565  }
2566 
2567  if (h->ps.pps->cabac) {
2568  /* realign */
2569  align_get_bits(&sl->gb);
2570 
2571  /* init cabac */
/* NOTE(review): the call receiving these arguments (doc line 2572,
 * presumably ff_init_cabac_decoder(&sl->cabac, ...)) is missing from
 * this dump. */
2573  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2574  (get_bits_left(&sl->gb) + 7) / 8);
2575  if (ret < 0)
2576  return ret;
2577 
/* NOTE(review): doc line 2578 (presumably
 * ff_h264_init_cabac_states(h, sl);) is missing from this dump. */
2579 
2580  for (;;) {
2581  int ret, eos;
/* Guard against this slice running into the area owned by the next one. */
2582  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2583  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2584  sl->next_slice_idx);
2585  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2586  sl->mb_y, ER_MB_ERROR);
2587  return AVERROR_INVALIDDATA;
2588  }
2589 
2590  ret = ff_h264_decode_mb_cabac(h, sl);
2591 
2592  if (ret >= 0)
2593  ff_h264_hl_decode_mb(h, sl);
2594 
2595  // FIXME optimal? or let mb_decode decode 16x32 ?
2596  if (ret >= 0 && FRAME_MBAFF(h)) {
2597  sl->mb_y++;
2598 
2599  ret = ff_h264_decode_mb_cabac(h, sl);
2600 
2601  if (ret >= 0)
2602  ff_h264_hl_decode_mb(h, sl);
2603  sl->mb_y--;
2604  }
2605  eos = get_cabac_terminate(&sl->cabac);
2606 
/* Some encoders truncate the final bytes; end the slice gracefully
 * instead of flagging an error when that workaround is enabled. */
2607  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2608  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2609  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2610  sl->mb_y, ER_MB_END);
2611  if (sl->mb_x >= lf_x_start)
2612  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2613  goto finish;
2614  }
2615  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2616  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2617  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2618  av_log(h->avctx, AV_LOG_ERROR,
2619  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2620  sl->mb_x, sl->mb_y,
2621  sl->cabac.bytestream_end - sl->cabac.bytestream);
2622  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2623  sl->mb_y, ER_MB_ERROR);
2624  return AVERROR_INVALIDDATA;
2625  }
2626 
/* End of MB row: filter it, draw/report it, and advance (two rows at a
 * time for field/MBAFF pictures). */
2627  if (++sl->mb_x >= h->mb_width) {
2628  loop_filter(h, sl, lf_x_start, sl->mb_x);
2629  sl->mb_x = lf_x_start = 0;
2630  decode_finish_row(h, sl);
2631  ++sl->mb_y;
2632  if (FIELD_OR_MBAFF_PICTURE(h)) {
2633  ++sl->mb_y;
2634  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
/* NOTE(review): doc line 2635 (presumably
 * predict_field_decoding_flag(h, sl);) is missing from this dump. */
2636  }
2637  }
2638 
2639  if (eos || sl->mb_y >= h->mb_height) {
2640  ff_tlog(h->avctx, "slice end %d %d\n",
2641  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2642  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2643  sl->mb_y, ER_MB_END);
2644  if (sl->mb_x > lf_x_start)
2645  loop_filter(h, sl, lf_x_start, sl->mb_x);
2646  goto finish;
2647  }
2648  }
2649  } else {
/* CAVLC macroblock loop — mirrors the CABAC loop above but detects the
 * slice end from the remaining bit count rather than a terminate symbol. */
2650  for (;;) {
2651  int ret;
2652 
2653  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2654  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2655  sl->next_slice_idx);
2656  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2657  sl->mb_y, ER_MB_ERROR);
2658  return AVERROR_INVALIDDATA;
2659  }
2660 
2661  ret = ff_h264_decode_mb_cavlc(h, sl);
2662 
2663  if (ret >= 0)
2664  ff_h264_hl_decode_mb(h, sl);
2665 
2666  // FIXME optimal? or let mb_decode decode 16x32 ?
2667  if (ret >= 0 && FRAME_MBAFF(h)) {
2668  sl->mb_y++;
2669  ret = ff_h264_decode_mb_cavlc(h, sl);
2670 
2671  if (ret >= 0)
2672  ff_h264_hl_decode_mb(h, sl);
2673  sl->mb_y--;
2674  }
2675 
2676  if (ret < 0) {
2677  av_log(h->avctx, AV_LOG_ERROR,
2678  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2679  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2680  sl->mb_y, ER_MB_ERROR);
2681  return ret;
2682  }
2683 
2684  if (++sl->mb_x >= h->mb_width) {
2685  loop_filter(h, sl, lf_x_start, sl->mb_x);
2686  sl->mb_x = lf_x_start = 0;
2687  decode_finish_row(h, sl);
2688  ++sl->mb_y;
2689  if (FIELD_OR_MBAFF_PICTURE(h)) {
2690  ++sl->mb_y;
2691  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
/* NOTE(review): doc line 2692 (presumably
 * predict_field_decoding_flag(h, sl);) is missing from this dump. */
2693  }
2694  if (sl->mb_y >= h->mb_height) {
2695  ff_tlog(h->avctx, "slice end %d %d\n",
2696  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2697 
/* Leftover bits are tolerated unless aggressive error recognition
 * is requested. */
2698  if ( get_bits_left(&sl->gb) == 0
2699  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2700  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2701  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2702 
2703  goto finish;
2704  } else {
2705  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2706  sl->mb_x, sl->mb_y, ER_MB_END);
2707 
2708  return AVERROR_INVALIDDATA;
2709  }
2710  }
2711  }
2712 
/* Out of input mid-picture: clean end only if the bit count is exact. */
2713  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2714  ff_tlog(h->avctx, "slice end %d %d\n",
2715  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2716 
2717  if (get_bits_left(&sl->gb) == 0) {
2718  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2719  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2720  if (sl->mb_x > lf_x_start)
2721  loop_filter(h, sl, lf_x_start, sl->mb_x);
2722 
2723  goto finish;
2724  } else {
2725  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2726  sl->mb_y, ER_MB_ERROR);
2727 
2728  return AVERROR_INVALIDDATA;
2729  }
2730  }
2731  }
2732  }
2733 
2734 finish:
2735  sl->deblocking_filter = orig_deblock;
2736  return 0;
2737 }
2738 
2739 /**
2740  * Call decode_slice() for each context.
2741  *
2742  * @param h h264 master context
2743  */
/* NOTE(review): the signature line (doc line 2744) was dropped by the
 * extraction — in upstream FFmpeg this is
 * int ff_h264_execute_decode_slices(H264Context *h); confirm upstream.
 * Runs all queued slice contexts (serially for one, via avctx->execute for
 * several), computing per-slice overlap limits first and performing the
 * postponed deblocking pass afterwards when needed. */
2745 {
2746  AVCodecContext *const avctx = h->avctx;
2747  H264SliceContext *sl;
2748  int context_count = h->nb_slice_ctx_queued;
2749  int ret = 0;
2750  int i, j;
2751 
2752  h->slice_ctx[0].next_slice_idx = INT_MAX;
2753 
/* Hardware acceleration decodes elsewhere; nothing to execute here. */
2754  if (h->avctx->hwaccel || context_count < 1)
2755  return 0;
2756 
2757  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2758 
2759  if (context_count == 1) {
2760 
2761  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2762  h->postpone_filter = 0;
2763 
2764  ret = decode_slice(avctx, &h->slice_ctx[0]);
2765  h->mb_y = h->slice_ctx[0].mb_y;
2766  if (ret < 0)
2767  goto finish;
2768  } else {
2769  av_assert0(context_count > 0);
/* Give every slice the MB index of the nearest slice that starts after
 * it, so decode_slice() can detect overlap. */
2770  for (i = 0; i < context_count; i++) {
2771  int next_slice_idx = h->mb_width * h->mb_height;
2772  int slice_idx;
2773 
2774  sl = &h->slice_ctx[i];
2775 
2776  /* make sure none of those slices overlap */
2777  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2778  for (j = 0; j < context_count; j++) {
2779  H264SliceContext *sl2 = &h->slice_ctx[j];
2780  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2781 
2782  if (i == j || slice_idx2 < slice_idx)
2783  continue;
2784  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2785  }
2786  sl->next_slice_idx = next_slice_idx;
2787  }
2788 
2789  avctx->execute(avctx, decode_slice, h->slice_ctx,
2790  NULL, context_count, sizeof(h->slice_ctx[0]));
2791 
2792  /* pull back stuff from slices to master context */
2793  sl = &h->slice_ctx[context_count - 1];
2794  h->mb_y = sl->mb_y;
2795 
/* Deblocking was skipped during parallel decoding; run it now over each
 * slice's decoded region. */
2796  if (h->postpone_filter) {
2797  h->postpone_filter = 0;
2798 
2799  for (i = 0; i < context_count; i++) {
2800  int y_end, x_end;
2801 
2802  sl = &h->slice_ctx[i];
2803  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2804  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2805 
2806  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2807  sl->mb_y = j;
2808  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2809  j == y_end - 1 ? x_end : h->mb_width);
2810  }
2811  }
2812  }
2813  }
2814 
2815 finish:
2816  h->nb_slice_ctx_queued = 0;
2817  return ret;
2818 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2520
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:81
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:416
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:952
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:679
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:224
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:36
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:253
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:292
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:134
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
av_clip
#define av_clip
Definition: common.h:95
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1045
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:324
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:315
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:664
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:92
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1349
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:129
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1164
ff_h264_sei_ctx_replace
static int ff_h264_sei_ctx_replace(H264SEIContext *dst, const H264SEIContext *src)
Definition: h264_sei.h:132
ff_thread_release_ext_buffer
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
Unref a ThreadFrame.
Definition: pthread_frame.c:1045
H264Picture::f
AVFrame * f
Definition: h264dec.h:108
out
FILE * out
Definition: movenc.c:54
ff_thread_get_format
#define ff_thread_get_format
Definition: thread.h:65
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:239
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:812
av_clip_int8
#define av_clip_int8
Definition: common.h:104
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:100
ff_h264_replace_picture
int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture *src)
Definition: h264_picture.c:145
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:960
ff_h2645_sei_to_frame
int ff_h2645_sei_to_frame(AVFrame *frame, H2645SEI *sei, enum AVCodecID codec_id, AVCodecContext *avctx, const H2645VUI *vui, unsigned bit_depth_luma, unsigned bit_depth_chroma, int seed)
Definition: h2645_sei.c:459
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:126
HWACCEL_MAX
#define HWACCEL_MAX
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:65
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:305
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:195
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:256
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:35
H264Picture::pps
const PPS * pps
Definition: h264dec.h:153
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:152
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:110
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:57
ff_h264_slice_context_init
void ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init slice context.
Definition: h264dec.c:258
ERContext::mb_index2xy
int * mb_index2xy
Definition: error_resilience.h:59
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2476
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:330
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
pixdesc.h
AVFrame::width
int width
Definition: frame.h:402
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:669
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:53
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:309
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:132
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:569
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:516
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2489
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:260
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: defs.h:59
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:465
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:787
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:114
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:204
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:247
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:71
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:596
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:459
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:639
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:125
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:29
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:521
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:231
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:422
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:580
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:148
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:69
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:152
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: threadframe.h:28
AV_PIX_FMT_VULKAN
@ AV_PIX_FMT_VULKAN
Vulkan hardware images.
Definition: pixfmt.h:376
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1383
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:351
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:223
AV_FRAME_FLAG_TOP_FIELD_FIRST
#define AV_FRAME_FLAG_TOP_FIELD_FIRST
A flag to mark frames where the top field is displayed first if the content is interlaced.
Definition: frame.h:631
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:130
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:229
H264SliceContext
Definition: h264dec.h:170
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2180
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:66
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:325
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:716
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:291
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:224
AVHWAccel
Definition: avcodec.h:2111
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:483
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:647
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:131
fail
#define fail()
Definition: checkasm.h:137
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:293
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:481
timecode.h
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1287
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:463
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2964
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:156
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:286
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264_parse.h:40
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:443
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:65
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:471
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:186
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:72
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:260
ff_hwaccel_frame_priv_alloc
AVBufferRef * ff_hwaccel_frame_priv_alloc(AVCodecContext *avctx, const AVHWAccel *hwaccel)
Allocate a hwaccel frame private data and create an AVBufferRef from it.
Definition: decode.c:1722
H264Picture::f_grain
AVFrame * f_grain
Definition: h264dec.h:111
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:233
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:119
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:70
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:244
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:175
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264_parse.h:127
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1034
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:468
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:409
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:618
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:609
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:384
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2037
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:339
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:73
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:50
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:413
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:68
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:609
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:833
from
const char * from
Definition: jacosubdec.c:66
to
const char * to
Definition: webvttdec.c:35
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1682
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:75
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:297
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:462
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:167
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:80
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:187
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:771
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:176
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:225
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:149
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:73
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:79
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:544
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:99
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:323
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:896
arg
const char * arg
Definition: jacosubdec.c:67
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:108
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:191
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:116
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:148
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:276
SPS
Sequence parameter set.
Definition: h264_ps.h:45
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1920
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
AVFrame::coded_picture_number
attribute_deprecated int coded_picture_number
picture number in bitstream order
Definition: frame.h:464
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:181
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:345
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:378
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
PPS
Picture parameter set.
Definition: h264_ps.h:111
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:560
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:76
H264Picture::mb_height
int mb_height
Definition: h264dec.h:155
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:466
H264SliceContext::qscale
int qscale
Definition: h264dec.h:180
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:777
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2264
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:66
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:743
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:480
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:280
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:85
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:230
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:912
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
H264SliceContext::top_type
int top_type
Definition: h264dec.h:207
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:769
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:226
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:38
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:67
H264SEIPictureTiming
Definition: h264_sei.h:54
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:310
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:237
AVFrame::crop_left
size_t crop_left
Definition: frame.h:770
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:432
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
H264Picture::reference
int reference
Definition: h264dec.h:146
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:310
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:69
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
rectangle.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:221
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:470
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:190
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:109
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:120
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:695
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:36
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:147
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:202
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:182
H2645NAL
Definition: h2645_parse.h:34
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:472
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:277
AVFrameSideData::data
uint8_t * data
Definition: frame.h:238
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1538
H264SliceContext::cbp
int cbp
Definition: h264dec.h:248
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:417
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:209
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:120
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:223
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:79
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2532
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:316
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:219
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:83
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:333
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:177
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:321
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:67
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:77
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:32
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:187
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:40
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:54
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2406
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:162
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:235
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:61
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:302
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:389
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:139
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:320
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:227
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:61
H264Picture::needs_fg
int needs_fg
whether picture needs film grain synthesis (see f_grain)
Definition: h264dec.h:150
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:38
H264_MAX_DPB_FRAMES
@ H264_MAX_DPB_FRAMES
Definition: h264.h:76
H264Context
H264Context.
Definition: h264dec.h:330
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: defs.h:74
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:138
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:368
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:614
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:39
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:318
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:482
display.h
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2744
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:220
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:122
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:221
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:179
ff_thread_get_ext_buffer
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around ff_get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:1003
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:626
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:652
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:464
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:261
avcodec.h
H264SliceContext::h264
const struct H264Context * h264
Definition: h264dec.h:171
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:92
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:279
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:531
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1390
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:183
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:540
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:469
U
#define U(x)
Definition: vpx_arith.h:37
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:474
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:271
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:219
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:188
AVCodecContext
main external API structure.
Definition: avcodec.h:435
AVFrame::height
int height
Definition: frame.h:402
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:120
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:31
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:577
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:298
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1556
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:275
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:437
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:128
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
H264SliceContext::mmco
MMCO mmco[H264_MAX_MMCO_COUNT]
Definition: h264dec.h:314
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2220
H264Picture::mb_width
int mb_width
Definition: h264dec.h:155
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:815
H264Picture
Definition: h264dec.h:107
ERContext::error_status_table
uint8_t * error_status_table
Definition: error_resilience.h:67
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:274
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1901
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:218
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:164
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:119
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:262
LBOT
#define LBOT
Definition: h264dec.h:71
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:286
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:72
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:104
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:449
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:236
H264SliceContext::er
ERContext * er
Definition: h264dec.h:173
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:34
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:123
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
H264SliceContext::idr_pic_id
int idr_pic_id
Definition: h264dec.h:319
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:161
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:462
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:240
AVFrame::crop_top
size_t crop_top
Definition: frame.h:768
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:172
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:86
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:199
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
LTOP
#define LTOP
Definition: h264dec.h:70
h264.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:375
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:279
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:288
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:93
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2162
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:312
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:475
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:74
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:50
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:278
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:178
H264Ref::poc
int poc
Definition: h264dec.h:164
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:96
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:113
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:33
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:322
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:3291
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:135
H264Ref::reference
int reference
Definition: h264dec.h:163
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:117
AV_CODEC_EXPORT_DATA_FILM_GRAIN
#define AV_CODEC_EXPORT_DATA_FILM_GRAIN
Decoding only.
Definition: avcodec.h:408
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:473
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:37
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2856
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:234
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:448