FFmpeg
h264_slice.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG-4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #include "libavutil/avassert.h"
29 #include "libavutil/display.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/stereo3d.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "error_resilience.h"
36 #include "avcodec.h"
37 #include "h264.h"
38 #include "h264dec.h"
39 #include "h264data.h"
40 #include "h264chroma.h"
41 #include "h264_mvpred.h"
42 #include "h264_ps.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "mpegutils.h"
46 #include "mpegvideo.h"
47 #include "rectangle.h"
48 #include "thread.h"
49 
/* Coefficient scan order for 4x4 blocks in field (interlaced) pictures.
 * Each entry encodes a position as x + y * 4. The array is declared one
 * element larger than the 16 initializers; the extra zero element matches
 * the padding convention of the other scan tables in this file. */
static const uint8_t field_scan[16+1] = {
    0 + 0 * 4, 0 + 1 * 4, 1 + 0 * 4, 0 + 2 * 4,
    0 + 3 * 4, 1 + 1 * 4, 1 + 2 * 4, 1 + 3 * 4,
    2 + 0 * 4, 2 + 1 * 4, 2 + 2 * 4, 2 + 3 * 4,
    3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4, 3 + 3 * 4,
};
56 
/* Coefficient scan order for 8x8 blocks in field (interlaced) pictures.
 * Each entry encodes a position as x + y * 8; padded by one zero element
 * like the other scan tables. */
static const uint8_t field_scan8x8[64+1] = {
    0 + 0 * 8, 0 + 1 * 8, 0 + 2 * 8, 1 + 0 * 8,
    1 + 1 * 8, 0 + 3 * 8, 0 + 4 * 8, 1 + 2 * 8,
    2 + 0 * 8, 1 + 3 * 8, 0 + 5 * 8, 0 + 6 * 8,
    0 + 7 * 8, 1 + 4 * 8, 2 + 1 * 8, 3 + 0 * 8,
    2 + 2 * 8, 1 + 5 * 8, 1 + 6 * 8, 1 + 7 * 8,
    2 + 3 * 8, 3 + 1 * 8, 4 + 0 * 8, 3 + 2 * 8,
    2 + 4 * 8, 2 + 5 * 8, 2 + 6 * 8, 2 + 7 * 8,
    3 + 3 * 8, 4 + 1 * 8, 5 + 0 * 8, 4 + 2 * 8,
    3 + 4 * 8, 3 + 5 * 8, 3 + 6 * 8, 3 + 7 * 8,
    4 + 3 * 8, 5 + 1 * 8, 6 + 0 * 8, 5 + 2 * 8,
    4 + 4 * 8, 4 + 5 * 8, 4 + 6 * 8, 4 + 7 * 8,
    5 + 3 * 8, 6 + 1 * 8, 6 + 2 * 8, 5 + 4 * 8,
    5 + 5 * 8, 5 + 6 * 8, 5 + 7 * 8, 6 + 3 * 8,
    7 + 0 * 8, 7 + 1 * 8, 6 + 4 * 8, 6 + 5 * 8,
    6 + 6 * 8, 6 + 7 * 8, 7 + 2 * 8, 7 + 3 * 8,
    7 + 4 * 8, 7 + 5 * 8, 7 + 6 * 8, 7 + 7 * 8,
};
75 
/* CAVLC variant of the 8x8 field scan: same positions as field_scan8x8 but
 * reordered so that field_scan8x8_cavlc[i] = field_scan8x8[(i/4) + 16*(i%4)].
 * Entries encode x + y * 8; padded by one zero element. */
static const uint8_t field_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 2 + 0 * 8, 0 + 7 * 8,
    2 + 2 * 8, 2 + 3 * 8, 2 + 4 * 8, 3 + 3 * 8,
    3 + 4 * 8, 4 + 3 * 8, 4 + 4 * 8, 5 + 3 * 8,
    5 + 5 * 8, 7 + 0 * 8, 6 + 6 * 8, 7 + 4 * 8,
    0 + 1 * 8, 0 + 3 * 8, 1 + 3 * 8, 1 + 4 * 8,
    1 + 5 * 8, 3 + 1 * 8, 2 + 5 * 8, 4 + 1 * 8,
    3 + 5 * 8, 5 + 1 * 8, 4 + 5 * 8, 6 + 1 * 8,
    5 + 6 * 8, 7 + 1 * 8, 6 + 7 * 8, 7 + 5 * 8,
    0 + 2 * 8, 0 + 4 * 8, 0 + 5 * 8, 2 + 1 * 8,
    1 + 6 * 8, 4 + 0 * 8, 2 + 6 * 8, 5 + 0 * 8,
    3 + 6 * 8, 6 + 0 * 8, 4 + 6 * 8, 6 + 2 * 8,
    5 + 7 * 8, 6 + 4 * 8, 7 + 2 * 8, 7 + 6 * 8,
    1 + 0 * 8, 1 + 2 * 8, 0 + 6 * 8, 3 + 0 * 8,
    1 + 7 * 8, 3 + 2 * 8, 2 + 7 * 8, 4 + 2 * 8,
    3 + 7 * 8, 5 + 2 * 8, 4 + 7 * 8, 5 + 4 * 8,
    6 + 3 * 8, 6 + 5 * 8, 7 + 3 * 8, 7 + 7 * 8,
};
94 
// zigzag_scan8x8_cavlc[i] = zigzag_scan8x8[(i/4) + 16*(i%4)]
/* CAVLC reordering of the standard 8x8 zigzag scan. Entries encode a
 * position as x + y * 8; padded by one zero element like the other tables. */
static const uint8_t zigzag_scan8x8_cavlc[64+1] = {
    0 + 0 * 8, 1 + 1 * 8, 1 + 2 * 8, 2 + 2 * 8,
    4 + 1 * 8, 0 + 5 * 8, 3 + 3 * 8, 7 + 0 * 8,
    3 + 4 * 8, 1 + 7 * 8, 5 + 3 * 8, 6 + 3 * 8,
    2 + 7 * 8, 6 + 4 * 8, 5 + 6 * 8, 7 + 5 * 8,
    1 + 0 * 8, 2 + 0 * 8, 0 + 3 * 8, 3 + 1 * 8,
    3 + 2 * 8, 0 + 6 * 8, 4 + 2 * 8, 6 + 1 * 8,
    2 + 5 * 8, 2 + 6 * 8, 6 + 2 * 8, 5 + 4 * 8,
    3 + 7 * 8, 7 + 3 * 8, 4 + 7 * 8, 7 + 6 * 8,
    0 + 1 * 8, 3 + 0 * 8, 0 + 4 * 8, 4 + 0 * 8,
    2 + 3 * 8, 1 + 5 * 8, 5 + 1 * 8, 5 + 2 * 8,
    1 + 6 * 8, 3 + 5 * 8, 7 + 1 * 8, 4 + 5 * 8,
    4 + 6 * 8, 7 + 4 * 8, 5 + 7 * 8, 6 + 7 * 8,
    0 + 2 * 8, 2 + 1 * 8, 1 + 3 * 8, 5 + 0 * 8,
    1 + 4 * 8, 2 + 4 * 8, 6 + 0 * 8, 4 + 3 * 8,
    0 + 7 * 8, 4 + 4 * 8, 7 + 2 * 8, 3 + 6 * 8,
    5 + 5 * 8, 6 + 5 * 8, 6 + 6 * 8, 7 + 7 * 8,
};
114 
115 static void release_unused_pictures(H264Context *h, int remove_current)
116 {
117  int i;
118 
119  /* release non reference frames */
120  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
121  if (h->DPB[i].f->buf[0] && !h->DPB[i].reference &&
122  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
123  ff_h264_unref_picture(h, &h->DPB[i]);
124  }
125  }
126 }
127 
128 static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
129 {
130  const H264Context *h = sl->h264;
131  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
132 
133  av_fast_malloc(&sl->bipred_scratchpad, &sl->bipred_scratchpad_allocated, 16 * 6 * alloc_size);
134  // edge emu needs blocksize + filter length - 1
135  // (= 21x21 for H.264)
136  av_fast_malloc(&sl->edge_emu_buffer, &sl->edge_emu_buffer_allocated, alloc_size * 2 * 21);
137 
139  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
141  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2);
142 
143  if (!sl->bipred_scratchpad || !sl->edge_emu_buffer ||
144  !sl->top_borders[0] || !sl->top_borders[1]) {
147  av_freep(&sl->top_borders[0]);
148  av_freep(&sl->top_borders[1]);
149 
152  sl->top_borders_allocated[0] = 0;
153  sl->top_borders_allocated[1] = 0;
154  return AVERROR(ENOMEM);
155  }
156 
157  return 0;
158 }
159 
161 {
162  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
163  const int mb_array_size = h->mb_stride * h->mb_height;
164  const int b4_stride = h->mb_width * 4 + 1;
165  const int b4_array_size = b4_stride * h->mb_height * 4;
166 
167  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
169  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
170  sizeof(uint32_t), av_buffer_allocz);
171  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
172  sizeof(int16_t), av_buffer_allocz);
173  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
174 
175  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
176  !h->ref_index_pool) {
177  av_buffer_pool_uninit(&h->qscale_table_pool);
178  av_buffer_pool_uninit(&h->mb_type_pool);
179  av_buffer_pool_uninit(&h->motion_val_pool);
180  av_buffer_pool_uninit(&h->ref_index_pool);
181  return AVERROR(ENOMEM);
182  }
183 
184  return 0;
185 }
186 
188 {
189  int i, ret = 0;
190 
191  av_assert0(!pic->f->data[0]);
192 
193  pic->tf.f = pic->f;
194  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
196  if (ret < 0)
197  goto fail;
198 
199  if (h->avctx->hwaccel) {
200  const AVHWAccel *hwaccel = h->avctx->hwaccel;
202  if (hwaccel->frame_priv_data_size) {
204  if (!pic->hwaccel_priv_buf)
205  return AVERROR(ENOMEM);
207  }
208  }
209  if (CONFIG_GRAY && !h->avctx->hwaccel && h->flags & AV_CODEC_FLAG_GRAY && pic->f->data[2]) {
210  int h_chroma_shift, v_chroma_shift;
212  &h_chroma_shift, &v_chroma_shift);
213 
214  for(i=0; i<AV_CEIL_RSHIFT(pic->f->height, v_chroma_shift); i++) {
215  memset(pic->f->data[1] + pic->f->linesize[1]*i,
216  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
217  memset(pic->f->data[2] + pic->f->linesize[2]*i,
218  0x80, AV_CEIL_RSHIFT(pic->f->width, h_chroma_shift));
219  }
220  }
221 
222  if (!h->qscale_table_pool) {
224  if (ret < 0)
225  goto fail;
226  }
227 
228  pic->qscale_table_buf = av_buffer_pool_get(h->qscale_table_pool);
229  pic->mb_type_buf = av_buffer_pool_get(h->mb_type_pool);
230  if (!pic->qscale_table_buf || !pic->mb_type_buf)
231  goto fail;
232 
233  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
234  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
235 
236  for (i = 0; i < 2; i++) {
237  pic->motion_val_buf[i] = av_buffer_pool_get(h->motion_val_pool);
238  pic->ref_index_buf[i] = av_buffer_pool_get(h->ref_index_pool);
239  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
240  goto fail;
241 
242  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
243  pic->ref_index[i] = pic->ref_index_buf[i]->data;
244  }
245 
246  pic->pps_buf = av_buffer_ref(h->ps.pps_ref);
247  if (!pic->pps_buf)
248  goto fail;
249  pic->pps = (const PPS*)pic->pps_buf->data;
250 
251  pic->mb_width = h->mb_width;
252  pic->mb_height = h->mb_height;
253  pic->mb_stride = h->mb_stride;
254 
255  return 0;
256 fail:
257  ff_h264_unref_picture(h, pic);
258  return (ret < 0) ? ret : AVERROR(ENOMEM);
259 }
260 
262 {
263  int i;
264 
265  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
266  if (!h->DPB[i].f->buf[0])
267  return i;
268  }
269  return AVERROR_INVALIDDATA;
270 }
271 
272 
273 #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
274 
275 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
276  (((pic) && (pic) >= (old_ctx)->DPB && \
277  (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ? \
278  &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
279 
280 static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
281  H264Context *new_base,
282  H264Context *old_base)
283 {
284  int i;
285 
286  for (i = 0; i < count; i++) {
287  av_assert1(!from[i] ||
288  IN_RANGE(from[i], old_base, 1) ||
289  IN_RANGE(from[i], old_base->DPB, H264_MAX_PICTURE_COUNT));
290  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
291  }
292 }
293 
295 
297  const AVCodecContext *src)
298 {
299  H264Context *h = dst->priv_data, *h1 = src->priv_data;
300  int inited = h->context_initialized, err = 0;
301  int need_reinit = 0;
302  int i, ret;
303 
304  if (dst == src)
305  return 0;
306 
307  if (inited && !h1->ps.sps)
308  return AVERROR_INVALIDDATA;
309 
310  if (inited &&
311  (h->width != h1->width ||
312  h->height != h1->height ||
313  h->mb_width != h1->mb_width ||
314  h->mb_height != h1->mb_height ||
315  !h->ps.sps ||
316  h->ps.sps->bit_depth_luma != h1->ps.sps->bit_depth_luma ||
317  h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
318  h->ps.sps->colorspace != h1->ps.sps->colorspace)) {
319  need_reinit = 1;
320  }
321 
322  /* copy block_offset since frame_start may not be called */
323  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
324 
325  // SPS/PPS
326  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
327  ret = av_buffer_replace(&h->ps.sps_list[i], h1->ps.sps_list[i]);
328  if (ret < 0)
329  return ret;
330  }
331  for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
332  ret = av_buffer_replace(&h->ps.pps_list[i], h1->ps.pps_list[i]);
333  if (ret < 0)
334  return ret;
335  }
336 
337  ret = av_buffer_replace(&h->ps.pps_ref, h1->ps.pps_ref);
338  if (ret < 0)
339  return ret;
340  h->ps.pps = NULL;
341  h->ps.sps = NULL;
342  if (h1->ps.pps_ref) {
343  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
344  h->ps.sps = h->ps.pps->sps;
345  }
346 
347  if (need_reinit || !inited) {
348  h->width = h1->width;
349  h->height = h1->height;
350  h->mb_height = h1->mb_height;
351  h->mb_width = h1->mb_width;
352  h->mb_num = h1->mb_num;
353  h->mb_stride = h1->mb_stride;
354  h->b_stride = h1->b_stride;
355  h->x264_build = h1->x264_build;
356 
357  if (h->context_initialized || h1->context_initialized) {
358  if ((err = h264_slice_header_init(h)) < 0) {
359  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
360  return err;
361  }
362  }
363 
364  /* copy block_offset since frame_start may not be called */
365  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
366  }
367 
368  h->avctx->coded_height = h1->avctx->coded_height;
369  h->avctx->coded_width = h1->avctx->coded_width;
370  h->avctx->width = h1->avctx->width;
371  h->avctx->height = h1->avctx->height;
372  h->width_from_caller = h1->width_from_caller;
373  h->height_from_caller = h1->height_from_caller;
374  h->coded_picture_number = h1->coded_picture_number;
375  h->first_field = h1->first_field;
376  h->picture_structure = h1->picture_structure;
377  h->mb_aff_frame = h1->mb_aff_frame;
378  h->droppable = h1->droppable;
379 
380  for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
381  ff_h264_unref_picture(h, &h->DPB[i]);
382  if (h1->DPB[i].f->buf[0] &&
383  (ret = ff_h264_ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
384  return ret;
385  }
386 
387  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
388  ff_h264_unref_picture(h, &h->cur_pic);
389  if (h1->cur_pic.f->buf[0]) {
390  ret = ff_h264_ref_picture(h, &h->cur_pic, &h1->cur_pic);
391  if (ret < 0)
392  return ret;
393  }
394 
395  h->enable_er = h1->enable_er;
396  h->workaround_bugs = h1->workaround_bugs;
397  h->droppable = h1->droppable;
398 
399  // extradata/NAL handling
400  h->is_avc = h1->is_avc;
401  h->nal_length_size = h1->nal_length_size;
402 
403  memcpy(&h->poc, &h1->poc, sizeof(h->poc));
404 
405  memcpy(h->short_ref, h1->short_ref, sizeof(h->short_ref));
406  memcpy(h->long_ref, h1->long_ref, sizeof(h->long_ref));
407  memcpy(h->delayed_pic, h1->delayed_pic, sizeof(h->delayed_pic));
408  memcpy(h->last_pocs, h1->last_pocs, sizeof(h->last_pocs));
409 
410  h->next_output_pic = h1->next_output_pic;
411  h->next_outputed_poc = h1->next_outputed_poc;
412 
413  memcpy(h->mmco, h1->mmco, sizeof(h->mmco));
414  h->nb_mmco = h1->nb_mmco;
415  h->mmco_reset = h1->mmco_reset;
416  h->explicit_ref_marking = h1->explicit_ref_marking;
417  h->long_ref_count = h1->long_ref_count;
418  h->short_ref_count = h1->short_ref_count;
419 
420  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
421  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
422  copy_picture_range(h->delayed_pic, h1->delayed_pic,
423  MAX_DELAYED_PIC_COUNT + 2, h, h1);
424 
425  h->frame_recovered = h1->frame_recovered;
426 
427  ret = av_buffer_replace(&h->sei.a53_caption.buf_ref, h1->sei.a53_caption.buf_ref);
428  if (ret < 0)
429  return ret;
430 
431  for (i = 0; i < h->sei.unregistered.nb_buf_ref; i++)
432  av_buffer_unref(&h->sei.unregistered.buf_ref[i]);
433  h->sei.unregistered.nb_buf_ref = 0;
434 
435  if (h1->sei.unregistered.nb_buf_ref) {
436  ret = av_reallocp_array(&h->sei.unregistered.buf_ref,
437  h1->sei.unregistered.nb_buf_ref,
438  sizeof(*h->sei.unregistered.buf_ref));
439  if (ret < 0)
440  return ret;
441 
442  for (i = 0; i < h1->sei.unregistered.nb_buf_ref; i++) {
443  h->sei.unregistered.buf_ref[i] = av_buffer_ref(h1->sei.unregistered.buf_ref[i]);
444  if (!h->sei.unregistered.buf_ref[i])
445  return AVERROR(ENOMEM);
446  h->sei.unregistered.nb_buf_ref++;
447  }
448  }
449  h->sei.unregistered.x264_build = h1->sei.unregistered.x264_build;
450 
451  if (!h->cur_pic_ptr)
452  return 0;
453 
454  if (!h->droppable) {
456  h->poc.prev_poc_msb = h->poc.poc_msb;
457  h->poc.prev_poc_lsb = h->poc.poc_lsb;
458  }
459  h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
460  h->poc.prev_frame_num = h->poc.frame_num;
461 
462  h->recovery_frame = h1->recovery_frame;
463 
464  return err;
465 }
466 
468  const AVCodecContext *src)
469 {
470  H264Context *h = dst->priv_data;
471  const H264Context *h1 = src->priv_data;
472 
473  h->is_avc = h1->is_avc;
474  h->nal_length_size = h1->nal_length_size;
475 
476  return 0;
477 }
478 
480 {
481  H264Picture *pic;
482  int i, ret;
483  const int pixel_shift = h->pixel_shift;
484 
485  if (!ff_thread_can_start_frame(h->avctx)) {
486  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
487  return -1;
488  }
489 
491  h->cur_pic_ptr = NULL;
492 
494  if (i < 0) {
495  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
496  return i;
497  }
498  pic = &h->DPB[i];
499 
500  pic->reference = h->droppable ? 0 : h->picture_structure;
501  pic->f->coded_picture_number = h->coded_picture_number++;
502  pic->field_picture = h->picture_structure != PICT_FRAME;
503  pic->frame_num = h->poc.frame_num;
504  /*
505  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
506  * in later.
507  * See decode_nal_units().
508  */
509  pic->f->key_frame = 0;
510  pic->mmco_reset = 0;
511  pic->recovered = 0;
512  pic->invalid_gap = 0;
513  pic->sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
514 
515  pic->f->pict_type = h->slice_ctx[0].slice_type;
516 
517  pic->f->crop_left = h->crop_left;
518  pic->f->crop_right = h->crop_right;
519  pic->f->crop_top = h->crop_top;
520  pic->f->crop_bottom = h->crop_bottom;
521 
522  if ((ret = alloc_picture(h, pic)) < 0)
523  return ret;
524 
525  h->cur_pic_ptr = pic;
526  ff_h264_unref_picture(h, &h->cur_pic);
527  if (CONFIG_ERROR_RESILIENCE) {
528  ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
529  }
530 
531  if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
532  return ret;
533 
534  for (i = 0; i < h->nb_slice_ctx; i++) {
535  h->slice_ctx[i].linesize = h->cur_pic_ptr->f->linesize[0];
536  h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
537  }
538 
539  if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
540  ff_er_frame_start(&h->slice_ctx[0].er);
541  ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
542  ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
543  }
544 
545  for (i = 0; i < 16; i++) {
546  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
547  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
548  }
549  for (i = 0; i < 16; i++) {
550  h->block_offset[16 + i] =
551  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
552  h->block_offset[48 + 16 + i] =
553  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
554  }
555 
556  /* We mark the current picture as non-reference after allocating it, so
557  * that if we break out due to an error it can be released automatically
558  * in the next ff_mpv_frame_start().
559  */
560  h->cur_pic_ptr->reference = 0;
561 
562  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
563 
564  h->next_output_pic = NULL;
565 
566  h->postpone_filter = 0;
567 
568  h->mb_aff_frame = h->ps.sps->mb_aff && (h->picture_structure == PICT_FRAME);
569 
570  if (h->sei.unregistered.x264_build >= 0)
571  h->x264_build = h->sei.unregistered.x264_build;
572 
573  assert(h->cur_pic_ptr->long_ref == 0);
574 
575  return 0;
576 }
577 
579  uint8_t *src_y,
580  uint8_t *src_cb, uint8_t *src_cr,
581  int linesize, int uvlinesize,
582  int simple)
583 {
584  uint8_t *top_border;
585  int top_idx = 1;
586  const int pixel_shift = h->pixel_shift;
587  int chroma444 = CHROMA444(h);
588  int chroma422 = CHROMA422(h);
589 
590  src_y -= linesize;
591  src_cb -= uvlinesize;
592  src_cr -= uvlinesize;
593 
594  if (!simple && FRAME_MBAFF(h)) {
595  if (sl->mb_y & 1) {
596  if (!MB_MBAFF(sl)) {
597  top_border = sl->top_borders[0][sl->mb_x];
598  AV_COPY128(top_border, src_y + 15 * linesize);
599  if (pixel_shift)
600  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
601  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
602  if (chroma444) {
603  if (pixel_shift) {
604  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
605  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
606  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
607  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
608  } else {
609  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
610  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
611  }
612  } else if (chroma422) {
613  if (pixel_shift) {
614  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
615  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
616  } else {
617  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
618  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
619  }
620  } else {
621  if (pixel_shift) {
622  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
623  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
624  } else {
625  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
626  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
627  }
628  }
629  }
630  }
631  } else if (MB_MBAFF(sl)) {
632  top_idx = 0;
633  } else
634  return;
635  }
636 
637  top_border = sl->top_borders[top_idx][sl->mb_x];
638  /* There are two lines saved, the line above the top macroblock
639  * of a pair, and the line above the bottom macroblock. */
640  AV_COPY128(top_border, src_y + 16 * linesize);
641  if (pixel_shift)
642  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
643 
644  if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
645  if (chroma444) {
646  if (pixel_shift) {
647  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
648  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
649  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
650  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
651  } else {
652  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
653  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
654  }
655  } else if (chroma422) {
656  if (pixel_shift) {
657  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
658  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
659  } else {
660  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
661  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
662  }
663  } else {
664  if (pixel_shift) {
665  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
666  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
667  } else {
668  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
669  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
670  }
671  }
672  }
673 }
674 
675 /**
676  * Initialize implicit_weight table.
677  * @param field 0/1 initialize the weight for interlaced MBAFF
678  * -1 initializes the rest
679  */
681 {
682  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
683 
684  for (i = 0; i < 2; i++) {
685  sl->pwt.luma_weight_flag[i] = 0;
686  sl->pwt.chroma_weight_flag[i] = 0;
687  }
688 
689  if (field < 0) {
690  if (h->picture_structure == PICT_FRAME) {
691  cur_poc = h->cur_pic_ptr->poc;
692  } else {
693  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
694  }
695  if (sl->ref_count[0] == 1 && sl->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
696  sl->ref_list[0][0].poc + (int64_t)sl->ref_list[1][0].poc == 2LL * cur_poc) {
697  sl->pwt.use_weight = 0;
698  sl->pwt.use_weight_chroma = 0;
699  return;
700  }
701  ref_start = 0;
702  ref_count0 = sl->ref_count[0];
703  ref_count1 = sl->ref_count[1];
704  } else {
705  cur_poc = h->cur_pic_ptr->field_poc[field];
706  ref_start = 16;
707  ref_count0 = 16 + 2 * sl->ref_count[0];
708  ref_count1 = 16 + 2 * sl->ref_count[1];
709  }
710 
711  sl->pwt.use_weight = 2;
712  sl->pwt.use_weight_chroma = 2;
713  sl->pwt.luma_log2_weight_denom = 5;
715 
716  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
717  int64_t poc0 = sl->ref_list[0][ref0].poc;
718  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
719  int w = 32;
720  if (!sl->ref_list[0][ref0].parent->long_ref && !sl->ref_list[1][ref1].parent->long_ref) {
721  int poc1 = sl->ref_list[1][ref1].poc;
722  int td = av_clip_int8(poc1 - poc0);
723  if (td) {
724  int tb = av_clip_int8(cur_poc - poc0);
725  int tx = (16384 + (FFABS(td) >> 1)) / td;
726  int dist_scale_factor = (tb * tx + 32) >> 8;
727  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
728  w = 64 - dist_scale_factor;
729  }
730  }
731  if (field < 0) {
732  sl->pwt.implicit_weight[ref0][ref1][0] =
733  sl->pwt.implicit_weight[ref0][ref1][1] = w;
734  } else {
735  sl->pwt.implicit_weight[ref0][ref1][field] = w;
736  }
737  }
738  }
739 }
740 
741 /**
742  * initialize scan tables
743  */
745 {
746  int i;
747  for (i = 0; i < 16; i++) {
748 #define TRANSPOSE(x) ((x) >> 2) | (((x) << 2) & 0xF)
749  h->zigzag_scan[i] = TRANSPOSE(ff_zigzag_scan[i]);
750  h->field_scan[i] = TRANSPOSE(field_scan[i]);
751 #undef TRANSPOSE
752  }
753  for (i = 0; i < 64; i++) {
754 #define TRANSPOSE(x) ((x) >> 3) | (((x) & 7) << 3)
755  h->zigzag_scan8x8[i] = TRANSPOSE(ff_zigzag_direct[i]);
756  h->zigzag_scan8x8_cavlc[i] = TRANSPOSE(zigzag_scan8x8_cavlc[i]);
757  h->field_scan8x8[i] = TRANSPOSE(field_scan8x8[i]);
758  h->field_scan8x8_cavlc[i] = TRANSPOSE(field_scan8x8_cavlc[i]);
759 #undef TRANSPOSE
760  }
761  if (h->ps.sps->transform_bypass) { // FIXME same ugly
762  memcpy(h->zigzag_scan_q0 , ff_zigzag_scan , sizeof(h->zigzag_scan_q0 ));
763  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
764  memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
765  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
766  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
767  memcpy(h->field_scan8x8_cavlc_q0 , field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
768  } else {
769  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
770  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
771  memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
772  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
773  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
774  memcpy(h->field_scan8x8_cavlc_q0 , h->field_scan8x8_cavlc , sizeof(h->field_scan8x8_cavlc_q0 ));
775  }
776 }
777 
778 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
779 {
780 #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
781  (CONFIG_H264_D3D11VA_HWACCEL * 2) + \
782  CONFIG_H264_NVDEC_HWACCEL + \
783  CONFIG_H264_VAAPI_HWACCEL + \
784  CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
785  CONFIG_H264_VDPAU_HWACCEL)
786  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
787  const enum AVPixelFormat *choices = pix_fmts;
788  int i;
789 
790  switch (h->ps.sps->bit_depth_luma) {
791  case 9:
792  if (CHROMA444(h)) {
793  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
794  *fmt++ = AV_PIX_FMT_GBRP9;
795  } else
796  *fmt++ = AV_PIX_FMT_YUV444P9;
797  } else if (CHROMA422(h))
798  *fmt++ = AV_PIX_FMT_YUV422P9;
799  else
800  *fmt++ = AV_PIX_FMT_YUV420P9;
801  break;
802  case 10:
803  if (CHROMA444(h)) {
804  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
805  *fmt++ = AV_PIX_FMT_GBRP10;
806  } else
807  *fmt++ = AV_PIX_FMT_YUV444P10;
808  } else if (CHROMA422(h))
809  *fmt++ = AV_PIX_FMT_YUV422P10;
810  else
811  *fmt++ = AV_PIX_FMT_YUV420P10;
812  break;
813  case 12:
814  if (CHROMA444(h)) {
815  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
816  *fmt++ = AV_PIX_FMT_GBRP12;
817  } else
818  *fmt++ = AV_PIX_FMT_YUV444P12;
819  } else if (CHROMA422(h))
820  *fmt++ = AV_PIX_FMT_YUV422P12;
821  else
822  *fmt++ = AV_PIX_FMT_YUV420P12;
823  break;
824  case 14:
825  if (CHROMA444(h)) {
826  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
827  *fmt++ = AV_PIX_FMT_GBRP14;
828  } else
829  *fmt++ = AV_PIX_FMT_YUV444P14;
830  } else if (CHROMA422(h))
831  *fmt++ = AV_PIX_FMT_YUV422P14;
832  else
833  *fmt++ = AV_PIX_FMT_YUV420P14;
834  break;
835  case 8:
836 #if CONFIG_H264_VDPAU_HWACCEL
837  *fmt++ = AV_PIX_FMT_VDPAU;
838 #endif
839 #if CONFIG_H264_NVDEC_HWACCEL
840  *fmt++ = AV_PIX_FMT_CUDA;
841 #endif
842  if (CHROMA444(h)) {
843  if (h->avctx->colorspace == AVCOL_SPC_RGB)
844  *fmt++ = AV_PIX_FMT_GBRP;
845  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
846  *fmt++ = AV_PIX_FMT_YUVJ444P;
847  else
848  *fmt++ = AV_PIX_FMT_YUV444P;
849  } else if (CHROMA422(h)) {
850  if (h->avctx->color_range == AVCOL_RANGE_JPEG)
851  *fmt++ = AV_PIX_FMT_YUVJ422P;
852  else
853  *fmt++ = AV_PIX_FMT_YUV422P;
854  } else {
855 #if CONFIG_H264_DXVA2_HWACCEL
856  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
857 #endif
858 #if CONFIG_H264_D3D11VA_HWACCEL
859  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
860  *fmt++ = AV_PIX_FMT_D3D11;
861 #endif
862 #if CONFIG_H264_VAAPI_HWACCEL
863  *fmt++ = AV_PIX_FMT_VAAPI;
864 #endif
865 #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
866  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
867 #endif
868  if (h->avctx->codec->pix_fmts)
869  choices = h->avctx->codec->pix_fmts;
870  else if (h->avctx->color_range == AVCOL_RANGE_JPEG)
871  *fmt++ = AV_PIX_FMT_YUVJ420P;
872  else
873  *fmt++ = AV_PIX_FMT_YUV420P;
874  }
875  break;
876  default:
877  av_log(h->avctx, AV_LOG_ERROR,
878  "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
879  return AVERROR_INVALIDDATA;
880  }
881 
882  *fmt = AV_PIX_FMT_NONE;
883 
884  for (i=0; choices[i] != AV_PIX_FMT_NONE; i++)
885  if (choices[i] == h->avctx->pix_fmt && !force_callback)
886  return choices[i];
887  return ff_thread_get_format(h->avctx, choices);
888 }
889 
890 /* export coded and cropped frame dimensions to AVCodecContext */
892 {
893  const SPS *sps = (const SPS*)h->ps.sps;
894  int cr = sps->crop_right;
895  int cl = sps->crop_left;
896  int ct = sps->crop_top;
897  int cb = sps->crop_bottom;
898  int width = h->width - (cr + cl);
899  int height = h->height - (ct + cb);
900  av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
901  av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
902 
903  /* handle container cropping */
904  if (h->width_from_caller > 0 && h->height_from_caller > 0 &&
905  !sps->crop_top && !sps->crop_left &&
906  FFALIGN(h->width_from_caller, 16) == FFALIGN(width, 16) &&
907  FFALIGN(h->height_from_caller, 16) == FFALIGN(height, 16) &&
908  h->width_from_caller <= width &&
909  h->height_from_caller <= height) {
910  width = h->width_from_caller;
911  height = h->height_from_caller;
912  cl = 0;
913  ct = 0;
914  cr = h->width - width;
915  cb = h->height - height;
916  } else {
917  h->width_from_caller = 0;
918  h->height_from_caller = 0;
919  }
920 
921  h->avctx->coded_width = h->width;
922  h->avctx->coded_height = h->height;
923  h->avctx->width = width;
924  h->avctx->height = height;
925  h->crop_right = cr;
926  h->crop_left = cl;
927  h->crop_top = ct;
928  h->crop_bottom = cb;
929 }
930 
/* h264_slice_header_init: (re)initialize per-stream decoder state derived
 * from the active SPS — SAR, framerate, bit depth, DSP/prediction function
 * tables and the per-thread slice contexts.  Sets h->context_initialized on
 * success, clears it on failure.
 * NOTE(review): this is an extraction of doxygen-rendered source; the
 * signature line (orig. line 931, presumably
 * `static int h264_slice_header_init(H264Context *h)`) and a few body lines
 * (937, 953, 958-959, 970, 1015) were dropped — confirm against the
 * original file before building. */
 932 {
 933  const SPS *sps = h->ps.sps;
 934  int i, ret;
 935 
 936  if (!sps) {
 /* NOTE(review): dropped line 937 presumably set ret to an error code. */
 938  goto fail;
 939  }
 940 
 941  ff_set_sar(h->avctx, sps->sar);
 942  av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
 943  &h->chroma_x_shift, &h->chroma_y_shift);
 944 
 945  if (sps->timing_info_present_flag) {
 946  int64_t den = sps->time_scale;
 /* x264 builds before 44 wrote time_scale without the field doubling. */
 947  if (h->x264_build < 44U)
 948  den *= 2;
 949  av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
 950  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
 951  }
 952 
 954 
 955  h->first_field = 0;
 956  h->prev_interlaced_frame = 1;
 957 
 /* NOTE(review): dropped lines 958-959 presumably assigned ret from a
  * table/context allocation call checked below. */
 960  if (ret < 0) {
 961  av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
 962  goto fail;
 963  }
 964 
 /* Only 8-10, 12 and 14 bit luma are supported; 11 and 13 are rejected. */
 965  if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
 966  sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
 967  ) {
 968  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
 969  sps->bit_depth_luma);
 971  goto fail;
 972  }
 973 
 974  h->cur_bit_depth_luma =
 975  h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
 976  h->cur_chroma_format_idc = sps->chroma_format_idc;
 977  h->pixel_shift = sps->bit_depth_luma > 8;
 978  h->chroma_format_idc = sps->chroma_format_idc;
 979  h->bit_depth_luma = sps->bit_depth_luma;
 980 
 /* Re-select the DSP/prediction function tables for the new bit depth
  * and chroma format. */
 981  ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
 982  sps->chroma_format_idc);
 983  ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
 984  ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
 985  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
 986  sps->chroma_format_idc);
 987  ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
 988 
 /* Without slice threading only slice_ctx[0] is used; otherwise every
  * slice context gets its own slice of the shared per-MB tables. */
 989  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
 990  ret = ff_h264_slice_context_init(h, &h->slice_ctx[0]);
 991  if (ret < 0) {
 992  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
 993  goto fail;
 994  }
 995  } else {
 996  for (i = 0; i < h->nb_slice_ctx; i++) {
 997  H264SliceContext *sl = &h->slice_ctx[i];
 998 
 999  sl->h264 = h;
 1000  sl->intra4x4_pred_mode = h->intra4x4_pred_mode + i * 8 * 2 * h->mb_stride;
 1001  sl->mvd_table[0] = h->mvd_table[0] + i * 8 * 2 * h->mb_stride;
 1002  sl->mvd_table[1] = h->mvd_table[1] + i * 8 * 2 * h->mb_stride;
 1003 
 1004  if ((ret = ff_h264_slice_context_init(h, sl)) < 0) {
 1005  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
 1006  goto fail;
 1007  }
 1008  }
 1009  }
 1010 
 1011  h->context_initialized = 1;
 1012 
 1013  return 0;
 1014 fail:
 /* NOTE(review): dropped line 1015 presumably freed tables/context here. */
 1016  h->context_initialized = 0;
 1017  return ret;
 1018 }
1019 
/* non_j_pixfmt: map a "J" (full-range JPEG) pixel format to its MPEG-range
 * twin so that range-only differences do not trigger a context reinit in
 * h264_init_ps() (see the comparison at orig. line 1070).
 * NOTE(review): the signature line (orig. 1020) and the case labels
 * (orig. 1023-1025, presumably AV_PIX_FMT_YUVJ4xxP -> AV_PIX_FMT_YUV4xxP
 * mappings) were dropped by the extraction — confirm against the original
 * file. */
 1021 {
 1022  switch (a) {
 1026  default:
 /* Any non-J format is returned unchanged. */
 1027  return a;
 1028  }
 1029 }
1030 
/* h264_init_ps: activate the PPS referenced by the slice header (and the SPS
 * it points to), export stream properties to the AVCodecContext, and decide
 * whether the decoder context must be (re)initialized.  On a geometry /
 * bit-depth / pixel-format change it flushes and re-runs
 * h264_slice_header_init(); such a change is only legal on the first slice
 * of a picture. */
 1031 static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
 1032 {
 1033  const SPS *sps;
 1034  int needs_reinit = 0, must_reinit, ret;
 1035 
 /* Switch to the PPS referenced by this (first) slice, keeping a ref so it
  * outlives any update of the global PPS list. */
 1036  if (first_slice) {
 1037  av_buffer_unref(&h->ps.pps_ref);
 1038  h->ps.pps = NULL;
 1039  h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[sl->pps_id]);
 1040  if (!h->ps.pps_ref)
 1041  return AVERROR(ENOMEM);
 1042  h->ps.pps = (const PPS*)h->ps.pps_ref->data;
 1043  }
 1044 
 /* SPS changed: check whether any parameter that affects allocated state
  * differs from what the context was built with. */
 1045  if (h->ps.sps != h->ps.pps->sps) {
 1046  h->ps.sps = (const SPS*)h->ps.pps->sps;
 1047 
 1048  if (h->mb_width != h->ps.sps->mb_width ||
 1049  h->mb_height != h->ps.sps->mb_height ||
 1050  h->cur_bit_depth_luma != h->ps.sps->bit_depth_luma ||
 1051  h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
 1052  )
 1053  needs_reinit = 1;
 1054 
 1055  if (h->bit_depth_luma != h->ps.sps->bit_depth_luma ||
 1056  h->chroma_format_idc != h->ps.sps->chroma_format_idc)
 1057  needs_reinit = 1;
 1058  }
 1059  sps = h->ps.sps;
 1060 
 1061  must_reinit = (h->context_initialized &&
 1062  ( 16*sps->mb_width != h->avctx->coded_width
 1063  || 16*sps->mb_height != h->avctx->coded_height
 1064  || h->cur_bit_depth_luma != sps->bit_depth_luma
 1065  || h->cur_chroma_format_idc != sps->chroma_format_idc
 1066  || h->mb_width != sps->mb_width
 1067  || h->mb_height != sps->mb_height
 1068  ));
 /* Range-only (J vs non-J) pixel format differences are ignored here. */
 1069  if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
 1070  || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
 1071  must_reinit = 1;
 1072 
 1073  if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
 1074  must_reinit = 1;
 1075 
 /* Export stream-level properties unless the caller froze them
  * (setup_finished is set e.g. once frame threading fixed the config). */
 1076  if (!h->setup_finished) {
 1077  h->avctx->profile = ff_h264_get_profile(sps);
 1078  h->avctx->level = sps->level_idc;
 1079  h->avctx->refs = sps->ref_frame_count;
 1080 
 1081  h->mb_width = sps->mb_width;
 1082  h->mb_height = sps->mb_height;
 1083  h->mb_num = h->mb_width * h->mb_height;
 1084  h->mb_stride = h->mb_width + 1;
 1085 
 1086  h->b_stride = h->mb_width * 4;
 1087 
 1088  h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
 1089 
 1090  h->width = 16 * h->mb_width;
 1091  h->height = 16 * h->mb_height;
 1092 
 1093  init_dimensions(h);
 1094 
 1095  if (sps->video_signal_type_present_flag) {
 1096  h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
 1097  : AVCOL_RANGE_MPEG;
 1098  if (sps->colour_description_present_flag) {
 1099  if (h->avctx->colorspace != sps->colorspace)
 1100  needs_reinit = 1;
 1101  h->avctx->color_primaries = sps->color_primaries;
 1102  h->avctx->color_trc = sps->color_trc;
 1103  h->avctx->colorspace = sps->colorspace;
 1104  }
 1105  }
 1106 
 /* Alternative-transfer SEI overrides the SPS transfer characteristics. */
 1107  if (h->sei.alternative_transfer.present &&
 1108  av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) &&
 1109  h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
 1110  h->avctx->color_trc = h->sei.alternative_transfer.preferred_transfer_characteristics;
 1111  }
 1112  }
 1113  h->avctx->chroma_sample_location = sps->chroma_location;
 1114 
 1115  if (!h->context_initialized || must_reinit || needs_reinit) {
 1116  int flush_changes = h->context_initialized;
 1117  h->context_initialized = 0;
 /* A mid-picture parameter change (non-first slice context) is invalid. */
 1118  if (sl != h->slice_ctx) {
 1119  av_log(h->avctx, AV_LOG_ERROR,
 1120  "changing width %d -> %d / height %d -> %d on "
 1121  "slice %d\n",
 1122  h->width, h->avctx->coded_width,
 1123  h->height, h->avctx->coded_height,
 1124  h->current_slice + 1);
 1125  return AVERROR_INVALIDDATA;
 1126  }
 1127 
 1128  av_assert1(first_slice);
 1129 
 1130  if (flush_changes)
 /* NOTE(review): dropped line 1131 — presumably ff_h264_flush_change(h);
  * confirm against the original file. */
 1132 
 1133  if ((ret = get_pixel_format(h, 1)) < 0)
 1134  return ret;
 1135  h->avctx->pix_fmt = ret;
 1136 
 1137  av_log(h->avctx, AV_LOG_VERBOSE, "Reinit context to %dx%d, "
 1138  "pix_fmt: %s\n", h->width, h->height, av_get_pix_fmt_name(h->avctx->pix_fmt));
 1139 
 1140  if ((ret = h264_slice_header_init(h)) < 0) {
 1141  av_log(h->avctx, AV_LOG_ERROR,
 1142  "h264_slice_header_init() failed\n");
 1143  return ret;
 1144  }
 1145  }
 1146 
 1147  return 0;
 1148 }
1149 
/* h264_export_frame_props: fill the output AVFrame's properties and side
 * data from decoded SEI messages and the decoding process: interlacing /
 * repeat_pict / top_field_first, stereo 3D packing, display orientation,
 * AFD, A53 closed captions, unregistered user data and SMPTE timecodes.
 * NOTE(review): extraction dropped the signature line (orig. 1150,
 * presumably `static int h264_export_frame_props(H264Context *h)`), the
 * `case H264_SEI_PIC_STRUCT_*` / `case H264_SEI_FPA_TYPE_*` labels in both
 * switches, and the av_frame_new_side_data*() calls that assign `stereo`,
 * `rotation`, `sd` and `tcside` — confirm against the original file. */
 1151 {
 1152  const SPS *sps = h->ps.sps;
 1153  H264Picture *cur = h->cur_pic_ptr;
 1154  AVFrame *out = cur->f;
 1155 
 1156  out->interlaced_frame = 0;
 1157  out->repeat_pict = 0;
 1158 
 1159  /* Signal interlacing information externally. */
 1160  /* Prioritize picture timing SEI information over used
 1161  * decoding process if it exists. */
 1162  if (h->sei.picture_timing.present) {
 1163  int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
 1164  h->avctx);
 1165  if (ret < 0) {
 1166  av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
 1167  if (h->avctx->err_recognition & AV_EF_EXPLODE)
 1168  return ret;
 /* Best-effort: discard the broken SEI instead of failing. */
 1169  h->sei.picture_timing.present = 0;
 1170  }
 1171  }
 1172 
 /* Switch on pic_struct; NOTE(review): the case labels were dropped by the
  * extraction (orig. 1176, 1178-1179, 1182-1184, 1190-1191, 1197, 1200). */
 1173  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
 1174  H264SEIPictureTiming *pt = &h->sei.picture_timing;
 1175  switch (pt->pic_struct) {
 1177  break;
 1180  out->interlaced_frame = 1;
 1181  break;
 1185  out->interlaced_frame = 1;
 1186  else
 1187  // try to flag soft telecine progressive
 1188  out->interlaced_frame = h->prev_interlaced_frame;
 1189  break;
 1192  /* Signal the possibility of telecined film externally
 1193  * (pic_struct 5,6). From these hints, let the applications
 1194  * decide if they apply deinterlacing. */
 1195  out->repeat_pict = 1;
 1196  break;
 1198  out->repeat_pict = 2;
 1199  break;
 1201  out->repeat_pict = 4;
 1202  break;
 1203  }
 1204 
 /* ct_type (clock type) overrides for frame-level pic_structs. */
 1205  if ((pt->ct_type & 3) &&
 1206  pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
 1207  out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
 1208  } else {
 1209  /* Derive interlacing flag from used decoding process. */
 1210  out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
 1211  }
 1212  h->prev_interlaced_frame = out->interlaced_frame;
 1213 
 1214  if (cur->field_poc[0] != cur->field_poc[1]) {
 1215  /* Derive top_field_first from field pocs. */
 1216  out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
 1217  } else {
 1218  if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
 1219  /* Use picture timing SEI information. Even if it is a
 1220  * information of a past frame, better than nothing. */
 1221  if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
 1222  h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
 1223  out->top_field_first = 1;
 1224  else
 1225  out->top_field_first = 0;
 1226  } else if (out->interlaced_frame) {
 1227  /* Default to top field first when pic_struct_present_flag
 1228  * is not set but interlaced frame detected */
 1229  out->top_field_first = 1;
 1230  } else {
 1231  /* Most likely progressive */
 1232  out->top_field_first = 0;
 1233  }
 1234  }
 1235 
 /* Frame-packing SEI -> AVStereo3D side data.
  * NOTE(review): the line allocating `stereo` (orig. 1241) and the
  * case labels of the arrangement_type switch were dropped. */
 1236  if (h->sei.frame_packing.present &&
 1237  h->sei.frame_packing.arrangement_type <= 6 &&
 1238  h->sei.frame_packing.content_interpretation_type > 0 &&
 1239  h->sei.frame_packing.content_interpretation_type < 3) {
 1240  H264SEIFramePacking *fp = &h->sei.frame_packing;
 1242  if (stereo) {
 1243  switch (fp->arrangement_type) {
 1245  stereo->type = AV_STEREO3D_CHECKERBOARD;
 1246  break;
 1248  stereo->type = AV_STEREO3D_COLUMNS;
 1249  break;
 1251  stereo->type = AV_STEREO3D_LINES;
 1252  break;
 1254  if (fp->quincunx_sampling_flag)
 1256  else
 1257  stereo->type = AV_STEREO3D_SIDEBYSIDE;
 1258  break;
 1260  stereo->type = AV_STEREO3D_TOPBOTTOM;
 1261  break;
 1263  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
 1264  break;
 1265  case H264_SEI_FPA_TYPE_2D:
 1266  stereo->type = AV_STEREO3D_2D;
 1267  break;
 1268  }
 1269 
 1270  if (fp->content_interpretation_type == 2)
 1271  stereo->flags = AV_STEREO3D_FLAG_INVERT;
 1272 
 1273  if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
 1274  if (fp->current_frame_is_frame0_flag)
 1275  stereo->view = AV_STEREO3D_VIEW_LEFT;
 1276  else
 1277  stereo->view = AV_STEREO3D_VIEW_RIGHT;
 1278  }
 1279  }
 1280  }
 1281 
 /* Display-orientation SEI -> display matrix side data.
  * NOTE(review): lines 1288-1289 allocating `rotation` were dropped. */
 1282  if (h->sei.display_orientation.present &&
 1283  (h->sei.display_orientation.anticlockwise_rotation ||
 1284  h->sei.display_orientation.hflip ||
 1285  h->sei.display_orientation.vflip)) {
 1286  H264SEIDisplayOrientation *o = &h->sei.display_orientation;
 /* anticlockwise_rotation is a 16.16 fixed-point fraction of a turn. */
 1287  double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
 1290  sizeof(int32_t) * 9);
 1291  if (rotation) {
 1292  av_display_rotation_set((int32_t *)rotation->data, angle);
 1293  av_display_matrix_flip((int32_t *)rotation->data,
 1294  o->hflip, o->vflip);
 1295  }
 1296  }
 1297 
 /* Active Format Description SEI (one byte of side data).
  * NOTE(review): the side-data allocation line (orig. 1299) was dropped. */
 1298  if (h->sei.afd.present) {
 1300  sizeof(uint8_t));
 1301 
 1302  if (sd) {
 1303  *sd->data = h->sei.afd.active_format_description;
 1304  h->sei.afd.present = 0;
 1305  }
 1306  }
 1307 
 /* A53 closed captions: hand the buffer over to the frame as side data.
  * NOTE(review): the side-data-from-buf call (orig. 1311) was dropped. */
 1308  if (h->sei.a53_caption.buf_ref) {
 1309  H264SEIA53Caption *a53 = &h->sei.a53_caption;
 1310 
 1312  if (!sd)
 1313  av_buffer_unref(&a53->buf_ref);
 1314  a53->buf_ref = NULL;
 1315 
 1316  h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
 1317  }
 1318 
 /* Unregistered user-data SEI buffers -> side data, one entry each.
  * NOTE(review): lines 1323-1324 (side-data-from-buf call) were dropped. */
 1319  for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
 1320  H264SEIUnregistered *unreg = &h->sei.unregistered;
 1321 
 1322  if (unreg->buf_ref[i]) {
 1325  unreg->buf_ref[i]);
 1326  if (!sd)
 1327  av_buffer_unref(&unreg->buf_ref[i]);
 1328  unreg->buf_ref[i] = NULL;
 1329  }
 1330  }
 1331  h->sei.unregistered.nb_buf_ref = 0;
 1332 
 /* SMPTE timecodes from picture timing SEI -> S12M side data + metadata.
  * NOTE(review): lines 1337-1338 allocating `tcside` were dropped. */
 1333  if (h->sei.picture_timing.timecode_cnt > 0) {
 1334  uint32_t *tc_sd;
 1335  char tcbuf[AV_TIMECODE_STR_SIZE];
 1336 
 1339  sizeof(uint32_t)*4);
 1340  if (!tcside)
 1341  return AVERROR(ENOMEM);
 1342 
 1343  tc_sd = (uint32_t*)tcside->data;
 /* S12M layout: word 0 is the timecode count, words 1..3 the codes. */
 1344  tc_sd[0] = h->sei.picture_timing.timecode_cnt;
 1345 
 1346  for (int i = 0; i < tc_sd[0]; i++) {
 1347  int drop = h->sei.picture_timing.timecode[i].dropframe;
 1348  int hh = h->sei.picture_timing.timecode[i].hours;
 1349  int mm = h->sei.picture_timing.timecode[i].minutes;
 1350  int ss = h->sei.picture_timing.timecode[i].seconds;
 1351  int ff = h->sei.picture_timing.timecode[i].frame;
 1352 
 1353  tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
 1354  av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
 1355  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
 1356  }
 1357  h->sei.picture_timing.timecode_cnt = 0;
 1358  }
 1359 
 1360  return 0;
 1361 }
1362 
/* h264_select_output_frame: insert the current picture into the delayed
 * (reorder) buffer, grow has_b_frames adaptively from observed POC gaps,
 * and pick the lowest-POC delayed picture as the next output frame
 * (h->next_output_pic), tracking recovery-point status.
 * NOTE(review): the signature line (orig. 1363, presumably
 * `static int h264_select_output_frame(H264Context *h)`) and line 1407
 * (an av_assert0 on `pics`, by the surrounding code) were dropped by the
 * extraction — confirm against the original file. */
 1364 {
 1365  const SPS *sps = h->ps.sps;
 1366  H264Picture *out = h->cur_pic_ptr;
 1367  H264Picture *cur = h->cur_pic_ptr;
 1368  int i, pics, out_of_order, out_idx;
 1369 
 1370  cur->mmco_reset = h->mmco_reset;
 1371  h->mmco_reset = 0;
 1372 
 1373  if (sps->bitstream_restriction_flag ||
 1374  h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
 1375  h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
 1376  }
 1377 
 /* Insert cur->poc into the sorted last_pocs window; `i` ends up as the
  * insertion position, used below to estimate the reorder depth. */
 1378  for (i = 0; 1; i++) {
 1379  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
 1380  if(i)
 1381  h->last_pocs[i-1] = cur->poc;
 1382  break;
 1383  } else if(i) {
 1384  h->last_pocs[i-1]= h->last_pocs[i];
 1385  }
 1386  }
 1387  out_of_order = MAX_DELAYED_PIC_COUNT - i;
 /* int64_t cast avoids signed overflow when last_pocs span INT_MIN. */
 1388  if( cur->f->pict_type == AV_PICTURE_TYPE_B
 1389  || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - (int64_t)h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
 1390  out_of_order = FFMAX(out_of_order, 1);
 1391  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
 1392  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
 1393  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
 1394  h->last_pocs[i] = INT_MIN;
 1395  h->last_pocs[0] = cur->poc;
 1396  cur->mmco_reset = 1;
 1397  } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
 1398  int loglevel = h->avctx->frame_number > 1 ? AV_LOG_WARNING : AV_LOG_VERBOSE;
 1399  av_log(h->avctx, loglevel, "Increasing reorder buffer to %d\n", out_of_order);
 1400  h->avctx->has_b_frames = out_of_order;
 1401  }
 1402 
 1403  pics = 0;
 1404  while (h->delayed_pic[pics])
 1405  pics++;
 1406 
 1408 
 1409  h->delayed_pic[pics++] = cur;
 /* Keep delayed pictures referenced until they are output. */
 1410  if (cur->reference == 0)
 1411  cur->reference = DELAYED_PIC_REF;
 1412 
 /* Select the lowest-POC delayed picture, stopping the scan at key
  * frames / MMCO resets (POC restarts there). */
 1413  out = h->delayed_pic[0];
 1414  out_idx = 0;
 1415  for (i = 1; h->delayed_pic[i] &&
 1416  !h->delayed_pic[i]->f->key_frame &&
 1417  !h->delayed_pic[i]->mmco_reset;
 1418  i++)
 1419  if (h->delayed_pic[i]->poc < out->poc) {
 1420  out = h->delayed_pic[i];
 1421  out_idx = i;
 1422  }
 1423  if (h->avctx->has_b_frames == 0 &&
 1424  (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
 1425  h->next_outputed_poc = INT_MIN;
 1426  out_of_order = out->poc < h->next_outputed_poc;
 1427 
 /* Remove the chosen picture from the delay buffer when it leaves. */
 1428  if (out_of_order || pics > h->avctx->has_b_frames) {
 1429  out->reference &= ~DELAYED_PIC_REF;
 1430  for (i = out_idx; h->delayed_pic[i]; i++)
 1431  h->delayed_pic[i] = h->delayed_pic[i + 1];
 1432  }
 1433  if (!out_of_order && pics > h->avctx->has_b_frames) {
 1434  h->next_output_pic = out;
 1435  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
 1436  h->next_outputed_poc = INT_MIN;
 1437  } else
 1438  h->next_outputed_poc = out->poc;
 1439 
 1440  if (out->recovered) {
 1441  // We have reached an recovery point and all frames after it in
 1442  // display order are "recovered".
 1443  h->frame_recovered |= FRAME_RECOVERED_SEI;
 1444  }
 1445  out->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
 1446 
 /* Suppress (or mark corrupt) frames decoded before a recovery point. */
 1447  if (!out->recovered) {
 1448  if (!(h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) &&
 1449  !(h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL)) {
 1450  h->next_output_pic = NULL;
 1451  } else {
 1452  out->f->flags |= AV_FRAME_FLAG_CORRUPT;
 1453  }
 1454  }
 1455  } else {
 1456  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
 1457  }
 1458 
 1459  return 0;
 1460 }
1461 
 1462 /* This function is called right after decoding the slice header for a first
 1463  * slice in a field (or a frame). It decides whether we are decoding a new frame
 1464  * or a second field in a pair and does the necessary setup.
 1465  */
/* NOTE(review): the first line of the signature (orig. 1466, presumably
 * `static int h264_field_start(H264Context *h, const H264SliceContext *sl,`)
 * was dropped by the extraction — confirm against the original file. */
 1467  const H2645NAL *nal, int first_slice)
 1468 {
 1469  int i;
 1470  const SPS *sps;
 1471 
 1472  int last_pic_structure, last_pic_droppable, ret;
 1473 
 /* Activate the slice's PPS/SPS and reinit the context if needed. */
 1474  ret = h264_init_ps(h, sl, first_slice);
 1475  if (ret < 0)
 1476  return ret;
 1477 
 1478  sps = h->ps.sps;
 1479 
 1480  if (sps && sps->bitstream_restriction_flag &&
 1481  h->avctx->has_b_frames < sps->num_reorder_frames) {
 1482  h->avctx->has_b_frames = sps->num_reorder_frames;
 1483  }
 1484 
 1485  last_pic_droppable = h->droppable;
 1486  last_pic_structure = h->picture_structure;
 1487  h->droppable = (nal->ref_idc == 0);
 1488  h->picture_structure = sl->picture_structure;
 1489 
 /* Copy the POC-related slice header fields into the global POC state. */
 1490  h->poc.frame_num = sl->frame_num;
 1491  h->poc.poc_lsb = sl->poc_lsb;
 1492  h->poc.delta_poc_bottom = sl->delta_poc_bottom;
 1493  h->poc.delta_poc[0] = sl->delta_poc[0];
 1494  h->poc.delta_poc[1] = sl->delta_poc[1];
 1495 
 1496  /* Shorten frame num gaps so we don't have to allocate reference
 1497  * frames just to throw them away */
 1498  if (h->poc.frame_num != h->poc.prev_frame_num) {
 1499  int unwrap_prev_frame_num = h->poc.prev_frame_num;
 1500  int max_frame_num = 1 << sps->log2_max_frame_num;
 1501 
 1502  if (unwrap_prev_frame_num > h->poc.frame_num)
 1503  unwrap_prev_frame_num -= max_frame_num;
 1504 
 1505  if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
 1506  unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
 1507  if (unwrap_prev_frame_num < 0)
 1508  unwrap_prev_frame_num += max_frame_num;
 1509 
 1510  h->poc.prev_frame_num = unwrap_prev_frame_num;
 1511  }
 1512  }
 1513 
 1514  /* See if we have a decoded first field looking for a pair...
 1515  * Here, we're using that to see if we should mark previously
 1516  * decode frames as "finished".
 1517  * We have to do that before the "dummy" in-between frame allocation,
 1518  * since that can modify h->cur_pic_ptr. */
 1519  if (h->first_field) {
 1520  int last_field = last_pic_structure == PICT_BOTTOM_FIELD;
 1521  av_assert0(h->cur_pic_ptr);
 1522  av_assert0(h->cur_pic_ptr->f->buf[0]);
 1523  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 1524 
 1525  /* Mark old field/frame as completed */
 1526  if (h->cur_pic_ptr->tf.owner[last_field] == h->avctx) {
 1527  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, last_field);
 1528  }
 1529 
 1530  /* figure out if we have a complementary field pair */
 1531  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
 1532  /* Previous field is unmatched. Don't display it, but let it
 1533  * remain for reference if marked as such. */
 1534  if (last_pic_structure != PICT_FRAME) {
 1535  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
 1536  last_pic_structure == PICT_TOP_FIELD);
 1537  }
 1538  } else {
 1539  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
 1540  /* This and previous field were reference, but had
 1541  * different frame_nums. Consider this field first in
 1542  * pair. Throw away previous field except for reference
 1543  * purposes. */
 1544  if (last_pic_structure != PICT_FRAME) {
 1545  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
 1546  last_pic_structure == PICT_TOP_FIELD);
 1547  }
 1548  } else {
 1549  /* Second field in complementary pair */
 1550  if (!((last_pic_structure == PICT_TOP_FIELD &&
 1551  h->picture_structure == PICT_BOTTOM_FIELD) ||
 1552  (last_pic_structure == PICT_BOTTOM_FIELD &&
 1553  h->picture_structure == PICT_TOP_FIELD))) {
 1554  av_log(h->avctx, AV_LOG_ERROR,
 1555  "Invalid field mode combination %d/%d\n",
 1556  last_pic_structure, h->picture_structure);
 /* Restore the previous state before bailing out. */
 1557  h->picture_structure = last_pic_structure;
 1558  h->droppable = last_pic_droppable;
 1559  return AVERROR_INVALIDDATA;
 1560  } else if (last_pic_droppable != h->droppable) {
 1561  avpriv_request_sample(h->avctx,
 1562  "Found reference and non-reference fields in the same frame, which");
 1563  h->picture_structure = last_pic_structure;
 1564  h->droppable = last_pic_droppable;
 1565  return AVERROR_PATCHWELCOME;
 1566  }
 1567  }
 1568  }
 1569  }
 1570 
 /* Conceal frame_num gaps: synthesize dummy frames for skipped frame
  * numbers so reference handling stays consistent. */
 1571  while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
 1572  h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
 1573  H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
 1574  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
 1575  h->poc.frame_num, h->poc.prev_frame_num);
 1576  if (!sps->gaps_in_frame_num_allowed_flag)
 1577  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
 1578  h->last_pocs[i] = INT_MIN;
 1579  ret = h264_frame_start(h);
 1580  if (ret < 0) {
 1581  h->first_field = 0;
 1582  return ret;
 1583  }
 1584 
 1585  h->poc.prev_frame_num++;
 1586  h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
 1587  h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
 1588  h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
 1589  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
 1590  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
 1591 
 1592  h->explicit_ref_marking = 0;
 /* NOTE(review): dropped line 1593 — presumably the
  * ff_h264_execute_ref_pic_marking(h) call whose result is checked
  * below; confirm against the original file. */
 1594  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
 1595  return ret;
 1596  /* Error concealment: If a ref is missing, copy the previous ref
 1597  * in its place.
 1598  * FIXME: Avoiding a memcpy would be nice, but ref handling makes
 1599  * many assumptions about there being no actual duplicates.
 1600  * FIXME: This does not copy padding for out-of-frame motion
 1601  * vectors. Given we are concealing a lost frame, this probably
 1602  * is not noticeable by comparison, but it should be fixed. */
 1603  if (h->short_ref_count) {
 /* Mid-grey fill color per plane (luma, two chroma, no alpha). */
 1604  int c[4] = {
 1605  1<<(h->ps.sps->bit_depth_luma-1),
 1606  1<<(h->ps.sps->bit_depth_chroma-1),
 1607  1<<(h->ps.sps->bit_depth_chroma-1),
 1608  -1
 1609  };
 1610 
 1611  if (prev &&
 1612  h->short_ref[0]->f->width == prev->f->width &&
 1613  h->short_ref[0]->f->height == prev->f->height &&
 1614  h->short_ref[0]->f->format == prev->f->format) {
 1615  ff_thread_await_progress(&prev->tf, INT_MAX, 0);
 1616  if (prev->field_picture)
 1617  ff_thread_await_progress(&prev->tf, INT_MAX, 1);
 1618  ff_thread_release_buffer(h->avctx, &h->short_ref[0]->tf);
 1619  h->short_ref[0]->tf.f = h->short_ref[0]->f;
 1620  ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
 1621  if (ret < 0)
 1622  return ret;
 1623  h->short_ref[0]->poc = prev->poc + 2U;
 1624  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 0);
 1625  if (h->short_ref[0]->field_picture)
 1626  ff_thread_report_progress(&h->short_ref[0]->tf, INT_MAX, 1);
 1627  } else if (!h->frame_recovered && !h->avctx->hwaccel)
 1628  ff_color_frame(h->short_ref[0]->f, c);
 1629  h->short_ref[0]->frame_num = h->poc.prev_frame_num;
 1630  }
 1631  }
 1632 
 1633  /* See if we have a decoded first field looking for a pair...
 1634  * We're using that to see whether to continue decoding in that
 1635  * frame, or to allocate a new one. */
 1636  if (h->first_field) {
 1637  av_assert0(h->cur_pic_ptr);
 1638  av_assert0(h->cur_pic_ptr->f->buf[0]);
 1639  assert(h->cur_pic_ptr->reference != DELAYED_PIC_REF);
 1640 
 1641  /* figure out if we have a complementary field pair */
 1642  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
 1643  /* Previous field is unmatched. Don't display it, but let it
 1644  * remain for reference if marked as such. */
 1645  h->missing_fields ++;
 1646  h->cur_pic_ptr = NULL;
 1647  h->first_field = FIELD_PICTURE(h);
 1648  } else {
 1649  h->missing_fields = 0;
 1650  if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
 1651  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
 1652  h->picture_structure==PICT_BOTTOM_FIELD);
 1653  /* This and the previous field had different frame_nums.
 1654  * Consider this field first in pair. Throw away previous
 1655  * one except for reference purposes. */
 1656  h->first_field = 1;
 1657  h->cur_pic_ptr = NULL;
 1658  } else if (h->cur_pic_ptr->reference & DELAYED_PIC_REF) {
 1659  /* This frame was already output, we cannot draw into it
 1660  * anymore.
 1661  */
 1662  h->first_field = 1;
 1663  h->cur_pic_ptr = NULL;
 1664  } else {
 1665  /* Second field in complementary pair */
 1666  h->first_field = 0;
 1667  }
 1668  }
 1669  } else {
 1670  /* Frame or first field in a potentially complementary pair */
 1671  h->first_field = FIELD_PICTURE(h);
 1672  }
 1673 
 1674  if (!FIELD_PICTURE(h) || h->first_field) {
 1675  if (h264_frame_start(h) < 0) {
 1676  h->first_field = 0;
 1677  return AVERROR_INVALIDDATA;
 1678  }
 1679  } else {
 /* Continue decoding the second field into the same picture. */
 1680  int field = h->picture_structure == PICT_BOTTOM_FIELD;
 /* NOTE(review): dropped line 1681 — presumably
  * release_unused_pictures(h, 0); confirm against the original file. */
 1682  h->cur_pic_ptr->tf.owner[field] = h->avctx;
 1683  }
 1684  /* Some macroblocks can be accessed before they're available in case
 1685  * of lost slices, MBAFF or threading. */
 1686  if (FIELD_PICTURE(h)) {
 1687  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
 1688  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
 1689  } else {
 1690  memset(h->slice_table, -1,
 1691  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
 1692  }
 1693 
 1694  ret = ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
 1695  h->ps.sps, &h->poc, h->picture_structure, nal->ref_idc);
 1696  if (ret < 0)
 1697  return ret;
 1698 
 /* Adopt the slice's memory-management control operations. */
 1699  memcpy(h->mmco, sl->mmco, sl->nb_mmco * sizeof(*h->mmco));
 1700  h->nb_mmco = sl->nb_mmco;
 1701  h->explicit_ref_marking = sl->explicit_ref_marking;
 1702 
 1703  h->picture_idr = nal->type == H264_NAL_IDR_SLICE;
 1704 
 /* Recovery-point SEI: track the frame_num at which output becomes
  * trustworthy again after a non-IDR random access point. */
 1705  if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
 1706  const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
 1707 
 1708  if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
 1709  h->valid_recovery_point = 1;
 1710 
 1711  if ( h->recovery_frame < 0
 1712  || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
 1713  h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
 1714 
 1715  if (!h->valid_recovery_point)
 1716  h->recovery_frame = h->poc.frame_num;
 1717  }
 1718  }
 1719 
 1720  h->cur_pic_ptr->f->key_frame |= (nal->type == H264_NAL_IDR_SLICE);
 1721 
 1722  if (nal->type == H264_NAL_IDR_SLICE ||
 1723  (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
 1724  h->recovery_frame = -1;
 1725  h->cur_pic_ptr->recovered = 1;
 1726  }
 1727  // If we have an IDR, all frames after it in decoded order are
 1728  // "recovered".
 1729  if (nal->type == H264_NAL_IDR_SLICE)
 1730  h->frame_recovered |= FRAME_RECOVERED_IDR;
 1731 #if 1
 1732  h->cur_pic_ptr->recovered |= h->frame_recovered;
 1733 #else
 1734  h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
 1735 #endif
 1736 
 1737  /* Set the frame properties/side data. Only done for the second field in
 1738  * field coded frames, since some SEI information is present for each field
 1739  * and is merged by the SEI parsing code. */
 1740  if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
 /* NOTE(review): dropped line 1741 — presumably
  * `ret = h264_export_frame_props(h);`. */
 1742  if (ret < 0)
 1743  return ret;
 1744 
 /* NOTE(review): dropped line 1745 — presumably
  * `ret = h264_select_output_frame(h);`. */
 1746  if (ret < 0)
 1747  return ret;
 1748  }
 1749 
 1750  return 0;
 1751 }
1752 
/* h264_slice_header_parse: parse the slice_header() bitstream syntax
 * (ITU-T H.264 section 7.3.3) into the slice context `sl` without touching
 * global decoder state — field start / global setup happens later in
 * h264_field_start().
 * NOTE(review): the first line of the signature (orig. 1753, presumably
 * `static int h264_slice_header_parse(const H264Context *h,
 * H264SliceContext *sl,`) was dropped by the extraction — confirm against
 * the original file. */
 1754  const H2645NAL *nal)
 1755 {
 1756  const SPS *sps;
 1757  const PPS *pps;
 1758  int ret;
 1759  unsigned int slice_type, tmp, i;
 1760  int field_pic_flag, bottom_field_flag;
 1761  int first_slice = sl == h->slice_ctx && !h->current_slice;
 1762  int picture_structure;
 1763 
 1764  if (first_slice)
 1765  av_assert0(!h->setup_finished);
 1766 
 1767  sl->first_mb_addr = get_ue_golomb_long(&sl->gb);
 1768 
 /* slice_type 5..9 means "type fixed for the whole picture" (type - 5). */
 1769  slice_type = get_ue_golomb_31(&sl->gb);
 1770  if (slice_type > 9) {
 1771  av_log(h->avctx, AV_LOG_ERROR,
 1772  "slice type %d too large at %d\n",
 1773  slice_type, sl->first_mb_addr);
 1774  return AVERROR_INVALIDDATA;
 1775  }
 1776  if (slice_type > 4) {
 1777  slice_type -= 5;
 1778  sl->slice_type_fixed = 1;
 1779  } else
 1780  sl->slice_type_fixed = 0;
 1781 
 1782  slice_type = ff_h264_golomb_to_pict_type[slice_type];
 1783  sl->slice_type = slice_type;
 /* slice_type_nos folds SP->P and SI->I ("no switching" type). */
 1784  sl->slice_type_nos = slice_type & 3;
 1785 
 /* NOTE(review): dropped line 1787 — presumably the
  * `slice_type != AV_PICTURE_TYPE_I) {` half of this condition. */
 1786  if (nal->type == H264_NAL_IDR_SLICE &&
 1788  av_log(h->avctx, AV_LOG_ERROR, "A non-intra slice in an IDR NAL unit.\n");
 1789  return AVERROR_INVALIDDATA;
 1790  }
 1791 
 1792  sl->pps_id = get_ue_golomb(&sl->gb);
 1793  if (sl->pps_id >= MAX_PPS_COUNT) {
 1794  av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", sl->pps_id);
 1795  return AVERROR_INVALIDDATA;
 1796  }
 1797  if (!h->ps.pps_list[sl->pps_id]) {
 1798  av_log(h->avctx, AV_LOG_ERROR,
 1799  "non-existing PPS %u referenced\n",
 1800  sl->pps_id);
 1801  return AVERROR_INVALIDDATA;
 1802  }
 1803  pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
 1804  sps = pps->sps;
 1805 
 1806  sl->frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 /* All slices of one picture must carry the same frame_num. */
 1807  if (!first_slice) {
 1808  if (h->poc.frame_num != sl->frame_num) {
 1809  av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
 1810  h->poc.frame_num, sl->frame_num);
 1811  return AVERROR_INVALIDDATA;
 1812  }
 1813  }
 1814 
 1815  sl->mb_mbaff = 0;
 1816 
 1817  if (sps->frame_mbs_only_flag) {
 1818  picture_structure = PICT_FRAME;
 1819  } else {
 1820  if (!sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
 1821  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
 1822  return -1;
 1823  }
 1824  field_pic_flag = get_bits1(&sl->gb);
 1825  if (field_pic_flag) {
 1826  bottom_field_flag = get_bits1(&sl->gb);
 1827  picture_structure = PICT_TOP_FIELD + bottom_field_flag;
 1828  } else {
 1829  picture_structure = PICT_FRAME;
 1830  }
 1831  }
 1832  sl->picture_structure = picture_structure;
 1833  sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
 1834 
 /* Field pictures use doubled picture numbers (spec 8.2.4.1). */
 1835  if (picture_structure == PICT_FRAME) {
 1836  sl->curr_pic_num = sl->frame_num;
 1837  sl->max_pic_num = 1 << sps->log2_max_frame_num;
 1838  } else {
 1839  sl->curr_pic_num = 2 * sl->frame_num + 1;
 1840  sl->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
 1841  }
 1842 
 1843  if (nal->type == H264_NAL_IDR_SLICE)
 1844  get_ue_golomb_long(&sl->gb); /* idr_pic_id */
 1845 
 1846  if (sps->poc_type == 0) {
 1847  sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
 1848 
 1849  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
 1850  sl->delta_poc_bottom = get_se_golomb(&sl->gb);
 1851  }
 1852 
 1853  if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
 1854  sl->delta_poc[0] = get_se_golomb(&sl->gb);
 1855 
 1856  if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
 1857  sl->delta_poc[1] = get_se_golomb(&sl->gb);
 1858  }
 1859 
 1860  sl->redundant_pic_count = 0;
 1861  if (pps->redundant_pic_cnt_present)
 1862  sl->redundant_pic_count = get_ue_golomb(&sl->gb);
 1863 
 1864  if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
 1865  sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
 1866 
 /* NOTE(review): dropped line 1867 — presumably
  * `ret = h264_init_ps(...)`-style call; by the arguments below it is the
  * ff_h264_parse_ref_count(&sl->list_count, sl->ref_count, ...) call. */
 1868  &sl->gb, pps, sl->slice_type_nos,
 1869  picture_structure, h->avctx);
 1870  if (ret < 0)
 1871  return ret;
 1872 
 1873  if (sl->slice_type_nos != AV_PICTURE_TYPE_I) {
 /* NOTE(review): dropped line 1874 — presumably
  * `ret = ff_h264_decode_ref_pic_list_reordering(sl, h->avctx);`. */
 1875  if (ret < 0) {
 1876  sl->ref_count[1] = sl->ref_count[0] = 0;
 1877  return ret;
 1878  }
 1879  }
 1880 
 1881  sl->pwt.use_weight = 0;
 1882  for (i = 0; i < 2; i++) {
 1883  sl->pwt.luma_weight_flag[i] = 0;
 1884  sl->pwt.chroma_weight_flag[i] = 0;
 1885  }
 /* Explicit weighted prediction tables (pred_weight_table()).
  * NOTE(review): dropped lines 1888-1889 — presumably the B-slice half of
  * the condition and the ff_h264_pred_weight_table( call. */
 1886  if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
 1887  (pps->weighted_bipred_idc == 1 &&
 1890  sl->slice_type_nos, &sl->pwt,
 1891  picture_structure, h->avctx);
 1892  if (ret < 0)
 1893  return ret;
 1894  }
 1895 
 1896  sl->explicit_ref_marking = 0;
 1897  if (nal->ref_idc) {
 1898  ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
 1899  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
 1900  return AVERROR_INVALIDDATA;
 1901  }
 1902 
 1903  if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
 1904  tmp = get_ue_golomb_31(&sl->gb);
 1905  if (tmp > 2) {
 1906  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
 1907  return AVERROR_INVALIDDATA;
 1908  }
 1909  sl->cabac_init_idc = tmp;
 1910  }
 1911 
 1912  sl->last_qscale_diff = 0;
 /* (unsigned) cast avoids signed overflow UB on bogus slice_qp_delta. */
 1913  tmp = pps->init_qp + (unsigned)get_se_golomb(&sl->gb);
 1914  if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
 1915  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
 1916  return AVERROR_INVALIDDATA;
 1917  }
 1918  sl->qscale = tmp;
 1919  sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
 1920  sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
 1921  // FIXME qscale / qp ... stuff
 1922  if (sl->slice_type == AV_PICTURE_TYPE_SP)
 1923  get_bits1(&sl->gb); /* sp_for_switch_flag */
 /* NOTE(review): dropped line 1925 — presumably
  * `sl->slice_type == AV_PICTURE_TYPE_SI)`. */
 1924  if (sl->slice_type == AV_PICTURE_TYPE_SP ||
 1926  get_se_golomb(&sl->gb); /* slice_qs_delta */
 1927 
 1928  sl->deblocking_filter = 1;
 1929  sl->slice_alpha_c0_offset = 0;
 1930  sl->slice_beta_offset = 0;
 1931  if (pps->deblocking_filter_parameters_present) {
 1932  tmp = get_ue_golomb_31(&sl->gb);
 1933  if (tmp > 2) {
 1934  av_log(h->avctx, AV_LOG_ERROR,
 1935  "deblocking_filter_idc %u out of range\n", tmp);
 1936  return AVERROR_INVALIDDATA;
 1937  }
 /* Bitstream idc 0/1 maps to internal on/off in the opposite sense. */
 1938  sl->deblocking_filter = tmp;
 1939  if (sl->deblocking_filter < 2)
 1940  sl->deblocking_filter ^= 1; // 1<->0
 1941 
 1942  if (sl->deblocking_filter) {
 1943  int slice_alpha_c0_offset_div2 = get_se_golomb(&sl->gb);
 1944  int slice_beta_offset_div2 = get_se_golomb(&sl->gb);
 1945  if (slice_alpha_c0_offset_div2 > 6 ||
 1946  slice_alpha_c0_offset_div2 < -6 ||
 1947  slice_beta_offset_div2 > 6 ||
 1948  slice_beta_offset_div2 < -6) {
 1949  av_log(h->avctx, AV_LOG_ERROR,
 1950  "deblocking filter parameters %d %d out of range\n",
 1951  slice_alpha_c0_offset_div2, slice_beta_offset_div2);
 1952  return AVERROR_INVALIDDATA;
 1953  }
 1954  sl->slice_alpha_c0_offset = slice_alpha_c0_offset_div2 * 2;
 1955  sl->slice_beta_offset = slice_beta_offset_div2 * 2;
 1956  }
 1957  }
 1958 
 1959  return 0;
 1960 }
1961 
1962 /* do all the per-slice initialization needed before we can start decoding the
1963  * actual MBs */
/* NOTE(review): this is a doxygen extraction; the function signature line
 * (presumably static int h264_slice_init(H264Context *h, H264SliceContext *sl, ...))
 * and a few other source lines were dropped, so the embedded numbers jump. */
1965  const H2645NAL *nal)
1966 {
1967  int i, j, ret = 0;
1968 
/* An IDR picture must not be mixed with non-IDR slices. */
1969  if (h->picture_idr && nal->type != H264_NAL_IDR_SLICE) {
1970  av_log(h->avctx, AV_LOG_ERROR, "Invalid mix of IDR and non-IDR slices\n");
1971  return AVERROR_INVALIDDATA;
1972  }
1973 
1974  av_assert1(h->mb_num == h->mb_width * h->mb_height);
/* Validate first_mb_in_slice against the picture's macroblock count
 * (shifted for field/MBAFF coding, where one row covers two MB rows). */
1975  if (sl->first_mb_addr << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
1976  sl->first_mb_addr >= h->mb_num) {
1977  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
1978  return AVERROR_INVALIDDATA;
1979  }
/* Resync position = where this slice starts; used by error concealment. */
1980  sl->resync_mb_x = sl->mb_x = sl->first_mb_addr % h->mb_width;
1981  sl->resync_mb_y = sl->mb_y = (sl->first_mb_addr / h->mb_width) <<
1983  if (h->picture_structure == PICT_BOTTOM_FIELD)
1984  sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
1985  av_assert1(sl->mb_y < h->mb_height);
1986 
1987  ret = ff_h264_build_ref_list(h, sl);
1988  if (ret < 0)
1989  return ret;
1990 
/* weighted_bipred_idc == 2: implicit weighted prediction for B slices.
 * NOTE(review): the condition's second line was dropped by the extractor;
 * presumably it checks sl->slice_type_nos == AV_PICTURE_TYPE_B. */
1991  if (h->ps.pps->weighted_bipred_idc == 2 &&
1993  implicit_weight_table(h, sl, -1);
1994  if (FRAME_MBAFF(h)) {
1995  implicit_weight_table(h, sl, 0);
1996  implicit_weight_table(h, sl, 1);
1997  }
1998  }
1999 
/* NOTE(review): lines 2000-2003 are partially missing in this listing. */
2002  if (!h->setup_finished)
2004 
/* Honor the user's skip_loop_filter setting: disable deblocking for
 * frame classes the user asked to skip filtering on. */
2005  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
2006  (h->avctx->skip_loop_filter >= AVDISCARD_NONKEY &&
2007  h->nal_unit_type != H264_NAL_IDR_SLICE) ||
2008  (h->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
2010  (h->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
2012  (h->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
2013  nal->ref_idc == 0))
2014  sl->deblocking_filter = 0;
2015 
2016  if (sl->deblocking_filter == 1 && h->nb_slice_ctx > 1) {
2017  if (h->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
2018  /* Cheat slightly for speed:
2019  * Do not bother to deblock across slices. */
2020  sl->deblocking_filter = 2;
2021  } else {
/* Correct cross-slice deblocking needs all slices decoded first. */
2022  h->postpone_filter = 1;
2023  }
2024  }
/* QP threshold below which the loop filter provably does nothing
 * (conservative; see fill_filter_caches). */
2025  sl->qp_thresh = 15 -
2027  FFMAX3(0,
2028  h->ps.pps->chroma_qp_index_offset[0],
2029  h->ps.pps->chroma_qp_index_offset[1]) +
2030  6 * (h->ps.sps->bit_depth_luma - 8);
2031 
2032  sl->slice_num = ++h->current_slice;
2033 
2034  if (sl->slice_num)
2035  h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
/* Heuristic: warn when slice numbers wrap around MAX_SLICES and two
 * slices land on (nearly) the same resync row. */
2036  if ( h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
2037  && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
2038  && sl->slice_num >= MAX_SLICES) {
2039  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
2040  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
2041  }
2042 
/* Build the ref2frm map for both reference lists: maps a reference index
 * to 4*frame_id + reference flags, so the loop filter can compare
 * references across slices. 60 (id_list default) marks "not found". */
2043  for (j = 0; j < 2; j++) {
2044  int id_list[16];
2045  int *ref2frm = h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][j];
2046  for (i = 0; i < 16; i++) {
2047  id_list[i] = 60;
2048  if (j < sl->list_count && i < sl->ref_count[j] &&
2049  sl->ref_list[j][i].parent->f->buf[0]) {
2050  int k;
2051  AVBuffer *buf = sl->ref_list[j][i].parent->f->buf[0]->buffer;
/* Identify the reference by its underlying buffer, first among
 * short-term then long-term references. */
2052  for (k = 0; k < h->short_ref_count; k++)
2053  if (h->short_ref[k]->f->buf[0]->buffer == buf) {
2054  id_list[i] = k;
2055  break;
2056  }
2057  for (k = 0; k < h->long_ref_count; k++)
2058  if (h->long_ref[k] && h->long_ref[k]->f->buf[0]->buffer == buf) {
2059  id_list[i] = h->short_ref_count + k;
2060  break;
2061  }
2062  }
2063  }
2064 
/* Entries [0..1] / [18..19] are -1 sentinels for "no reference";
 * the second table half (i >= 16) is the MBAFF field-pair variant. */
2065  ref2frm[0] =
2066  ref2frm[1] = -1;
2067  for (i = 0; i < 16; i++)
2068  ref2frm[i + 2] = 4 * id_list[i] + (sl->ref_list[j][i].reference & 3);
2069  ref2frm[18 + 0] =
2070  ref2frm[18 + 1] = -1;
2071  for (i = 16; i < 48; i++)
2072  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
2073  (sl->ref_list[j][i].reference & 3);
2074  }
2075 
/* Optional per-slice debug dump of the parsed header state. */
2076  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
2077  av_log(h->avctx, AV_LOG_DEBUG,
2078  "slice:%d %s mb:%d %c%s%s frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
2079  sl->slice_num,
2080  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
2081  sl->mb_y * h->mb_width + sl->mb_x,
2083  sl->slice_type_fixed ? " fix" : "",
2084  nal->type == H264_NAL_IDR_SLICE ? " IDR" : "",
2085  h->poc.frame_num,
2086  h->cur_pic_ptr->field_poc[0],
2087  h->cur_pic_ptr->field_poc[1],
2088  sl->ref_count[0], sl->ref_count[1],
2089  sl->qscale,
2090  sl->deblocking_filter,
2092  sl->pwt.use_weight,
2093  sl->pwt.use_weight == 1 && sl->pwt.use_weight_chroma ? "c" : "",
2094  sl->slice_type == AV_PICTURE_TYPE_B ? (sl->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
2095  }
2096 
2097  return 0;
2098 }
2099 
/* Parse a slice header and queue the slice for decoding.
 * NOTE(review): the signature line (2100) was dropped by the doxygen
 * extraction; presumably int ff_h264_queue_decode_slice(H264Context *h,
 * const H2645NAL *nal). Handles field-boundary detection, redundant-slice
 * discarding, skip_frame policy, and cross-slice PPS/SPS consistency. */
2101 {
2102  H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
2103  int first_slice = sl == h->slice_ctx && !h->current_slice;
2104  int ret;
2105 
2106  sl->gb = nal->gb;
2107 
2108  ret = h264_slice_header_parse(h, sl, nal);
2109  if (ret < 0)
2110  return ret;
2111 
2112  // discard redundant pictures
2113  if (sl->redundant_pic_count > 0) {
2114  sl->ref_count[0] = sl->ref_count[1] = 0;
2115  return 0;
2116  }
2117 
2118  if (sl->first_mb_addr == 0 || !h->current_slice) {
2119  if (h->setup_finished) {
2120  av_log(h->avctx, AV_LOG_ERROR, "Too many fields\n");
2121  return AVERROR_INVALIDDATA;
2122  }
2123  }
2124 
2125  if (sl->first_mb_addr == 0) { // FIXME better field boundary detection
2126  if (h->current_slice) {
2127  // this slice starts a new field
2128  // first decode any pending queued slices
2129  if (h->nb_slice_ctx_queued) {
2130  H264SliceContext tmp_ctx;
2131 
/* NOTE(review): line 2132 (presumably the call that decodes the
 * queued slices and assigns ret) was dropped by the extractor. */
2133  if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
2134  return ret;
2135 
/* Swap the freshly-parsed context into slot 0 so decoding of the
 * new field starts from h->slice_ctx[0]. */
2136  memcpy(&tmp_ctx, h->slice_ctx, sizeof(tmp_ctx));
2137  memcpy(h->slice_ctx, sl, sizeof(tmp_ctx));
2138  memcpy(sl, &tmp_ctx, sizeof(tmp_ctx));
2139  sl = h->slice_ctx;
2140  }
2141 
2142  if (h->cur_pic_ptr && FIELD_PICTURE(h) && h->first_field) {
2143  ret = ff_h264_field_end(h, h->slice_ctx, 1);
2144  if (ret < 0)
2145  return ret;
2146  } else if (h->cur_pic_ptr && !FIELD_PICTURE(h) && !h->first_field && h->nal_unit_type == H264_NAL_IDR_SLICE) {
2147  av_log(h, AV_LOG_WARNING, "Broken frame packetizing\n");
2148  ret = ff_h264_field_end(h, h->slice_ctx, 1);
/* Report full progress so any frame-threading consumers of the
 * abandoned picture are unblocked before dropping it. */
2149  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
2150  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
2151  h->cur_pic_ptr = NULL;
2152  if (ret < 0)
2153  return ret;
2154  } else
2155  return AVERROR_INVALIDDATA;
2156  }
2157 
2158  if (!h->first_field) {
2159  if (h->cur_pic_ptr && !h->droppable) {
2160  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
2161  h->picture_structure == PICT_BOTTOM_FIELD);
2162  }
2163  h->cur_pic_ptr = NULL;
2164  }
2165  }
2166 
2167  if (!h->current_slice)
2168  av_assert0(sl == h->slice_ctx);
2169 
/* Apply the user's skip_frame policy before any picture is started. */
2170  if (h->current_slice == 0 && !h->first_field) {
2171  if (
2172  (h->avctx->skip_frame >= AVDISCARD_NONREF && !h->nal_ref_idc) ||
2173  (h->avctx->skip_frame >= AVDISCARD_BIDIR && sl->slice_type_nos == AV_PICTURE_TYPE_B) ||
2174  (h->avctx->skip_frame >= AVDISCARD_NONINTRA && sl->slice_type_nos != AV_PICTURE_TYPE_I) ||
2175  (h->avctx->skip_frame >= AVDISCARD_NONKEY && h->nal_unit_type != H264_NAL_IDR_SLICE && h->sei.recovery_point.recovery_frame_cnt < 0) ||
2176  h->avctx->skip_frame >= AVDISCARD_ALL) {
2177  return 0;
2178  }
2179  }
2180 
/* Parameter sets must stay consistent between slices of one picture. */
2181  if (!first_slice) {
2182  const PPS *pps = (const PPS*)h->ps.pps_list[sl->pps_id]->data;
2183 
2184  if (h->ps.pps->sps_id != pps->sps_id ||
2185  h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
2186  (h->setup_finished && h->ps.pps != pps)*/) {
2187  av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
2188  return AVERROR_INVALIDDATA;
2189  }
2190  if (h->ps.sps != pps->sps) {
2191  av_log(h->avctx, AV_LOG_ERROR,
2192  "SPS changed in the middle of the frame\n");
2193  return AVERROR_INVALIDDATA;
2194  }
2195  }
2196 
2197  if (h->current_slice == 0) {
2198  ret = h264_field_start(h, sl, nal, first_slice);
2199  if (ret < 0)
2200  return ret;
2201  } else {
/* Later slices may not change field structure or droppability. */
2202  if (h->picture_structure != sl->picture_structure ||
2203  h->droppable != (nal->ref_idc == 0)) {
2204  av_log(h->avctx, AV_LOG_ERROR,
2205  "Changing field mode (%d -> %d) between slices is not allowed\n",
2206  h->picture_structure, sl->picture_structure);
2207  return AVERROR_INVALIDDATA;
2208  } else if (!h->cur_pic_ptr) {
2209  av_log(h->avctx, AV_LOG_ERROR,
2210  "unset cur_pic_ptr on slice %d\n",
2211  h->current_slice + 1);
2212  return AVERROR_INVALIDDATA;
2213  }
2214  }
2215 
2216  ret = h264_slice_init(h, sl, nal);
2217  if (ret < 0)
2218  return ret;
2219 
2220  h->nb_slice_ctx_queued++;
2221 
2222  return 0;
2223 }
2224 
/* Map an AV_PICTURE_TYPE_* slice type to the H.264 slice_type code
 * (P=0, B=1, I=2, SP=3, SI=4) as defined in the bitstream syntax.
 * NOTE(review): the signature line (2225) was dropped by the extractor;
 * presumably int ff_h264_get_slice_type(const H264SliceContext *sl). */
2226 {
2227  switch (sl->slice_type) {
2228  case AV_PICTURE_TYPE_P:
2229  return 0;
2230  case AV_PICTURE_TYPE_B:
2231  return 1;
2232  case AV_PICTURE_TYPE_I:
2233  return 2;
2234  case AV_PICTURE_TYPE_SP:
2235  return 3;
2236  case AV_PICTURE_TYPE_SI:
2237  return 4;
2238  default:
2239  return AVERROR_INVALIDDATA;
2240  }
2241 }
2242 
/* Fill the motion-vector and reference caches for one reference list of
 * the current MB and its top/left neighbours, as needed by the loop
 * filter. NOTE(review): the first signature line (2244 is a continuation;
 * the function-name line was dropped by the extractor). */
2244  H264SliceContext *sl,
2245  int mb_type, int top_xy,
2246  int left_xy[LEFT_MBS],
2247  int top_type,
2248  int left_type[LEFT_MBS],
2249  int mb_xy, int list)
2250 {
2251  int b_stride = h->b_stride;
/* Destination caches: row/col layout follows the scan8 cache geometry. */
2252  int16_t(*mv_dst)[2] = &sl->mv_cache[list][scan8[0]];
2253  int8_t *ref_cache = &sl->ref_cache[list][scan8[0]];
2254  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
/* Top neighbour: copy its bottom row of MVs / refs, remapped through
 * the neighbour slice's ref2frm table. */
2255  if (USES_LIST(top_type, list)) {
2256  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
2257  const int b8_xy = 4 * top_xy + 2;
2258  const int *ref2frm = &h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2259  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
2260  ref_cache[0 - 1 * 8] =
2261  ref_cache[1 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 0]];
2262  ref_cache[2 - 1 * 8] =
2263  ref_cache[3 - 1 * 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 1]];
2264  } else {
2265  AV_ZERO128(mv_dst - 1 * 8);
2266  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2267  }
2268 
/* Left neighbour: only when it has the same interlacing as the
 * current MB (otherwise the left edge is handled elsewhere). */
2269  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
2270  if (USES_LIST(left_type[LTOP], list)) {
2271  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
2272  const int b8_xy = 4 * left_xy[LTOP] + 1;
2273  const int *ref2frm = &h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2274  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
2275  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
2276  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
2277  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
2278  ref_cache[-1 + 0] =
2279  ref_cache[-1 + 8] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
2280  ref_cache[-1 + 16] =
2281  ref_cache[-1 + 24] = ref2frm[h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
2282  } else {
2283  AV_ZERO32(mv_dst - 1 + 0);
2284  AV_ZERO32(mv_dst - 1 + 8);
2285  AV_ZERO32(mv_dst - 1 + 16);
2286  AV_ZERO32(mv_dst - 1 + 24);
2287  ref_cache[-1 + 0] =
2288  ref_cache[-1 + 8] =
2289  ref_cache[-1 + 16] =
2290  ref_cache[-1 + 24] = LIST_NOT_USED;
2291  }
2292  }
2293  }
2294 
/* Current MB does not use this list: zero MVs, mark refs unused. */
2295  if (!USES_LIST(mb_type, list)) {
2296  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
2297  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2298  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2299  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2300  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
2301  return;
2302  }
2303 
/* Spread the four 8x8-block reference indices over the 4x4 cache grid
 * (each 8x8 ref duplicated into a 2x2 group of 4x4 cells). */
2304  {
2305  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
2306  const int *ref2frm = &h->ref2frm[sl->slice_num & (MAX_SLICES - 1)][list][(MB_MBAFF(sl) ? 20 : 2)];
2307  uint32_t ref01 = (pack16to32(ref2frm[ref[0]], ref2frm[ref[1]]) & 0x00FF00FF) * 0x0101;
2308  uint32_t ref23 = (pack16to32(ref2frm[ref[2]], ref2frm[ref[3]]) & 0x00FF00FF) * 0x0101;
2309  AV_WN32A(&ref_cache[0 * 8], ref01);
2310  AV_WN32A(&ref_cache[1 * 8], ref01);
2311  AV_WN32A(&ref_cache[2 * 8], ref23);
2312  AV_WN32A(&ref_cache[3 * 8], ref23);
2313  }
2314 
/* Copy the current MB's 4x4 motion vectors row by row into the cache. */
2315  {
2316  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * sl->mb_x + 4 * sl->mb_y * b_stride];
2317  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
2318  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
2319  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
2320  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
2321  }
2322 }
2323 
2324 /**
2325  * @return non zero if the loop filter can be skipped
2326  */
/* Prepare all per-MB caches (neighbour types, non-zero-count, MV/ref)
 * needed by the deblocking filter for the MB at sl->mb_xy. */
2327 static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
2328 {
2329  const int mb_xy = sl->mb_xy;
2330  int top_xy, left_xy[LEFT_MBS];
2331  int top_type, left_type[LEFT_MBS];
2332  uint8_t *nnz;
2333  uint8_t *nnz_cache;
2334 
2335  top_xy = mb_xy - (h->mb_stride << MB_FIELD(sl));
2336 
/* Resolve neighbour addresses; in MBAFF frames the top/left MB depends
 * on the field/frame coding of the current MB vs its neighbours. */
2337  left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
2338  if (FRAME_MBAFF(h)) {
2339  const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
2340  const int curr_mb_field_flag = IS_INTERLACED(mb_type);
2341  if (sl->mb_y & 1) {
2342  if (left_mb_field_flag != curr_mb_field_flag)
2343  left_xy[LTOP] -= h->mb_stride;
2344  } else {
2345  if (curr_mb_field_flag)
2346  top_xy += h->mb_stride &
2347  (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
2348  if (left_mb_field_flag != curr_mb_field_flag)
2349  left_xy[LBOT] += h->mb_stride;
2350  }
2351  }
2352 
2353  sl->top_mb_xy = top_xy;
2354  sl->left_mb_xy[LTOP] = left_xy[LTOP];
2355  sl->left_mb_xy[LBOT] = left_xy[LBOT];
2356  {
2357  /* For sufficiently low qp, filtering wouldn't do anything.
2358  * This is a conservative estimate: could also check beta_offset
2359  * and more accurate chroma_qp. */
2360  int qp_thresh = sl->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
2361  int qp = h->cur_pic.qscale_table[mb_xy];
2362  if (qp <= qp_thresh &&
2363  (left_xy[LTOP] < 0 ||
2364  ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
2365  (top_xy < 0 ||
2366  ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
2367  if (!FRAME_MBAFF(h))
2368  return 1;
2369  if ((left_xy[LTOP] < 0 ||
2370  ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
2371  (top_xy < h->mb_stride ||
2372  ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
2373  return 1;
2374  }
2375  }
2376 
/* Zero out neighbour types outside the current slice (deblocking_filter
 * == 2: do not filter across slice boundaries) or outside the picture. */
2377  top_type = h->cur_pic.mb_type[top_xy];
2378  left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
2379  left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
2380  if (sl->deblocking_filter == 2) {
2381  if (h->slice_table[top_xy] != sl->slice_num)
2382  top_type = 0;
2383  if (h->slice_table[left_xy[LBOT]] != sl->slice_num)
2384  left_type[LTOP] = left_type[LBOT] = 0;
2385  } else {
2386  if (h->slice_table[top_xy] == 0xFFFF)
2387  top_type = 0;
2388  if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
2389  left_type[LTOP] = left_type[LBOT] = 0;
2390  }
2391  sl->top_type = top_type;
2392  sl->left_type[LTOP] = left_type[LTOP];
2393  sl->left_type[LBOT] = left_type[LBOT];
2394 
/* Intra MBs always get the strongest filtering; no caches needed. */
2395  if (IS_INTRA(mb_type))
2396  return 0;
2397 
2398  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2399  top_type, left_type, mb_xy, 0);
2400  if (sl->list_count == 2)
2401  fill_filter_caches_inter(h, sl, mb_type, top_xy, left_xy,
2402  top_type, left_type, mb_xy, 1);
2403 
/* Load non-zero-count values for the current MB and its neighbours. */
2404  nnz = h->non_zero_count[mb_xy];
2405  nnz_cache = sl->non_zero_count_cache;
2406  AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
2407  AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
2408  AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
2409  AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
2410  sl->cbp = h->cbp_table[mb_xy];
2411 
2412  if (top_type) {
2413  nnz = h->non_zero_count[top_xy];
2414  AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
2415  }
2416 
2417  if (left_type[LTOP]) {
2418  nnz = h->non_zero_count[left_xy[LTOP]];
2419  nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
2420  nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
2421  nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
2422  nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
2423  }
2424 
2425  /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
2426  * from what the loop filter needs */
2427  if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
2428  if (IS_8x8DCT(top_type)) {
2429  nnz_cache[4 + 8 * 0] =
2430  nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
2431  nnz_cache[6 + 8 * 0] =
2432  nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
2433  }
2434  if (IS_8x8DCT(left_type[LTOP])) {
2435  nnz_cache[3 + 8 * 1] =
2436  nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
2437  }
2438  if (IS_8x8DCT(left_type[LBOT])) {
2439  nnz_cache[3 + 8 * 3] =
2440  nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
2441  }
2442 
/* For 8x8 DCT MBs the per-4x4 NNZ is derived from the CBP bits. */
2443  if (IS_8x8DCT(mb_type)) {
2444  nnz_cache[scan8[0]] =
2445  nnz_cache[scan8[1]] =
2446  nnz_cache[scan8[2]] =
2447  nnz_cache[scan8[3]] = (sl->cbp & 0x1000) >> 12;
2448 
2449  nnz_cache[scan8[0 + 4]] =
2450  nnz_cache[scan8[1 + 4]] =
2451  nnz_cache[scan8[2 + 4]] =
2452  nnz_cache[scan8[3 + 4]] = (sl->cbp & 0x2000) >> 12;
2453 
2454  nnz_cache[scan8[0 + 8]] =
2455  nnz_cache[scan8[1 + 8]] =
2456  nnz_cache[scan8[2 + 8]] =
2457  nnz_cache[scan8[3 + 8]] = (sl->cbp & 0x4000) >> 12;
2458 
2459  nnz_cache[scan8[0 + 12]] =
2460  nnz_cache[scan8[1 + 12]] =
2461  nnz_cache[scan8[2 + 12]] =
2462  nnz_cache[scan8[3 + 12]] = (sl->cbp & 0x8000) >> 12;
2463  }
2464  }
2465 
2466  return 0;
2467 }
2468 
/* Run the deblocking loop filter over MB columns [start_x, end_x) of the
 * current MB row (or row pair in MBAFF frames). Temporarily rewrites the
 * slice context's mb_x/mb_y/chroma_qp while iterating and restores the
 * caller-visible position/QP state at the end. */
2469 static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
2470 {
2471  uint8_t *dest_y, *dest_cb, *dest_cr;
2472  int linesize, uvlinesize, mb_x, mb_y;
2473  const int end_mb_y = sl->mb_y + FRAME_MBAFF(h);
2474  const int old_slice_type = sl->slice_type;
2475  const int pixel_shift = h->pixel_shift;
2476  const int block_h = 16 >> h->chroma_y_shift;
2477 
/* Filtering is deferred to ff_h264_execute_decode_slices when slices
 * must be deblocked across boundaries after all are decoded. */
2478  if (h->postpone_filter)
2479  return;
2480 
2481  if (sl->deblocking_filter) {
2482  for (mb_x = start_x; mb_x < end_x; mb_x++)
/* In MBAFF frames, filter both MBs of the vertical MB pair. */
2483  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
2484  int mb_xy, mb_type;
2485  mb_xy = sl->mb_xy = mb_x + mb_y * h->mb_stride;
2486  mb_type = h->cur_pic.mb_type[mb_xy];
2487 
2488  if (FRAME_MBAFF(h))
2489  sl->mb_mbaff =
2490  sl->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
2491 
2492  sl->mb_x = mb_x;
2493  sl->mb_y = mb_y;
/* Compute the luma/chroma destination pointers for this MB. */
2494  dest_y = h->cur_pic.f->data[0] +
2495  ((mb_x << pixel_shift) + mb_y * sl->linesize) * 16;
2496  dest_cb = h->cur_pic.f->data[1] +
2497  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2498  mb_y * sl->uvlinesize * block_h;
2499  dest_cr = h->cur_pic.f->data[2] +
2500  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
2501  mb_y * sl->uvlinesize * block_h;
2502  // FIXME simplify above
2503 
/* Field MBs use doubled strides; odd rows start one line up. */
2504  if (MB_FIELD(sl)) {
2505  linesize = sl->mb_linesize = sl->linesize * 2;
2506  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize * 2;
2507  if (mb_y & 1) { // FIXME move out of this function?
2508  dest_y -= sl->linesize * 15;
2509  dest_cb -= sl->uvlinesize * (block_h - 1);
2510  dest_cr -= sl->uvlinesize * (block_h - 1);
2511  }
2512  } else {
2513  linesize = sl->mb_linesize = sl->linesize;
2514  uvlinesize = sl->mb_uvlinesize = sl->uvlinesize;
2515  }
2516  backup_mb_border(h, sl, dest_y, dest_cb, dest_cr, linesize,
2517  uvlinesize, 0);
/* fill_filter_caches() returns nonzero if filtering is a no-op. */
2518  if (fill_filter_caches(h, sl, mb_type))
2519  continue;
2520  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
2521  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
2522 
2523  if (FRAME_MBAFF(h)) {
2524  ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,
2525  linesize, uvlinesize);
2526  } else {
2527  ff_h264_filter_mb_fast(h, sl, mb_x, mb_y, dest_y, dest_cb,
2528  dest_cr, linesize, uvlinesize);
2529  }
2530  }
2531  }
/* Restore the state mutated during iteration. */
2532  sl->slice_type = old_slice_type;
2533  sl->mb_x = end_x;
2534  sl->mb_y = end_mb_y - FRAME_MBAFF(h);
2535  sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
2536  sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
2537 }
2538 
/* Predict the MBAFF field-decoding flag for the current MB pair from the
 * left neighbour (preferred) or the top neighbour within the same slice.
 * NOTE(review): the signature line (2539) was dropped by the extractor;
 * presumably static void predict_field_decoding_flag(const H264Context *h,
 * H264SliceContext *sl). */
2540 {
2541  const int mb_xy = sl->mb_x + sl->mb_y * h->mb_stride;
2542  int mb_type = (h->slice_table[mb_xy - 1] == sl->slice_num) ?
2543  h->cur_pic.mb_type[mb_xy - 1] :
2544  (h->slice_table[mb_xy - h->mb_stride] == sl->slice_num) ?
2545  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
2546  sl->mb_mbaff = sl->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
2547 }
2548 
2549 /**
2550  * Draw edges and report progress for the last MB row.
2551  */
/* NOTE(review): the signature line (2552) was dropped by the extractor;
 * presumably static void decode_finish_row(const H264Context *h,
 * H264SliceContext *sl). */
2553 {
2554  int top = 16 * (sl->mb_y >> FIELD_PICTURE(h));
2555  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
2556  int height = 16 << FRAME_MBAFF(h);
2557  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
2558 
/* When deblocking, rows above the current one may still change, so the
 * reported band is shifted up by the filter's reach. */
2559  if (sl->deblocking_filter) {
2560  if ((top + height) >= pic_height)
2561  height += deblock_border;
2562  top -= deblock_border;
2563  }
2564 
2565  if (top >= pic_height || (top + height) < 0)
2566  return;
2567 
/* Clip the band to the picture. */
2568  height = FFMIN(height, pic_height - top);
2569  if (top < 0) {
2570  height = top + height;
2571  top = 0;
2572  }
2573 
2574  ff_h264_draw_horiz_band(h, sl, top, height);
2575 
/* No progress reporting for droppable frames or after an ER error. */
2576  if (h->droppable || sl->h264->slice_ctx[0].er.error_occurred)
2577  return;
2578 
2579  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
2580  h->picture_structure == PICT_BOTTOM_FIELD);
2581 }
2582 
/* Forward a decoded-slice region to the error-resilience module, if ER is
 * enabled. NOTE(review): the first signature line (2583) was dropped by
 * the extractor; presumably static void er_add_slice(H264SliceContext *sl,
 * ...). The ER context always lives in slice_ctx[0]. */
2584  int startx, int starty,
2585  int endx, int endy, int status)
2586 {
2587  if (!sl->h264->enable_er)
2588  return;
2589 
2590  if (CONFIG_ERROR_RESILIENCE) {
2591  ERContext *er = &sl->h264->slice_ctx[0].er;
2592 
2593  ff_er_add_slice(er, startx, starty, endx, endy, status);
2594  }
2595 }
2596 
/* Decode all macroblocks of one slice (thread entry point, runs per
 * slice context under avctx->execute). Dispatches to the CABAC or CAVLC
 * MB loop, runs the loop filter per finished row, and reports decoded
 * regions to error resilience. */
2597 static int decode_slice(struct AVCodecContext *avctx, void *arg)
2598 {
2599  H264SliceContext *sl = arg;
2600  const H264Context *h = sl->h264;
2601  int lf_x_start = sl->mb_x;
2602  int orig_deblock = sl->deblocking_filter;
2603  int ret;
2604 
2605  sl->linesize = h->cur_pic_ptr->f->linesize[0];
2606  sl->uvlinesize = h->cur_pic_ptr->f->linesize[1];
2607 
2608  ret = alloc_scratch_buffers(sl, sl->linesize);
2609  if (ret < 0)
2610  return ret;
2611 
2612  sl->mb_skip_run = -1;
2613 
2614  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * sl->linesize * ((scan8[15] - scan8[0]) >> 3));
2615 
/* Filtering is done after all slices when cross-slice deblocking is on. */
2616  if (h->postpone_filter)
2617  sl->deblocking_filter = 0;
2618 
2619  sl->is_complex = FRAME_MBAFF(h) || h->picture_structure != PICT_FRAME ||
2620  (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
2621 
/* Mark an ER error if the region before this slice was not fully
 * decoded (only meaningful for single-threaded frame decoding). */
2622  if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
2623  const int start_i = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
2624  if (start_i) {
2625  int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
2626  prev_status &= ~ VP_START;
2627  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
2628  h->slice_ctx[0].er.error_occurred = 1;
2629  }
2630  }
2631 
2632  if (h->ps.pps->cabac) {
2633  /* realign */
2634  align_get_bits(&sl->gb);
2635 
2636  /* init cabac */
/* NOTE(review): line 2637 (presumably ret = ff_init_cabac_decoder(
 * &sl->cabac, ...)) was dropped by the extractor. */
2638  sl->gb.buffer + get_bits_count(&sl->gb) / 8,
2639  (get_bits_left(&sl->gb) + 7) / 8);
2640  if (ret < 0)
2641  return ret;
2642 
/* NOTE(review): line 2643 (presumably
 * ff_h264_init_cabac_states(h, sl);) was dropped by the extractor. */
2644 
2645  for (;;) {
2646  int ret, eos;
/* Guard against slices that run into the next slice's MB range. */
2647  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2648  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2649  sl->next_slice_idx);
2650  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2651  sl->mb_y, ER_MB_ERROR);
2652  return AVERROR_INVALIDDATA;
2653  }
2654 
2655  ret = ff_h264_decode_mb_cabac(h, sl);
2656 
2657  if (ret >= 0)
2658  ff_h264_hl_decode_mb(h, sl);
2659 
2660  // FIXME optimal? or let mb_decode decode 16x32 ?
/* MBAFF: decode the bottom MB of the vertical pair as well. */
2661  if (ret >= 0 && FRAME_MBAFF(h)) {
2662  sl->mb_y++;
2663 
2664  ret = ff_h264_decode_mb_cabac(h, sl);
2665 
2666  if (ret >= 0)
2667  ff_h264_hl_decode_mb(h, sl);
2668  sl->mb_y--;
2669  }
2670  eos = get_cabac_terminate(&sl->cabac);
2671 
/* Workaround for encoders that truncate the final CABAC bytes. */
2672  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
2673  sl->cabac.bytestream > sl->cabac.bytestream_end + 2) {
2674  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2675  sl->mb_y, ER_MB_END);
2676  if (sl->mb_x >= lf_x_start)
2677  loop_filter(h, sl, lf_x_start, sl->mb_x + 1);
2678  goto finish;
2679  }
2680  if (sl->cabac.bytestream > sl->cabac.bytestream_end + 2 )
2681  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %"PTRDIFF_SPECIFIER"\n", sl->cabac.bytestream_end - sl->cabac.bytestream);
2682  if (ret < 0 || sl->cabac.bytestream > sl->cabac.bytestream_end + 4) {
2683  av_log(h->avctx, AV_LOG_ERROR,
2684  "error while decoding MB %d %d, bytestream %"PTRDIFF_SPECIFIER"\n",
2685  sl->mb_x, sl->mb_y,
2686  sl->cabac.bytestream_end - sl->cabac.bytestream);
2687  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2688  sl->mb_y, ER_MB_ERROR);
2689  return AVERROR_INVALIDDATA;
2690  }
2691 
/* Row finished: filter it, report progress, advance to the next. */
2692  if (++sl->mb_x >= h->mb_width) {
2693  loop_filter(h, sl, lf_x_start, sl->mb_x);
2694  sl->mb_x = lf_x_start = 0;
2695  decode_finish_row(h, sl);
2696  ++sl->mb_y;
2697  if (FIELD_OR_MBAFF_PICTURE(h)) {
2698  ++sl->mb_y;
2699  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
/* NOTE(review): line 2700 (presumably the call to
 * predict_field_decoding_flag(h, sl);) was dropped here. */
2701  }
2702  }
2703 
2704  if (eos || sl->mb_y >= h->mb_height) {
2705  ff_tlog(h->avctx, "slice end %d %d\n",
2706  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2707  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x - 1,
2708  sl->mb_y, ER_MB_END);
2709  if (sl->mb_x > lf_x_start)
2710  loop_filter(h, sl, lf_x_start, sl->mb_x);
2711  goto finish;
2712  }
2713  }
2714  } else {
/* CAVLC MB loop; termination is detected from bitstream exhaustion. */
2715  for (;;) {
2716  int ret;
2717 
2718  if (sl->mb_x + sl->mb_y * h->mb_width >= sl->next_slice_idx) {
2719  av_log(h->avctx, AV_LOG_ERROR, "Slice overlaps with next at %d\n",
2720  sl->next_slice_idx);
2721  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2722  sl->mb_y, ER_MB_ERROR);
2723  return AVERROR_INVALIDDATA;
2724  }
2725 
2726  ret = ff_h264_decode_mb_cavlc(h, sl);
2727 
2728  if (ret >= 0)
2729  ff_h264_hl_decode_mb(h, sl);
2730 
2731  // FIXME optimal? or let mb_decode decode 16x32 ?
2732  if (ret >= 0 && FRAME_MBAFF(h)) {
2733  sl->mb_y++;
2734  ret = ff_h264_decode_mb_cavlc(h, sl);
2735 
2736  if (ret >= 0)
2737  ff_h264_hl_decode_mb(h, sl);
2738  sl->mb_y--;
2739  }
2740 
2741  if (ret < 0) {
2742  av_log(h->avctx, AV_LOG_ERROR,
2743  "error while decoding MB %d %d\n", sl->mb_x, sl->mb_y);
2744  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2745  sl->mb_y, ER_MB_ERROR);
2746  return ret;
2747  }
2748 
2749  if (++sl->mb_x >= h->mb_width) {
2750  loop_filter(h, sl, lf_x_start, sl->mb_x);
2751  sl->mb_x = lf_x_start = 0;
2752  decode_finish_row(h, sl);
2753  ++sl->mb_y;
2754  if (FIELD_OR_MBAFF_PICTURE(h)) {
2755  ++sl->mb_y;
2756  if (FRAME_MBAFF(h) && sl->mb_y < h->mb_height)
/* NOTE(review): line 2757 (presumably the call to
 * predict_field_decoding_flag(h, sl);) was dropped here. */
2758  }
2759  if (sl->mb_y >= h->mb_height) {
2760  ff_tlog(h->avctx, "slice end %d %d\n",
2761  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2762 
/* Leftover bits at the picture end are tolerated unless
 * AV_EF_AGGRESSIVE error recognition is requested. */
2763  if ( get_bits_left(&sl->gb) == 0
2764  || get_bits_left(&sl->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
2765  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2766  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2767 
2768  goto finish;
2769  } else {
2770  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2771  sl->mb_x, sl->mb_y, ER_MB_END);
2772 
2773  return AVERROR_INVALIDDATA;
2774  }
2775  }
2776  }
2777 
/* Bitstream exhausted mid-picture: exactly-empty is a clean slice
 * end, a shortfall (skip_run pending) is an error. */
2778  if (get_bits_left(&sl->gb) <= 0 && sl->mb_skip_run <= 0) {
2779  ff_tlog(h->avctx, "slice end %d %d\n",
2780  get_bits_count(&sl->gb), sl->gb.size_in_bits);
2781 
2782  if (get_bits_left(&sl->gb) == 0) {
2783  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y,
2784  sl->mb_x - 1, sl->mb_y, ER_MB_END);
2785  if (sl->mb_x > lf_x_start)
2786  loop_filter(h, sl, lf_x_start, sl->mb_x);
2787 
2788  goto finish;
2789  } else {
2790  er_add_slice(sl, sl->resync_mb_x, sl->resync_mb_y, sl->mb_x,
2791  sl->mb_y, ER_MB_ERROR);
2792 
2793  return AVERROR_INVALIDDATA;
2794  }
2795  }
2796  }
2797  }
2798 
2799 finish:
2800  sl->deblocking_filter = orig_deblock;
2801  return 0;
2802 }
2803 
2804 /**
2805  * Call decode_slice() for each context.
2806  *
2807  * @param h h264 master context
2808  */
/* NOTE(review): the signature line (2809) was dropped by the extractor;
 * presumably int ff_h264_execute_decode_slices(H264Context *h). Runs the
 * queued slice contexts (serially or via avctx->execute), merges their
 * state back into the master context, and performs postponed deblocking. */
2810 {
2811  AVCodecContext *const avctx = h->avctx;
2812  H264SliceContext *sl;
2813  int context_count = h->nb_slice_ctx_queued;
2814  int ret = 0;
2815  int i, j;
2816 
2817  h->slice_ctx[0].next_slice_idx = INT_MAX;
2818 
/* With hwaccel the MB decoding happens in hardware; nothing to run. */
2819  if (h->avctx->hwaccel || context_count < 1)
2820  return 0;
2821 
2822  av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
2823 
2824  if (context_count == 1) {
2825 
2826  h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
2827  h->postpone_filter = 0;
2828 
2829  ret = decode_slice(avctx, &h->slice_ctx[0]);
2830  h->mb_y = h->slice_ctx[0].mb_y;
2831  if (ret < 0)
2832  goto finish;
2833  } else {
2834  av_assert0(context_count > 0);
2835  for (i = 0; i < context_count; i++) {
2836  int next_slice_idx = h->mb_width * h->mb_height;
2837  int slice_idx;
2838 
2839  sl = &h->slice_ctx[i];
2840  if (CONFIG_ERROR_RESILIENCE) {
2841  sl->er.error_count = 0;
2842  }
2843 
2844  /* make sure none of those slices overlap */
2845  slice_idx = sl->mb_y * h->mb_width + sl->mb_x;
2846  for (j = 0; j < context_count; j++) {
2847  H264SliceContext *sl2 = &h->slice_ctx[j];
2848  int slice_idx2 = sl2->mb_y * h->mb_width + sl2->mb_x;
2849 
2850  if (i == j || slice_idx2 < slice_idx)
2851  continue;
2852  next_slice_idx = FFMIN(next_slice_idx, slice_idx2);
2853  }
2854  sl->next_slice_idx = next_slice_idx;
2855  }
2856 
/* Decode all queued slices in parallel worker threads. */
2857  avctx->execute(avctx, decode_slice, h->slice_ctx,
2858  NULL, context_count, sizeof(h->slice_ctx[0]));
2859 
2860  /* pull back stuff from slices to master context */
2861  sl = &h->slice_ctx[context_count - 1];
2862  h->mb_y = sl->mb_y;
2863  if (CONFIG_ERROR_RESILIENCE) {
2864  for (i = 1; i < context_count; i++)
2865  h->slice_ctx[0].er.error_count += h->slice_ctx[i].er.error_count;
2866  }
2867 
/* Deblocking was postponed (cross-slice filtering, multiple slice
 * contexts): run it now over each slice's decoded region. */
2868  if (h->postpone_filter) {
2869  h->postpone_filter = 0;
2870 
2871  for (i = 0; i < context_count; i++) {
2872  int y_end, x_end;
2873 
2874  sl = &h->slice_ctx[i];
2875  y_end = FFMIN(sl->mb_y + 1, h->mb_height);
2876  x_end = (sl->mb_y >= h->mb_height) ? h->mb_width : sl->mb_x;
2877 
2878  for (j = sl->resync_mb_y; j < y_end; j += 1 + FIELD_OR_MBAFF_PICTURE(h)) {
2879  sl->mb_y = j;
2880  loop_filter(h, sl, j > sl->resync_mb_y ? 0 : sl->resync_mb_x,
2881  j == y_end - 1 ? x_end : h->mb_width);
2882  }
2883  }
2884  }
2885  }
2886 
2887 finish:
2888  h->nb_slice_ctx_queued = 0;
2889  return ret;
2890 }
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
er_add_slice
static void er_add_slice(H264SliceContext *sl, int startx, int starty, int endx, int endy, int status)
Definition: h264_slice.c:2583
ff_h264_filter_mb_fast
void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:419
h264_slice_header_init
static int h264_slice_header_init(H264Context *h)
Definition: h264_slice.c:931
implicit_weight_table
static void implicit_weight_table(const H264Context *h, H264SliceContext *sl, int field)
Initialize implicit_weight table.
Definition: h264_slice.c:680
H264SliceContext::mb_xy
int mb_xy
Definition: h264dec.h:238
ff_h264_unref_picture
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
Definition: h264_picture.c:44
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
H264SliceContext::ref_cache
int8_t ref_cache[2][5 *8]
Definition: h264dec.h:307
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
ff_h264_free_tables
void ff_h264_free_tables(H264Context *h)
Definition: h264dec.c:138
H264SEIDisplayOrientation::hflip
int hflip
Definition: h264_sei.h:147
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
av_clip
#define av_clip
Definition: common.h:122
h264_init_ps
static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_slice)
Definition: h264_slice.c:1031
H264SliceContext::max_pic_num
int max_pic_num
Definition: h264dec.h:338
H264SliceContext::nb_mmco
int nb_mmco
Definition: h264dec.h:330
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
CHROMA422
#define CHROMA422(h)
Definition: h264dec.h:99
FF_BUG_TRUNCATED
#define FF_BUG_TRUNCATED
Definition: avcodec.h:1586
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
cabac.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
H264_NAL_IDR_SLICE
@ H264_NAL_IDR_SLICE
Definition: h264.h:39
H264Picture::poc
int poc
frame POC
Definition: h264dec.h:149
h264_export_frame_props
static int h264_export_frame_props(H264Context *h)
Definition: h264_slice.c:1150
H264Picture::f
AVFrame * f
Definition: h264dec.h:130
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:907
out
FILE * out
Definition: movenc.c:54
cb
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:215
av_clip_int8
#define av_clip_int8
Definition: common.h:131
zigzag_scan8x8_cavlc
static const uint8_t zigzag_scan8x8_cavlc[64+1]
Definition: h264_slice.c:96
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:984
H264Context::slice_ctx
H264SliceContext * slice_ctx
Definition: h264dec.h:357
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:92
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
H264Picture::ref_index
int8_t * ref_index[2]
Definition: h264dec.h:146
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, buffer_size_t size)
Add a new side data to a frame.
Definition: frame.c:727
HWACCEL_MAX
#define HWACCEL_MAX
ff_h264_slice_context_init
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
Init context Allocate buffers which are not shared amongst multiple threads.
Definition: h264dec.c:222
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:438
MB_MBAFF
#define MB_MBAFF(h)
Definition: h264dec.h:72
H264SliceContext::mvd_table
uint8_t(*[2] mvd_table)[2]
Definition: h264dec.h:320
ff_h264_set_erpic
void ff_h264_set_erpic(ERPicture *dst, H264Picture *src)
Definition: h264_picture.c:136
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
H264_SEI_PIC_STRUCT_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM
3: top field, bottom field, in that order
Definition: h264_sei.h:34
H264Picture::pps
const PPS * pps
Definition: h264dec.h:167
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:168
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:149
GetBitContext::size_in_bits
int size_in_bits
Definition: get_bits.h:68
H2645NAL::ref_idc
int ref_idc
H.264 only, nal_ref_idc.
Definition: h2645_parse.h:70
predict_field_decoding_flag
static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)
Definition: h264_slice.c:2539
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:324
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:27
AVFrame::width
int width
Definition: frame.h:382
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
get_ue_golomb
static int get_ue_golomb(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to 8190.
Definition: golomb.h:55
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
internal.h
ff_h264_update_thread_context
int ff_h264_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:296
alloc_scratch_buffers
static int alloc_scratch_buffers(H264SliceContext *sl, int linesize)
Definition: h264_slice.c:128
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:486
FRAME_RECOVERED_IDR
#define FRAME_RECOVERED_IDR
We have seen an IDR, so all the following frames in coded order are correctly decodable.
Definition: h264dec.h:524
H264SliceContext::mmco
MMCO mmco[MAX_MMCO_COUNT]
Definition: h264dec.h:329
decode_finish_row
static void decode_finish_row(const H264Context *h, H264SliceContext *sl)
Draw edges and report progress for the last MB row.
Definition: h264_slice.c:2552
H264SliceContext::ref_count
unsigned int ref_count[2]
num_ref_idx_l0/1_active_minus1 + 1
Definition: h264dec.h:274
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:399
ff_er_frame_start
void ff_er_frame_start(ERContext *s)
Definition: error_resilience.c:797
H264_SEI_FPA_TYPE_CHECKERBOARD
@ H264_SEI_FPA_TYPE_CHECKERBOARD
Definition: h264_sei.h:46
H264Picture::qscale_table
int8_t * qscale_table
Definition: h264dec.h:134
H264SliceContext::h264
struct H264Context * h264
Definition: h264dec.h:185
H264SliceContext::left_mb_xy
int left_mb_xy[LEFT_MBS]
Definition: h264dec.h:218
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
ERContext
Definition: error_resilience.h:53
H264PredWeightTable::use_weight_chroma
int use_weight_chroma
Definition: h264_parse.h:32
av_buffer_allocz
AVBufferRef * av_buffer_allocz(buffer_size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:513
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_update_thread_context_for_user
int ff_h264_update_thread_context_for_user(AVCodecContext *dst, const AVCodecContext *src)
Definition: h264_slice.c:467
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:830
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:91
mpegvideo.h
H264Picture::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: h264dec.h:145
ff_h264_pred_weight_table
int ff_h264_pred_weight_table(GetBitContext *gb, const SPS *sps, const int *ref_count, int slice_type_nos, H264PredWeightTable *pwt, int picture_structure, void *logctx)
Definition: h264_parse.c:27
FRAME_RECOVERED_SEI
#define FRAME_RECOVERED_SEI
Sufficient number of frames have been decoded since a SEI recovery point, so all the following frames...
Definition: h264dec.h:529
H264SliceContext::is_complex
int is_complex
Definition: h264dec.h:245
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
ff_h264_decode_ref_pic_list_reordering
int ff_h264_decode_ref_pic_list_reordering(H264SliceContext *sl, void *logctx)
Definition: h264_refs.c:423
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:515
H264Picture::invalid_gap
int invalid_gap
Definition: h264dec.h:163
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:68
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
H264Picture::pps_buf
AVBufferRef * pps_buf
Definition: h264dec.h:166
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ThreadFrame::f
AVFrame * f
Definition: thread.h:35
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1624
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
H264SliceContext::mb_x
int mb_x
Definition: h264dec.h:237
h264_mvpred.h
H264Picture::frame_num
int frame_num
frame_num (raw frame_num from slice header)
Definition: h264dec.h:150
H264SliceContext::next_slice_idx
int next_slice_idx
Definition: h264dec.h:243
H264SliceContext
Definition: h264dec.h:184
fill_filter_caches_inter
static av_always_inline void fill_filter_caches_inter(const H264Context *h, H264SliceContext *sl, int mb_type, int top_xy, int left_xy[LEFT_MBS], int top_type, int left_type[LEFT_MBS], int mb_xy, int list)
Definition: h264_slice.c:2243
golomb.h
exp golomb vlc stuff
MB_FIELD
#define MB_FIELD(sl)
Definition: h264dec.h:73
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
ff_h264_filter_mb
void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize)
Definition: h264_loopfilter.c:719
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(buffer_size_t size, AVBufferRef *(*alloc)(buffer_size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:266
H264SliceContext::mv_cache
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264dec.h:306
AV_CODEC_FLAG_OUTPUT_CORRUPT
#define AV_CODEC_FLAG_OUTPUT_CORRUPT
Output even those frames that might be corrupted.
Definition: avcodec.h:283
AVHWAccel
Definition: avcodec.h:2438
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:417
finish
static void finish(void)
Definition: movenc.c:342
get_chroma_qp
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
Get the chroma qp.
Definition: h264dec.h:688
U
#define U(x)
Definition: vp56_arith.h:37
H264Picture::mmco_reset
int mmco_reset
MMCO_RESET set this 1.
Definition: h264dec.h:151
fail
#define fail()
Definition: checkasm.h:133
copy_picture_range
static void copy_picture_range(H264Picture **to, H264Picture **from, int count, H264Context *new_base, H264Context *old_base)
Definition: h264_slice.c:280
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
H264SEIA53Caption
Definition: h264_sei.h:107
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:415
h264_select_output_frame
static int h264_select_output_frame(H264Context *h)
Definition: h264_slice.c:1363
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:397
H264SliceContext::er
ERContext er
Definition: h264dec.h:187
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:99
CABACContext::bytestream
const uint8_t * bytestream
Definition: cabac.h:45
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:402
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
H264Picture::mb_stride
int mb_stride
Definition: h264dec.h:170
IN_RANGE
#define IN_RANGE(a, b, size)
Definition: h264_slice.c:273
ff_h264_flush_change
void ff_h264_flush_change(H264Context *h)
Definition: h264dec.c:439
ff_h264qpel_init
av_cold void ff_h264qpel_init(H264QpelContext *c, int bit_depth)
Definition: h264qpel.c:49
ff_h264_sei_process_picture_timing
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps, void *logctx)
Parse the contents of a picture timing message given an active SPS.
Definition: h264_sei.c:62
h264_frame_start
static int h264_frame_start(H264Context *h)
Definition: h264_slice.c:479
H264SliceContext::deblocking_filter
int deblocking_filter
disable_deblocking_filter_idc with 1 <-> 0
Definition: h264dec.h:200
H264PredWeightTable::luma_log2_weight_denom
int luma_log2_weight_denom
Definition: h264_parse.h:33
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
H264SliceContext::picture_structure
int picture_structure
Definition: h264dec.h:247
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
release_unused_pictures
static void release_unused_pictures(H264Context *h, int remove_current)
Definition: h264_slice.c:115
H264PredWeightTable::use_weight
int use_weight
Definition: h264_parse.h:31
H264_SEI_FPA_TYPE_SIDE_BY_SIDE
@ H264_SEI_FPA_TYPE_SIDE_BY_SIDE
Definition: h264_sei.h:49
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
H264SliceContext::direct_spatial_mv_pred
int direct_spatial_mv_pred
Definition: h264dec.h:258
H264SliceContext::slice_num
int slice_num
Definition: h264dec.h:189
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
@ H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL
Definition: h264_sei.h:51
non_j_pixfmt
static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
Definition: h264_slice.c:1020
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:402
ff_h264_init_cabac_states
void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
Definition: h264_cabac.c:1262
ff_h264_hl_decode_mb
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
Definition: h264_mb.c:799
avassert.h
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ff_color_frame
void ff_color_frame(AVFrame *frame, const int color[4])
Definition: utils.c:392
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:592
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:373
ff_h264_queue_decode_slice
int ff_h264_queue_decode_slice(H264Context *h, const H2645NAL *nal)
Submit a slice for decoding.
Definition: h264_slice.c:2100
width
#define width
H264Context::DPB
H264Picture DPB[H264_MAX_PICTURE_COUNT]
Definition: h264dec.h:352
AV_STEREO3D_LINES
@ AV_STEREO3D_LINES
Views are packed per line, as if interlaced.
Definition: stereo3d.h:129
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:104
stereo3d.h
H264_SEI_FPA_TYPE_TOP_BOTTOM
@ H264_SEI_FPA_TYPE_TOP_BOTTOM
Definition: h264_sei.h:50
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
H264SEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: h264_sei.h:108
H264PredWeightTable::chroma_log2_weight_denom
int chroma_log2_weight_denom
Definition: h264_parse.h:34
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:514
FIELD_PICTURE
#define FIELD_PICTURE(h)
Definition: h264dec.h:75
ff_h264_execute_ref_pic_marking
int ff_h264_execute_ref_pic_marking(H264Context *h)
Execute the reference picture marking (memory management control operations).
Definition: h264_refs.c:610
ff_h264_decode_ref_pic_marking
int ff_h264_decode_ref_pic_marking(H264SliceContext *sl, GetBitContext *gb, const H2645NAL *nal, void *logctx)
Definition: h264_refs.c:834
from
const char * from
Definition: jacosubdec.c:65
to
const char * to
Definition: webvttdec.c:34
h264_slice_header_parse
static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1753
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
H264PredWeightTable::chroma_weight_flag
int chroma_weight_flag[2]
7.4.3.2 chroma_weight_lX_flag
Definition: h264_parse.h:36
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:303
h264data.h
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:396
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
H264Ref::parent
H264Picture * parent
Definition: h264dec.h:181
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:37
field_scan8x8_cavlc
static const uint8_t field_scan8x8_cavlc[64+1]
Definition: h264_slice.c:76
H264SliceContext::slice_alpha_c0_offset
int slice_alpha_c0_offset
Definition: h264dec.h:201
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AVFrame::crop_right
size_t crop_right
Definition: frame.h:687
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
H264SliceContext::slice_type
int slice_type
Definition: h264dec.h:190
H264SliceContext::resync_mb_x
int resync_mb_x
Definition: h264dec.h:239
H264Picture::sei_recovery_frame_cnt
int sei_recovery_frame_cnt
Definition: h264dec.h:164
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: avcodec.h:233
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:241
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
H264Context::enable_er
int enable_er
Definition: h264dec.h:552
ff_h264_draw_horiz_band
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height)
Definition: h264dec.c:103
H264SliceContext::curr_pic_num
int curr_pic_num
Definition: h264dec.h:337
int32_t
int32_t
Definition: audio_convert.c:194
arg
const char * arg
Definition: jacosubdec.c:66
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
GetBitContext::buffer
const uint8_t * buffer
Definition: get_bits.h:62
alloc_picture
static int alloc_picture(H264Context *h, H264Picture *pic)
Definition: h264_slice.c:187
H264Picture::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: h264dec.h:136
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
NULL
#define NULL
Definition: coverity.c:32
AV_COPY128
#define AV_COPY128(d, s)
Definition: intreadwrite.h:609
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
AV_COPY64
#define AV_COPY64(d, s)
Definition: intreadwrite.h:605
H264SliceContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: h264dec.h:291
H264SEIUnregistered
Definition: h264_sei.h:111
SPS
Sequence parameter set.
Definition: h264_ps.h:44
TRANSPOSE
#define TRANSPOSE(x)
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
ER_MB_ERROR
#define ER_MB_ERROR
Definition: error_resilience.h:38
ff_h264_decode_mb_cabac
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cabac.c:1911
AV_PICTURE_TYPE_SI
@ AV_PICTURE_TYPE_SI
Switching Intra.
Definition: avutil.h:278
H264SliceContext::chroma_qp
int chroma_qp[2]
Definition: h264dec.h:195
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:255
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:308
PPS
Picture parameter set.
Definition: h264_ps.h:111
av_fast_mallocz
void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size)
Allocate and clear a buffer, reusing the given one if large enough.
Definition: mem.c:507
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
IS_INTERLACED
#define IS_INTERLACED(a)
Definition: mpegutils.h:83
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:695
H264Picture::mb_height
int mb_height
Definition: h264dec.h:169
MAX_PPS_COUNT
#define MAX_PPS_COUNT
Definition: h264_ps.h:38
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:400
H264SliceContext::qscale
int qscale
Definition: h264dec.h:194
get_pixel_format
static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
Definition: h264_slice.c:778
fill_filter_caches
static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb_type)
Definition: h264_slice.c:2327
ERContext::error_occurred
int error_occurred
Definition: error_resilience.h:65
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1656
fp
#define fp
Definition: regdef.h:44
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
init_scan_tables
static void init_scan_tables(H264Context *h)
initialize scan tables
Definition: h264_slice.c:744
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:414
H264SliceContext::top_borders_allocated
int top_borders_allocated[2]
Definition: h264dec.h:295
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:89
AV_PICTURE_TYPE_SP
@ AV_PICTURE_TYPE_SP
Switching Predicted.
Definition: avutil.h:279
FIELD_OR_MBAFF_PICTURE
#define FIELD_OR_MBAFF_PICTURE(h)
Definition: h264dec.h:92
H264SliceContext::mb_skip_run
int mb_skip_run
Definition: h264dec.h:244
h264_ps.h
init_dimensions
static void init_dimensions(H264Context *h)
Definition: h264_slice.c:891
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_FRAME_DATA_SEI_UNREGISTERED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
Definition: frame.h:194
H264SliceContext::top_type
int top_type
Definition: h264dec.h:221
AVFrame::crop_bottom
size_t crop_bottom
Definition: frame.h:685
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
H264SliceContext::resync_mb_y
int resync_mb_y
Definition: h264dec.h:240
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM
6: bottom field, top field, bottom field repeated, in that order
Definition: h264_sei.h:37
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread_frame.c:1106
DELAYED_PIC_REF
#define DELAYED_PIC_REF
Value of Picture.reference when Picture is not a reference picture, but is held for delayed output.
Definition: diracdec.c:68
H264SEIPictureTiming
Definition: h264_sei.h:66
H264SliceContext::cabac
CABACContext cabac
Cabac.
Definition: h264dec.h:325
H264SliceContext::redundant_pic_count
int redundant_pic_count
Definition: h264dec.h:251
AVFrame::crop_left
size_t crop_left
Definition: frame.h:686
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:407
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_STEREO3D_CHECKERBOARD
@ AV_STEREO3D_CHECKERBOARD
Views are packed in a checkerboard-like structure per pixel.
Definition: stereo3d.h:104
H264Picture::reference
int reference
Definition: h264dec.h:161
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
CABAC
#define CABAC(h)
Definition: h264_cabac.c:28
LEFT_MBS
#define LEFT_MBS
Definition: h264dec.h:76
pps
static int FUNC() pps(CodedBitstreamContext *ctx, RWContext *rw, H264RawPPS *current)
Definition: cbs_h264_syntax_template.c:404
H264SEIFramePacking
Definition: h264_sei.h:133
rectangle.h
FF_COMPLIANCE_STRICT
#define FF_COMPLIANCE_STRICT
Strictly conform to all the things in the spec no matter what consequences.
Definition: avcodec.h:1603
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
H264SliceContext::mb_uvlinesize
ptrdiff_t mb_uvlinesize
Definition: h264dec.h:235
VP_START
#define VP_START
< current MB is the first after a resync marker
Definition: error_resilience.h:30
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:404
H264SliceContext::pwt
H264PredWeightTable pwt
Definition: h264dec.h:204
MAX_DELAYED_PIC_COUNT
#define MAX_DELAYED_PIC_COUNT
Definition: h264dec.h:57
H264Picture::tf
ThreadFrame tf
Definition: h264dec.h:131
H264Picture::mb_type
uint32_t * mb_type
Definition: h264dec.h:140
ff_h264_decode_mb_cavlc
int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
Decode a macroblock.
Definition: h264_cavlc.c:697
H264_SEI_PIC_STRUCT_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_BOTTOM_TOP
4: bottom field, top field, in that order
Definition: h264_sei.h:35
H264Picture::recovered
int recovered
picture at IDR or recovery point + recovery count
Definition: h264dec.h:162
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
H264SliceContext::top_mb_xy
int top_mb_xy
Definition: h264dec.h:216
H264SliceContext::qp_thresh
int qp_thresh
QP threshold to skip loopfilter.
Definition: h264dec.h:196
H2645NAL
Definition: h2645_parse.h:32
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:406
H264SliceContext::top_borders
uint8_t(*[2] top_borders)[(16 *3) *2]
Definition: h264dec.h:292
AVFrameSideData::data
uint8_t * data
Definition: frame.h:228
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1785
H264SliceContext::cbp
int cbp
Definition: h264dec.h:262
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:397
H264SliceContext::left_type
int left_type[LEFT_MBS]
Definition: h264dec.h:223
ff_h264_direct_ref_list_init
void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:121
H264SliceContext::mb_y
int mb_y
Definition: h264dec.h:237
H264PredWeightTable::implicit_weight
int implicit_weight[48][48][2]
Definition: h264_parse.h:40
height
#define height
decode_slice
static int decode_slice(struct AVCodecContext *avctx, void *arg)
Definition: h264_slice.c:2597
H264SliceContext::explicit_ref_marking
int explicit_ref_marking
Definition: h264dec.h:331
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
av_reallocp_array
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
Definition: mem.c:206
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
@ H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN
Definition: h264_sei.h:47
pt
int pt
Definition: rtp.c:35
H264SliceContext::uvlinesize
ptrdiff_t uvlinesize
Definition: h264dec.h:233
AVBufferRef::buffer
AVBuffer * buffer
Definition: buffer.h:85
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
H264SEIDisplayOrientation::anticlockwise_rotation
int anticlockwise_rotation
Definition: h264_sei.h:146
H264SliceContext::slice_type_nos
int slice_type_nos
S free slice type (SI/SP are remapped to I/P)
Definition: h264dec.h:191
H264SliceContext::delta_poc_bottom
int delta_poc_bottom
Definition: h264dec.h:335
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
FRAME_MBAFF
#define FRAME_MBAFF(h)
Definition: h264dec.h:74
IS_DIRECT
#define IS_DIRECT(a)
Definition: mpegutils.h:84
H264_SEI_PIC_STRUCT_FRAME
@ H264_SEI_PIC_STRUCT_FRAME
0: frame
Definition: h264_sei.h:31
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:667
get_cabac_terminate
static int av_unused get_cabac_terminate(CABACContext *c)
Definition: cabac_functions.h:182
H264_SEI_PIC_STRUCT_FRAME_TRIPLING
@ H264_SEI_PIC_STRUCT_FRAME_TRIPLING
8: frame tripling
Definition: h264_sei.h:39
field_scan
static const uint8_t field_scan[16+1]
Definition: h264_slice.c:50
loop_filter
static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x, int end_x)
Definition: h264_slice.c:2469
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:165
H264SliceContext::mb_mbaff
int mb_mbaff
mb_aff_frame && mb_field_decoding_flag
Definition: h264dec.h:249
field_scan8x8
static const uint8_t field_scan8x8[64+1]
Definition: h264_slice.c:57
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
LIST_NOT_USED
#define LIST_NOT_USED
Definition: h264dec.h:397
H264Picture::field_picture
int field_picture
whether or not picture was encoded in separate fields
Definition: h264dec.h:159
h264dec.h
H264SliceContext::poc_lsb
int poc_lsb
Definition: h264dec.h:334
H264SliceContext::first_mb_addr
unsigned int first_mb_addr
Definition: h264dec.h:241
ff_h264_direct_dist_scale_factor
void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl)
Definition: h264_direct.c:62
i
int i
Definition: input.c:407
AVBuffer
A reference counted buffer type.
Definition: buffer_internal.h:34
H264Context
H264Context.
Definition: h264dec.h:344
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: avcodec.h:234
av_timecode_make_smpte_tc_string2
char * av_timecode_make_smpte_tc_string2(char *buf, AVRational rate, uint32_t tcsmpte, int prevent_df, int skip_field)
Get the timecode string from the SMPTE timecode format.
Definition: timecode.c:136
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:376
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:549
H264_SEI_PIC_STRUCT_FRAME_DOUBLING
@ H264_SEI_PIC_STRUCT_FRAME_DOUBLING
7: frame doubling
Definition: h264_sei.h:38
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
H264SliceContext::frame_num
int frame_num
Definition: h264dec.h:333
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:416
display.h
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
ff_h264_execute_decode_slices
int ff_h264_execute_decode_slices(H264Context *h)
Call decode_slice() for each context.
Definition: h264_slice.c:2809
H264SliceContext::mb_linesize
ptrdiff_t mb_linesize
may be equal to s->linesize or s->linesize * 2, for mbaff
Definition: h264dec.h:234
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
H264Picture::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: h264dec.h:142
tb
#define tb
Definition: regdef.h:68
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
ff_h264_parse_ref_count
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2], GetBitContext *gb, const PPS *pps, int slice_type_nos, int picture_structure, void *logctx)
Definition: h264_parse.c:219
H264_SEI_FPA_TYPE_INTERLEAVE_ROW
@ H264_SEI_FPA_TYPE_INTERLEAVE_ROW
Definition: h264_sei.h:48
ff_h264_alloc_tables
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264dec.c:181
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:569
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:398
H264SliceContext::list_count
unsigned int list_count
Definition: h264dec.h:275
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_h264_ref_picture
int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
Definition: h264_picture.c:66
ret
ret
Definition: filter_design.txt:187
ff_h264_init_poc
int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc, const SPS *sps, H264POCContext *pc, int picture_structure, int nal_ref_idc)
Definition: h264_parse.c:277
ff_h264_get_profile
int ff_h264_get_profile(const SPS *sps)
Compute profile from profile_idc and constraint_set?_flags.
Definition: h264_parse.c:529
AV_STEREO3D_COLUMNS
@ AV_STEREO3D_COLUMNS
Views are packed per column.
Definition: stereo3d.h:141
h264_field_start
static int h264_field_start(H264Context *h, const H264SliceContext *sl, const H2645NAL *nal, int first_slice)
Definition: h264_slice.c:1466
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
H264SliceContext::last_qscale_diff
int last_qscale_diff
Definition: h264dec.h:197
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:403
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:408
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:99
H264SliceContext::pps_id
unsigned int pps_id
Definition: h264dec.h:285
H264SliceContext::linesize
ptrdiff_t linesize
Definition: h264dec.h:233
H264SliceContext::slice_beta_offset
int slice_beta_offset
Definition: h264dec.h:202
AVCodecContext
main external API structure.
Definition: avcodec.h:536
AVFrame::height
int height
Definition: frame.h:382
get_ue_golomb_31
static int get_ue_golomb_31(GetBitContext *gb)
read unsigned exp golomb code, constraint to a max of 31.
Definition: golomb.h:122
MAX_SLICES
#define MAX_SLICES
Definition: dxva2_hevc.c:29
backup_mb_border
static av_always_inline void backup_mb_border(const H264Context *h, H264SliceContext *sl, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: h264_slice.c:578
ff_h264_build_ref_list
int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl)
Definition: h264_refs.c:299
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1824
H264SliceContext::bipred_scratchpad
uint8_t * bipred_scratchpad
Definition: h264dec.h:290
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
H264Picture::field_poc
int field_poc[2]
top/bottom POC
Definition: h264dec.h:148
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:219
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2548
H264Picture::mb_width
int mb_width
Definition: h264dec.h:169
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:828
H264Picture
Definition: h264dec.h:129
find_unused_picture
static int find_unused_picture(H264Context *h)
Definition: h264_slice.c:261
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1070
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:651
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
h264_slice_init
static int h264_slice_init(H264Context *h, H264SliceContext *sl, const H2645NAL *nal)
Definition: h264_slice.c:1964
H264SEIDisplayOrientation::vflip
int vflip
Definition: h264_sei.h:147
H264SEIDisplayOrientation
Definition: h264_sei.h:144
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2185
ff_h264_field_end
int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
Definition: h264_picture.c:159
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
CABACContext::bytestream_end
const uint8_t * bytestream_end
Definition: cabac.h:46
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
init_table_pools
static int init_table_pools(H264Context *h)
Definition: h264_slice.c:160
H264Picture::mb_type_buf
AVBufferRef * mb_type_buf
Definition: h264dec.h:139
H264SEIUnregistered::buf_ref
AVBufferRef ** buf_ref
Definition: h264_sei.h:113
H264SliceContext::ref_list
H264Ref ref_list[2][48]
0..15: frame refs, 16..47: mbaff field refs.
Definition: h264dec.h:276
LBOT
#define LBOT
Definition: h264dec.h:78
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder should not do as an error
Definition: avcodec.h:1661
H264SliceContext::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264dec.h:301
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:79
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:106
H264Context::nal_length_size
int nal_length_size
Number of bytes used for nal length (1, 2 or 4)
Definition: h264dec.h:458
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
ER_MB_END
#define ER_MB_END
Definition: error_resilience.h:39
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:226
H264_SEI_PIC_STRUCT_BOTTOM_FIELD
@ H264_SEI_PIC_STRUCT_BOTTOM_FIELD
2: bottom field
Definition: h264_sei.h:33
H264Picture::hwaccel_picture_private
void * hwaccel_picture_private
hardware accelerator private data
Definition: h264dec.h:143
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
H264_SEI_FPA_TYPE_2D
@ H264_SEI_FPA_TYPE_2D
Definition: h264_sei.h:52
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:563
cr
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:216
AVFrame::crop_top
size_t crop_top
Definition: frame.h:684
H264SliceContext::gb
GetBitContext gb
Definition: h264dec.h:186
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
H264SliceContext::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: h264dec.h:213
LTOP
#define LTOP
Definition: h264dec.h:77
h264.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:355
H264SliceContext::edge_emu_buffer_allocated
int edge_emu_buffer_allocated
Definition: h264dec.h:294
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
Definition: h264_slice.c:275
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
CHROMA444
#define CHROMA444(h)
Definition: h264dec.h:100
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
ff_h264_get_slice_type
int ff_h264_get_slice_type(const H264SliceContext *sl)
Reconstruct bitstream slice_type.
Definition: h264_slice.c:2225
h
h
Definition: vp9dsp_template.c:2038
H264SliceContext::cabac_init_idc
int cabac_init_idc
Definition: h264dec.h:327
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:409
H264PredWeightTable::luma_weight_flag
int luma_weight_flag[2]
7.4.3.2 luma_weight_lX_flag
Definition: h264_parse.h:35
H264_MAX_PICTURE_COUNT
#define H264_MAX_PICTURE_COUNT
Definition: h264dec.h:53
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
H264SliceContext::bipred_scratchpad_allocated
int bipred_scratchpad_allocated
Definition: h264dec.h:293
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
H264SliceContext::slice_type_fixed
int slice_type_fixed
Definition: h264dec.h:192
H264Ref::poc
int poc
Definition: h264dec.h:178
IS_8x8DCT
#define IS_8x8DCT(a)
Definition: h264dec.h:105
H264Picture::qscale_table_buf
AVBufferRef * qscale_table_buf
Definition: h264dec.h:133
H264_SEI_PIC_STRUCT_TOP_FIELD
@ H264_SEI_PIC_STRUCT_TOP_FIELD
1: top field
Definition: h264_sei.h:32
H264SliceContext::delta_poc
int delta_poc[2]
Definition: h264dec.h:336
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2940
H264Picture::long_ref
int long_ref
1->long term reference 0->short term reference
Definition: h264dec.h:155
H264Ref::reference
int reference
Definition: h264dec.h:177
H264Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: h264dec.h:137
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:407
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
@ H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP
5: top field, bottom field, top field repeated, in that order
Definition: h264_sei.h:36
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2489
H264SliceContext::mb_field_decoding_flag
int mb_field_decoding_flag
Definition: h264dec.h:248
H264Context::is_avc
int is_avc
Used to parse AVC variant of H.264.
Definition: h264dec.h:457