FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
h264.c
Go to the documentation of this file.
1 /*
2  * H.26L/H.264/AVC/JVT/14496-10/... decoder
3  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * H.264 / AVC / MPEG4 part10 codec.
25  * @author Michael Niedermayer <michaelni@gmx.at>
26  */
27 
28 #define UNCHECKED_BITSTREAM_READER 1
29 
30 #include "libavutil/imgutils.h"
31 #include "libavutil/opt.h"
32 #include "internal.h"
33 #include "cabac.h"
34 #include "cabac_functions.h"
35 #include "dsputil.h"
36 #include "error_resilience.h"
37 #include "avcodec.h"
38 #include "mpegvideo.h"
39 #include "h264.h"
40 #include "h264data.h"
41 #include "h264chroma.h"
42 #include "h264_mvpred.h"
43 #include "golomb.h"
44 #include "mathops.h"
45 #include "rectangle.h"
46 #include "svq3.h"
47 #include "thread.h"
48 #include "vdpau_internal.h"
49 #include "libavutil/avassert.h"
50 
51 // #undef NDEBUG
52 #include <assert.h>
53 
/* Total sample count of one coded macroblock, indexed by chroma format:
 * presumably [0]=luma only, [1]=4:2:0, [2]=4:2:2, [3]=4:4:4 — TODO confirm
 * against callers. 256 = 16x16 luma; chroma planes add 128/256/512. */
const uint16_t ff_h264_mb_sizes[4] = {
    256,    /* 16*16              */
    384,    /* 16*16 + 2 *  8* 8  */
    512,    /* 16*16 + 2 *  8*16  */
    768,    /* 16*16 + 2 * 16*16  */
};
55 
56 static const uint8_t rem6[QP_MAX_NUM + 1] = {
57  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
58  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
59  0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
60  3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
61  0, 1, 2, 3,
62 };
63 
64 static const uint8_t div6[QP_MAX_NUM + 1] = {
65  0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
66  3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
67  7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10,
68  10,10,10,11,11,11,11,11,11,12,12,12,12,12,12,13,13,13, 13, 13, 13,
69  14,14,14,14,
70 };
71 
73 #if CONFIG_H264_DXVA2_HWACCEL
75 #endif
76 #if CONFIG_H264_VAAPI_HWACCEL
78 #endif
79 #if CONFIG_H264_VDA_HWACCEL
81 #endif
82 #if CONFIG_H264_VDPAU_HWACCEL
84 #endif
87 };
88 
90 #if CONFIG_H264_DXVA2_HWACCEL
92 #endif
93 #if CONFIG_H264_VAAPI_HWACCEL
95 #endif
96 #if CONFIG_H264_VDA_HWACCEL
98 #endif
99 #if CONFIG_H264_VDPAU_HWACCEL
101 #endif
104 };
105 
107 {
108  H264Context *h = avctx->priv_data;
109  return h ? h->sps.num_reorder_frames : 0;
110 }
111 
112 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
113  int (*mv)[2][4][2],
114  int mb_x, int mb_y, int mb_intra, int mb_skipped)
115 {
116  H264Context *h = opaque;
117 
118  h->mb_x = mb_x;
119  h->mb_y = mb_y;
120  h->mb_xy = mb_x + mb_y * h->mb_stride;
121  memset(h->non_zero_count_cache, 0, sizeof(h->non_zero_count_cache));
122  av_assert1(ref >= 0);
123  /* FIXME: It is possible albeit uncommon that slice references
124  * differ between slices. We take the easy approach and ignore
125  * it for now. If this turns out to have any relevance in
126  * practice then correct remapping should be added. */
127  if (ref >= h->ref_count[0])
128  ref = 0;
129  if (!h->ref_list[0][ref].f.data[0]) {
130  av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
131  ref = 0;
132  }
133  if ((h->ref_list[0][ref].reference&3) != 3) {
134  av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
135  return;
136  }
137  fill_rectangle(&h->cur_pic.ref_index[0][4 * h->mb_xy],
138  2, 2, 2, ref, 1);
139  fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
140  fill_rectangle(h->mv_cache[0][scan8[0]], 4, 4, 8,
141  pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
142  h->mb_mbaff =
143  h->mb_field_decoding_flag = 0;
145 }
146 
148 {
149  AVCodecContext *avctx = h->avctx;
150  Picture *cur = &h->cur_pic;
151  Picture *last = h->ref_list[0][0].f.data[0] ? &h->ref_list[0][0] : NULL;
152  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
153  int vshift = desc->log2_chroma_h;
154  const int field_pic = h->picture_structure != PICT_FRAME;
155  if (field_pic) {
156  height <<= 1;
157  y <<= 1;
158  }
159 
160  height = FFMIN(height, avctx->height - y);
161 
162  if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
163  return;
164 
165  if (avctx->draw_horiz_band) {
166  AVFrame *src;
168  int i;
169 
170  if (cur->f.pict_type == AV_PICTURE_TYPE_B || h->low_delay ||
172  src = &cur->f;
173  else if (last)
174  src = &last->f;
175  else
176  return;
177 
178  offset[0] = y * src->linesize[0];
179  offset[1] =
180  offset[2] = (y >> vshift) * src->linesize[1];
181  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
182  offset[i] = 0;
183 
184  emms_c();
185 
186  avctx->draw_horiz_band(avctx, src, offset,
187  y, h->picture_structure, height);
188  }
189 }
190 
191 static void unref_picture(H264Context *h, Picture *pic)
192 {
193  int off = offsetof(Picture, tf) + sizeof(pic->tf);
194  int i;
195 
196  if (!pic->f.data[0])
197  return;
198 
199  ff_thread_release_buffer(h->avctx, &pic->tf);
201 
204  for (i = 0; i < 2; i++) {
206  av_buffer_unref(&pic->ref_index_buf[i]);
207  }
208 
209  memset((uint8_t*)pic + off, 0, sizeof(*pic) - off);
210 }
211 
212 static void release_unused_pictures(H264Context *h, int remove_current)
213 {
214  int i;
215 
216  /* release non reference frames */
217  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
218  if (h->DPB[i].f.data[0] && !h->DPB[i].reference &&
219  (remove_current || &h->DPB[i] != h->cur_pic_ptr)) {
220  unref_picture(h, &h->DPB[i]);
221  }
222  }
223 }
224 
225 static int ref_picture(H264Context *h, Picture *dst, Picture *src)
226 {
227  int ret, i;
228 
229  av_assert0(!dst->f.buf[0]);
230  av_assert0(src->f.buf[0]);
231 
232  src->tf.f = &src->f;
233  dst->tf.f = &dst->f;
234  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
235  if (ret < 0)
236  goto fail;
237 
238 
241  if (!dst->qscale_table_buf || !dst->mb_type_buf)
242  goto fail;
243  dst->qscale_table = src->qscale_table;
244  dst->mb_type = src->mb_type;
245 
246  for (i = 0; i < 2; i ++) {
247  dst->motion_val_buf[i] = av_buffer_ref(src->motion_val_buf[i]);
248  dst->ref_index_buf[i] = av_buffer_ref(src->ref_index_buf[i]);
249  if (!dst->motion_val_buf[i] || !dst->ref_index_buf[i])
250  goto fail;
251  dst->motion_val[i] = src->motion_val[i];
252  dst->ref_index[i] = src->ref_index[i];
253  }
254 
255  if (src->hwaccel_picture_private) {
257  if (!dst->hwaccel_priv_buf)
258  goto fail;
260  }
261 
262  for (i = 0; i < 2; i++)
263  dst->field_poc[i] = src->field_poc[i];
264 
265  memcpy(dst->ref_poc, src->ref_poc, sizeof(src->ref_poc));
266  memcpy(dst->ref_count, src->ref_count, sizeof(src->ref_count));
267 
268  dst->poc = src->poc;
269  dst->frame_num = src->frame_num;
270  dst->mmco_reset = src->mmco_reset;
271  dst->pic_id = src->pic_id;
272  dst->long_ref = src->long_ref;
273  dst->mbaff = src->mbaff;
274  dst->field_picture = src->field_picture;
275  dst->needs_realloc = src->needs_realloc;
276  dst->reference = src->reference;
277  dst->sync = src->sync;
278  dst->crop = src->crop;
279  dst->crop_left = src->crop_left;
280  dst->crop_top = src->crop_top;
281 
282  return 0;
283 fail:
284  unref_picture(h, dst);
285  return ret;
286 }
287 
288 
289 static int alloc_scratch_buffers(H264Context *h, int linesize)
290 {
291  int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
292 
293  if (h->bipred_scratchpad)
294  return 0;
295 
296  h->bipred_scratchpad = av_malloc(16 * 6 * alloc_size);
297  // edge emu needs blocksize + filter length - 1
298  // (= 21x21 for h264)
299  h->edge_emu_buffer = av_mallocz(alloc_size * 2 * 21);
300  h->me.scratchpad = av_mallocz(alloc_size * 2 * 16 * 2);
301 
302  if (!h->bipred_scratchpad || !h->edge_emu_buffer || !h->me.scratchpad) {
305  av_freep(&h->me.scratchpad);
306  return AVERROR(ENOMEM);
307  }
308 
309  h->me.temp = h->me.scratchpad;
310 
311  return 0;
312 }
313 
315 {
316  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
317  const int mb_array_size = h->mb_stride * h->mb_height;
318  const int b4_stride = h->mb_width * 4 + 1;
319  const int b4_array_size = b4_stride * h->mb_height * 4;
320 
321  h->qscale_table_pool = av_buffer_pool_init(big_mb_num + h->mb_stride,
323  h->mb_type_pool = av_buffer_pool_init((big_mb_num + h->mb_stride) *
324  sizeof(uint32_t), av_buffer_allocz);
325  h->motion_val_pool = av_buffer_pool_init(2 * (b4_array_size + 4) *
326  sizeof(int16_t), av_buffer_allocz);
327  h->ref_index_pool = av_buffer_pool_init(4 * mb_array_size, av_buffer_allocz);
328 
329  if (!h->qscale_table_pool || !h->mb_type_pool || !h->motion_val_pool ||
330  !h->ref_index_pool) {
335  return AVERROR(ENOMEM);
336  }
337 
338  return 0;
339 }
340 
341 static int alloc_picture(H264Context *h, Picture *pic)
342 {
343  int i, ret = 0;
344 
345  av_assert0(!pic->f.data[0]);
346 
347  pic->tf.f = &pic->f;
348  ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
350  if (ret < 0)
351  goto fail;
352 
353  h->linesize = pic->f.linesize[0];
354  h->uvlinesize = pic->f.linesize[1];
355  pic->crop = h->sps.crop;
356  pic->crop_top = h->sps.crop_top;
357  pic->crop_left= h->sps.crop_left;
358 
359  if (h->avctx->hwaccel) {
360  const AVHWAccel *hwaccel = h->avctx->hwaccel;
362  if (hwaccel->priv_data_size) {
364  if (!pic->hwaccel_priv_buf)
365  return AVERROR(ENOMEM);
367  }
368  }
369 
370  if (!h->qscale_table_pool) {
371  ret = init_table_pools(h);
372  if (ret < 0)
373  goto fail;
374  }
375 
378  if (!pic->qscale_table_buf || !pic->mb_type_buf)
379  goto fail;
380 
381  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
382  pic->qscale_table = pic->qscale_table_buf->data + 2 * h->mb_stride + 1;
383 
384  for (i = 0; i < 2; i++) {
387  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i])
388  goto fail;
389 
390  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
391  pic->ref_index[i] = pic->ref_index_buf[i]->data;
392  }
393 
394  return 0;
395 fail:
396  unref_picture(h, pic);
397  return (ret < 0) ? ret : AVERROR(ENOMEM);
398 }
399 
400 static inline int pic_is_unused(H264Context *h, Picture *pic)
401 {
402  if (pic->f.data[0] == NULL)
403  return 1;
404  if (pic->needs_realloc && !(pic->reference & DELAYED_PIC_REF))
405  return 1;
406  return 0;
407 }
408 
410 {
411  int i;
412 
413  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
414  if (pic_is_unused(h, &h->DPB[i]))
415  break;
416  }
417  if (i == MAX_PICTURE_COUNT)
418  return AVERROR_INVALIDDATA;
419 
420  if (h->DPB[i].needs_realloc) {
421  h->DPB[i].needs_realloc = 0;
422  unref_picture(h, &h->DPB[i]);
423  }
424 
425  return i;
426 }
427 
428 /**
429  * Check if the top & left blocks are available if needed and
430  * change the dc mode so it only uses the available blocks.
431  */
433 {
434  static const int8_t top[12] = {
435  -1, 0, LEFT_DC_PRED, -1, -1, -1, -1, -1, 0
436  };
437  static const int8_t left[12] = {
438  0, -1, TOP_DC_PRED, 0, -1, -1, -1, 0, -1, DC_128_PRED
439  };
440  int i;
441 
442  if (!(h->top_samples_available & 0x8000)) {
443  for (i = 0; i < 4; i++) {
444  int status = top[h->intra4x4_pred_mode_cache[scan8[0] + i]];
445  if (status < 0) {
447  "top block unavailable for requested intra4x4 mode %d at %d %d\n",
448  status, h->mb_x, h->mb_y);
449  return -1;
450  } else if (status) {
451  h->intra4x4_pred_mode_cache[scan8[0] + i] = status;
452  }
453  }
454  }
455 
456  if ((h->left_samples_available & 0x8888) != 0x8888) {
457  static const int mask[4] = { 0x8000, 0x2000, 0x80, 0x20 };
458  for (i = 0; i < 4; i++)
459  if (!(h->left_samples_available & mask[i])) {
460  int status = left[h->intra4x4_pred_mode_cache[scan8[0] + 8 * i]];
461  if (status < 0) {
463  "left block unavailable for requested intra4x4 mode %d at %d %d\n",
464  status, h->mb_x, h->mb_y);
465  return -1;
466  } else if (status) {
467  h->intra4x4_pred_mode_cache[scan8[0] + 8 * i] = status;
468  }
469  }
470  }
471 
472  return 0;
473 } // FIXME cleanup like ff_h264_check_intra_pred_mode
474 
475 /**
476  * Check if the top & left blocks are available if needed and
477  * change the dc mode so it only uses the available blocks.
478  */
479 int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
480 {
481  static const int8_t top[4] = { LEFT_DC_PRED8x8, 1, -1, -1 };
482  static const int8_t left[5] = { TOP_DC_PRED8x8, -1, 2, -1, DC_128_PRED8x8 };
483 
484  if (mode > 3U) {
486  "out of range intra chroma pred mode at %d %d\n",
487  h->mb_x, h->mb_y);
488  return -1;
489  }
490 
491  if (!(h->top_samples_available & 0x8000)) {
492  mode = top[mode];
493  if (mode < 0) {
495  "top block unavailable for requested intra mode at %d %d\n",
496  h->mb_x, h->mb_y);
497  return -1;
498  }
499  }
500 
501  if ((h->left_samples_available & 0x8080) != 0x8080) {
502  mode = left[mode];
503  if (is_chroma && (h->left_samples_available & 0x8080)) {
504  // mad cow disease mode, aka MBAFF + constrained_intra_pred
505  mode = ALZHEIMER_DC_L0T_PRED8x8 +
506  (!(h->left_samples_available & 0x8000)) +
507  2 * (mode == DC_128_PRED8x8);
508  }
509  if (mode < 0) {
511  "left block unavailable for requested intra mode at %d %d\n",
512  h->mb_x, h->mb_y);
513  return -1;
514  }
515  }
516 
517  return mode;
518 }
519 
521  int *dst_length, int *consumed, int length)
522 {
523  int i, si, di;
524  uint8_t *dst;
525  int bufidx;
526 
527  // src[0]&0x80; // forbidden bit
528  h->nal_ref_idc = src[0] >> 5;
529  h->nal_unit_type = src[0] & 0x1F;
530 
531  src++;
532  length--;
533 
534 #define STARTCODE_TEST \
535  if (i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
536  if (src[i + 2] != 3) { \
537  /* startcode, so we must be past the end */ \
538  length = i; \
539  } \
540  break; \
541  }
542 #if HAVE_FAST_UNALIGNED
543 #define FIND_FIRST_ZERO \
544  if (i > 0 && !src[i]) \
545  i--; \
546  while (src[i]) \
547  i++
548 #if HAVE_FAST_64BIT
549  for (i = 0; i + 1 < length; i += 9) {
550  if (!((~AV_RN64A(src + i) &
551  (AV_RN64A(src + i) - 0x0100010001000101ULL)) &
552  0x8000800080008080ULL))
553  continue;
554  FIND_FIRST_ZERO;
556  i -= 7;
557  }
558 #else
559  for (i = 0; i + 1 < length; i += 5) {
560  if (!((~AV_RN32A(src + i) &
561  (AV_RN32A(src + i) - 0x01000101U)) &
562  0x80008080U))
563  continue;
564  FIND_FIRST_ZERO;
566  i -= 3;
567  }
568 #endif
569 #else
570  for (i = 0; i + 1 < length; i += 2) {
571  if (src[i])
572  continue;
573  if (i > 0 && src[i - 1] == 0)
574  i--;
576  }
577 #endif
578 
579  // use second escape buffer for inter data
580  bufidx = h->nal_unit_type == NAL_DPC ? 1 : 0;
581 
582  si = h->rbsp_buffer_size[bufidx];
583  av_fast_padded_malloc(&h->rbsp_buffer[bufidx], &h->rbsp_buffer_size[bufidx], length+MAX_MBPAIR_SIZE);
584  dst = h->rbsp_buffer[bufidx];
585 
586  if (dst == NULL)
587  return NULL;
588 
589  if(i>=length-1){ //no escaped 0
590  *dst_length= length;
591  *consumed= length+1; //+1 for the header
592  if(h->avctx->flags2 & CODEC_FLAG2_FAST){
593  return src;
594  }else{
595  memcpy(dst, src, length);
596  return dst;
597  }
598  }
599 
600  memcpy(dst, src, i);
601  si = di = i;
602  while (si + 2 < length) {
603  // remove escapes (very rare 1:2^22)
604  if (src[si + 2] > 3) {
605  dst[di++] = src[si++];
606  dst[di++] = src[si++];
607  } else if (src[si] == 0 && src[si + 1] == 0) {
608  if (src[si + 2] == 3) { // escape
609  dst[di++] = 0;
610  dst[di++] = 0;
611  si += 3;
612  continue;
613  } else // next start code
614  goto nsc;
615  }
616 
617  dst[di++] = src[si++];
618  }
619  while (si < length)
620  dst[di++] = src[si++];
621 nsc:
622 
623  memset(dst + di, 0, FF_INPUT_BUFFER_PADDING_SIZE);
624 
625  *dst_length = di;
626  *consumed = si + 1; // +1 for the header
627  /* FIXME store exact number of bits in the getbitcontext
628  * (it is needed for decoding) */
629  return dst;
630 }
631 
632 /**
633  * Identify the exact end of the bitstream
634  * @return the length of the trailing, or 0 if damaged
635  */
637 {
638  int v = *src;
639  int r;
640 
641  tprintf(h->avctx, "rbsp trailing %X\n", v);
642 
643  for (r = 1; r < 9; r++) {
644  if (v & 1)
645  return r;
646  v >>= 1;
647  }
648  return 0;
649 }
650 
651 static inline int get_lowest_part_list_y(H264Context *h, Picture *pic, int n,
652  int height, int y_offset, int list)
653 {
654  int raw_my = h->mv_cache[list][scan8[n]][1];
655  int filter_height_down = (raw_my & 3) ? 3 : 0;
656  int full_my = (raw_my >> 2) + y_offset;
657  int bottom = full_my + filter_height_down + height;
658 
659  av_assert2(height >= 0);
660 
661  return FFMAX(0, bottom);
662 }
663 
664 static inline void get_lowest_part_y(H264Context *h, int refs[2][48], int n,
665  int height, int y_offset, int list0,
666  int list1, int *nrefs)
667 {
668  int my;
669 
670  y_offset += 16 * (h->mb_y >> MB_FIELD(h));
671 
672  if (list0) {
673  int ref_n = h->ref_cache[0][scan8[n]];
674  Picture *ref = &h->ref_list[0][ref_n];
675 
676  // Error resilience puts the current picture in the ref list.
677  // Don't try to wait on these as it will cause a deadlock.
678  // Fields can wait on each other, though.
679  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
680  (ref->reference & 3) != h->picture_structure) {
681  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 0);
682  if (refs[0][ref_n] < 0)
683  nrefs[0] += 1;
684  refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
685  }
686  }
687 
688  if (list1) {
689  int ref_n = h->ref_cache[1][scan8[n]];
690  Picture *ref = &h->ref_list[1][ref_n];
691 
692  if (ref->tf.progress->data != h->cur_pic.tf.progress->data ||
693  (ref->reference & 3) != h->picture_structure) {
694  my = get_lowest_part_list_y(h, ref, n, height, y_offset, 1);
695  if (refs[1][ref_n] < 0)
696  nrefs[1] += 1;
697  refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
698  }
699  }
700 }
701 
702 /**
703  * Wait until all reference frames are available for MC operations.
704  *
705  * @param h the H264 context
706  */
708 {
709  const int mb_xy = h->mb_xy;
710  const int mb_type = h->cur_pic.mb_type[mb_xy];
711  int refs[2][48];
712  int nrefs[2] = { 0 };
713  int ref, list;
714 
715  memset(refs, -1, sizeof(refs));
716 
717  if (IS_16X16(mb_type)) {
718  get_lowest_part_y(h, refs, 0, 16, 0,
719  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
720  } else if (IS_16X8(mb_type)) {
721  get_lowest_part_y(h, refs, 0, 8, 0,
722  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
723  get_lowest_part_y(h, refs, 8, 8, 8,
724  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
725  } else if (IS_8X16(mb_type)) {
726  get_lowest_part_y(h, refs, 0, 16, 0,
727  IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
728  get_lowest_part_y(h, refs, 4, 16, 0,
729  IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
730  } else {
731  int i;
732 
733  av_assert2(IS_8X8(mb_type));
734 
735  for (i = 0; i < 4; i++) {
736  const int sub_mb_type = h->sub_mb_type[i];
737  const int n = 4 * i;
738  int y_offset = (i & 2) << 2;
739 
740  if (IS_SUB_8X8(sub_mb_type)) {
741  get_lowest_part_y(h, refs, n, 8, y_offset,
742  IS_DIR(sub_mb_type, 0, 0),
743  IS_DIR(sub_mb_type, 0, 1),
744  nrefs);
745  } else if (IS_SUB_8X4(sub_mb_type)) {
746  get_lowest_part_y(h, refs, n, 4, y_offset,
747  IS_DIR(sub_mb_type, 0, 0),
748  IS_DIR(sub_mb_type, 0, 1),
749  nrefs);
750  get_lowest_part_y(h, refs, n + 2, 4, y_offset + 4,
751  IS_DIR(sub_mb_type, 0, 0),
752  IS_DIR(sub_mb_type, 0, 1),
753  nrefs);
754  } else if (IS_SUB_4X8(sub_mb_type)) {
755  get_lowest_part_y(h, refs, n, 8, y_offset,
756  IS_DIR(sub_mb_type, 0, 0),
757  IS_DIR(sub_mb_type, 0, 1),
758  nrefs);
759  get_lowest_part_y(h, refs, n + 1, 8, y_offset,
760  IS_DIR(sub_mb_type, 0, 0),
761  IS_DIR(sub_mb_type, 0, 1),
762  nrefs);
763  } else {
764  int j;
765  av_assert2(IS_SUB_4X4(sub_mb_type));
766  for (j = 0; j < 4; j++) {
767  int sub_y_offset = y_offset + 2 * (j & 2);
768  get_lowest_part_y(h, refs, n + j, 4, sub_y_offset,
769  IS_DIR(sub_mb_type, 0, 0),
770  IS_DIR(sub_mb_type, 0, 1),
771  nrefs);
772  }
773  }
774  }
775  }
776 
777  for (list = h->list_count - 1; list >= 0; list--)
778  for (ref = 0; ref < 48 && nrefs[list]; ref++) {
779  int row = refs[list][ref];
780  if (row >= 0) {
781  Picture *ref_pic = &h->ref_list[list][ref];
782  int ref_field = ref_pic->reference - 1;
783  int ref_field_picture = ref_pic->field_picture;
784  int pic_height = 16 * h->mb_height >> ref_field_picture;
785 
786  row <<= MB_MBAFF(h);
787  nrefs[list]--;
788 
789  if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
790  ff_thread_await_progress(&ref_pic->tf,
791  FFMIN((row >> 1) - !(row & 1),
792  pic_height - 1),
793  1);
794  ff_thread_await_progress(&ref_pic->tf,
795  FFMIN((row >> 1), pic_height - 1),
796  0);
797  } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
798  ff_thread_await_progress(&ref_pic->tf,
799  FFMIN(row * 2 + ref_field,
800  pic_height - 1),
801  0);
802  } else if (FIELD_PICTURE(h)) {
803  ff_thread_await_progress(&ref_pic->tf,
804  FFMIN(row, pic_height - 1),
805  ref_field);
806  } else {
807  ff_thread_await_progress(&ref_pic->tf,
808  FFMIN(row, pic_height - 1),
809  0);
810  }
811  }
812  }
813 }
814 
816  int n, int square, int height,
817  int delta, int list,
818  uint8_t *dest_y, uint8_t *dest_cb,
819  uint8_t *dest_cr,
820  int src_x_offset, int src_y_offset,
821  qpel_mc_func *qpix_op,
822  h264_chroma_mc_func chroma_op,
823  int pixel_shift, int chroma_idc)
824 {
825  const int mx = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
826  int my = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
827  const int luma_xy = (mx & 3) + ((my & 3) << 2);
828  int offset = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
829  uint8_t *src_y = pic->f.data[0] + offset;
830  uint8_t *src_cb, *src_cr;
831  int extra_width = 0;
832  int extra_height = 0;
833  int emu = 0;
834  const int full_mx = mx >> 2;
835  const int full_my = my >> 2;
836  const int pic_width = 16 * h->mb_width;
837  const int pic_height = 16 * h->mb_height >> MB_FIELD(h);
838  int ysh;
839 
840  if (mx & 7)
841  extra_width -= 3;
842  if (my & 7)
843  extra_height -= 3;
844 
845  if (full_mx < 0 - extra_width ||
846  full_my < 0 - extra_height ||
847  full_mx + 16 /*FIXME*/ > pic_width + extra_width ||
848  full_my + 16 /*FIXME*/ > pic_height + extra_height) {
850  src_y - (2 << pixel_shift) - 2 * h->mb_linesize,
851  h->mb_linesize,
852  16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
853  full_my - 2, pic_width, pic_height);
854  src_y = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
855  emu = 1;
856  }
857 
858  qpix_op[luma_xy](dest_y, src_y, h->mb_linesize); // FIXME try variable height perhaps?
859  if (!square)
860  qpix_op[luma_xy](dest_y + delta, src_y + delta, h->mb_linesize);
861 
862  if (CONFIG_GRAY && h->flags & CODEC_FLAG_GRAY)
863  return;
864 
865  if (chroma_idc == 3 /* yuv444 */) {
866  src_cb = pic->f.data[1] + offset;
867  if (emu) {
869  src_cb - (2 << pixel_shift) - 2 * h->mb_linesize,
870  h->mb_linesize,
871  16 + 5, 16 + 5 /*FIXME*/,
872  full_mx - 2, full_my - 2,
873  pic_width, pic_height);
874  src_cb = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
875  }
876  qpix_op[luma_xy](dest_cb, src_cb, h->mb_linesize); // FIXME try variable height perhaps?
877  if (!square)
878  qpix_op[luma_xy](dest_cb + delta, src_cb + delta, h->mb_linesize);
879 
880  src_cr = pic->f.data[2] + offset;
881  if (emu) {
883  src_cr - (2 << pixel_shift) - 2 * h->mb_linesize,
884  h->mb_linesize,
885  16 + 5, 16 + 5 /*FIXME*/,
886  full_mx - 2, full_my - 2,
887  pic_width, pic_height);
888  src_cr = h->edge_emu_buffer + (2 << pixel_shift) + 2 * h->mb_linesize;
889  }
890  qpix_op[luma_xy](dest_cr, src_cr, h->mb_linesize); // FIXME try variable height perhaps?
891  if (!square)
892  qpix_op[luma_xy](dest_cr + delta, src_cr + delta, h->mb_linesize);
893  return;
894  }
895 
896  ysh = 3 - (chroma_idc == 2 /* yuv422 */);
897  if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(h)) {
898  // chroma offset when predicting from a field of opposite parity
899  my += 2 * ((h->mb_y & 1) - (pic->reference - 1));
900  emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
901  }
902 
903  src_cb = pic->f.data[1] + ((mx >> 3) << pixel_shift) +
904  (my >> ysh) * h->mb_uvlinesize;
905  src_cr = pic->f.data[2] + ((mx >> 3) << pixel_shift) +
906  (my >> ysh) * h->mb_uvlinesize;
907 
908  if (emu) {
910  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
911  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
912  src_cb = h->edge_emu_buffer;
913  }
914  chroma_op(dest_cb, src_cb, h->mb_uvlinesize,
915  height >> (chroma_idc == 1 /* yuv420 */),
916  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
917 
918  if (emu) {
920  9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
921  pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
922  src_cr = h->edge_emu_buffer;
923  }
924  chroma_op(dest_cr, src_cr, h->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
925  mx & 7, (my << (chroma_idc == 2 /* yuv422 */)) & 7);
926 }
927 
929  int height, int delta,
930  uint8_t *dest_y, uint8_t *dest_cb,
931  uint8_t *dest_cr,
932  int x_offset, int y_offset,
933  qpel_mc_func *qpix_put,
934  h264_chroma_mc_func chroma_put,
935  qpel_mc_func *qpix_avg,
936  h264_chroma_mc_func chroma_avg,
937  int list0, int list1,
938  int pixel_shift, int chroma_idc)
939 {
940  qpel_mc_func *qpix_op = qpix_put;
941  h264_chroma_mc_func chroma_op = chroma_put;
942 
943  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
944  if (chroma_idc == 3 /* yuv444 */) {
945  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
946  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
947  } else if (chroma_idc == 2 /* yuv422 */) {
948  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
949  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
950  } else { /* yuv420 */
951  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
952  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
953  }
954  x_offset += 8 * h->mb_x;
955  y_offset += 8 * (h->mb_y >> MB_FIELD(h));
956 
957  if (list0) {
958  Picture *ref = &h->ref_list[0][h->ref_cache[0][scan8[n]]];
959  mc_dir_part(h, ref, n, square, height, delta, 0,
960  dest_y, dest_cb, dest_cr, x_offset, y_offset,
961  qpix_op, chroma_op, pixel_shift, chroma_idc);
962 
963  qpix_op = qpix_avg;
964  chroma_op = chroma_avg;
965  }
966 
967  if (list1) {
968  Picture *ref = &h->ref_list[1][h->ref_cache[1][scan8[n]]];
969  mc_dir_part(h, ref, n, square, height, delta, 1,
970  dest_y, dest_cb, dest_cr, x_offset, y_offset,
971  qpix_op, chroma_op, pixel_shift, chroma_idc);
972  }
973 }
974 
976  int height, int delta,
977  uint8_t *dest_y, uint8_t *dest_cb,
978  uint8_t *dest_cr,
979  int x_offset, int y_offset,
980  qpel_mc_func *qpix_put,
981  h264_chroma_mc_func chroma_put,
982  h264_weight_func luma_weight_op,
983  h264_weight_func chroma_weight_op,
984  h264_biweight_func luma_weight_avg,
985  h264_biweight_func chroma_weight_avg,
986  int list0, int list1,
987  int pixel_shift, int chroma_idc)
988 {
989  int chroma_height;
990 
991  dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
992  if (chroma_idc == 3 /* yuv444 */) {
993  chroma_height = height;
994  chroma_weight_avg = luma_weight_avg;
995  chroma_weight_op = luma_weight_op;
996  dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
997  dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * h->mb_linesize;
998  } else if (chroma_idc == 2 /* yuv422 */) {
999  chroma_height = height;
1000  dest_cb += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
1001  dest_cr += (x_offset << pixel_shift) + 2 * y_offset * h->mb_uvlinesize;
1002  } else { /* yuv420 */
1003  chroma_height = height >> 1;
1004  dest_cb += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
1005  dest_cr += (x_offset << pixel_shift) + y_offset * h->mb_uvlinesize;
1006  }
1007  x_offset += 8 * h->mb_x;
1008  y_offset += 8 * (h->mb_y >> MB_FIELD(h));
1009 
1010  if (list0 && list1) {
1011  /* don't optimize for luma-only case, since B-frames usually
1012  * use implicit weights => chroma too. */
1013  uint8_t *tmp_cb = h->bipred_scratchpad;
1014  uint8_t *tmp_cr = h->bipred_scratchpad + (16 << pixel_shift);
1015  uint8_t *tmp_y = h->bipred_scratchpad + 16 * h->mb_uvlinesize;
1016  int refn0 = h->ref_cache[0][scan8[n]];
1017  int refn1 = h->ref_cache[1][scan8[n]];
1018 
1019  mc_dir_part(h, &h->ref_list[0][refn0], n, square, height, delta, 0,
1020  dest_y, dest_cb, dest_cr,
1021  x_offset, y_offset, qpix_put, chroma_put,
1022  pixel_shift, chroma_idc);
1023  mc_dir_part(h, &h->ref_list[1][refn1], n, square, height, delta, 1,
1024  tmp_y, tmp_cb, tmp_cr,
1025  x_offset, y_offset, qpix_put, chroma_put,
1026  pixel_shift, chroma_idc);
1027 
1028  if (h->use_weight == 2) {
1029  int weight0 = h->implicit_weight[refn0][refn1][h->mb_y & 1];
1030  int weight1 = 64 - weight0;
1031  luma_weight_avg(dest_y, tmp_y, h->mb_linesize,
1032  height, 5, weight0, weight1, 0);
1033  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize,
1034  chroma_height, 5, weight0, weight1, 0);
1035  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize,
1036  chroma_height, 5, weight0, weight1, 0);
1037  } else {
1038  luma_weight_avg(dest_y, tmp_y, h->mb_linesize, height,
1040  h->luma_weight[refn0][0][0],
1041  h->luma_weight[refn1][1][0],
1042  h->luma_weight[refn0][0][1] +
1043  h->luma_weight[refn1][1][1]);
1044  chroma_weight_avg(dest_cb, tmp_cb, h->mb_uvlinesize, chroma_height,
1046  h->chroma_weight[refn0][0][0][0],
1047  h->chroma_weight[refn1][1][0][0],
1048  h->chroma_weight[refn0][0][0][1] +
1049  h->chroma_weight[refn1][1][0][1]);
1050  chroma_weight_avg(dest_cr, tmp_cr, h->mb_uvlinesize, chroma_height,
1052  h->chroma_weight[refn0][0][1][0],
1053  h->chroma_weight[refn1][1][1][0],
1054  h->chroma_weight[refn0][0][1][1] +
1055  h->chroma_weight[refn1][1][1][1]);
1056  }
1057  } else {
1058  int list = list1 ? 1 : 0;
1059  int refn = h->ref_cache[list][scan8[n]];
1060  Picture *ref = &h->ref_list[list][refn];
1061  mc_dir_part(h, ref, n, square, height, delta, list,
1062  dest_y, dest_cb, dest_cr, x_offset, y_offset,
1063  qpix_put, chroma_put, pixel_shift, chroma_idc);
1064 
1065  luma_weight_op(dest_y, h->mb_linesize, height,
1067  h->luma_weight[refn][list][0],
1068  h->luma_weight[refn][list][1]);
1069  if (h->use_weight_chroma) {
1070  chroma_weight_op(dest_cb, h->mb_uvlinesize, chroma_height,
1072  h->chroma_weight[refn][list][0][0],
1073  h->chroma_weight[refn][list][0][1]);
1074  chroma_weight_op(dest_cr, h->mb_uvlinesize, chroma_height,
1076  h->chroma_weight[refn][list][1][0],
1077  h->chroma_weight[refn][list][1][1]);
1078  }
1079  }
1080 }
1081 
/* NOTE(review): extraction dropped line 1082, which carried the start of this
 * definition (function name and leading parameters; presumably
 * static void prefetch_motion(H264Context *h, int list, ...) — confirm
 * against the full source). Only comments are added below. */
1083  int pixel_shift, int chroma_idc)
1084 {
1085  /* fetch pixels for estimated mv 4 macroblocks ahead
1086  * optimized for 64byte cache lines */
1087  const int refn = h->ref_cache[list][scan8[0]];
1088  if (refn >= 0) {
/* Estimated luma position: integer part of the cached MV plus the position
 * of a macroblock 4 MBs ahead in decode order. */
1089  const int mx = (h->mv_cache[list][scan8[0]][0] >> 2) + 16 * h->mb_x + 8;
1090  const int my = (h->mv_cache[list][scan8[0]][1] >> 2) + 16 * h->mb_y;
1091  uint8_t **src = h->ref_list[list][refn].f.data;
1092  int off = (mx << pixel_shift) +
1093  (my + (h->mb_x & 3) * 4) * h->mb_linesize +
1094  (64 << pixel_shift);
1095  h->vdsp.prefetch(src[0] + off, h->linesize, 4);
1096  if (chroma_idc == 3 /* yuv444 */) {
/* 4:4:4 — chroma planes share luma geometry, reuse the same offset. */
1097  h->vdsp.prefetch(src[1] + off, h->linesize, 4);
1098  h->vdsp.prefetch(src[2] + off, h->linesize, 4);
1099  } else {
/* Subsampled chroma — recompute offset at half resolution and prefetch
 * both chroma planes via their inter-plane stride. */
1100  off= (((mx>>1)+64)<<pixel_shift) + ((my>>1) + (h->mb_x&7))*h->uvlinesize;
1101  h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1102  }
1103  }
1104 }
1105 
/* Free (or mark for reallocation) the per-context decoding tables.
 * free_rbsp != 0 additionally releases the DPB pictures and the raw
 * NAL (RBSP) buffers of every slice-thread context.
 * NOTE(review): extraction dropped several interior lines (1111-1112,
 * 1118, 1126, 1128-1131, 1150, 1155) — additional av_freep() calls are
 * missing from this view; do not treat the list below as complete. */
1106 static void free_tables(H264Context *h, int free_rbsp)
1107 {
1108  int i;
1109  H264Context *hx;
1110 
1113  av_freep(&h->cbp_table);
1114  av_freep(&h->mvd_table[0]);
1115  av_freep(&h->mvd_table[1]);
1116  av_freep(&h->direct_table);
1117  av_freep(&h->non_zero_count);
1119  h->slice_table = NULL;
1120  av_freep(&h->list_counts);
1121 
1122  av_freep(&h->mb2b_xy);
1123  av_freep(&h->mb2br_xy);
1124 
1125  for (i = 0; i < 3; i++)
1127 
1132 
/* free_rbsp distinguishes a full teardown from a lightweight reset where
 * pictures are only flagged for reallocation. */
1133  if (free_rbsp && h->DPB) {
1134  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1135  unref_picture(h, &h->DPB[i]);
1136  av_freep(&h->DPB);
1137  } else if (h->DPB) {
1138  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1139  h->DPB[i].needs_realloc = 1;
1140  }
1141 
1142  h->cur_pic_ptr = NULL;
1143 
/* Per-thread contexts: free thread-local scratch/ER buffers; context 0 is
 * the master (h itself) and is not freed. */
1144  for (i = 0; i < MAX_THREADS; i++) {
1145  hx = h->thread_context[i];
1146  if (!hx)
1147  continue;
1148  av_freep(&hx->top_borders[1]);
1149  av_freep(&hx->top_borders[0]);
1151  av_freep(&hx->edge_emu_buffer);
1152  av_freep(&hx->dc_val_base);
1153  av_freep(&hx->me.scratchpad);
1154  av_freep(&hx->er.mb_index2xy);
1156  av_freep(&hx->er.er_temp_buffer);
1157  av_freep(&hx->er.mbintra_table);
1158  av_freep(&hx->er.mbskip_table);
1159 
1160  if (free_rbsp) {
1161  av_freep(&hx->rbsp_buffer[1]);
1162  av_freep(&hx->rbsp_buffer[0]);
1163  hx->rbsp_buffer_size[0] = 0;
1164  hx->rbsp_buffer_size[1] = 0;
1165  }
1166  if (i)
1167  av_freep(&h->thread_context[i]);
1168  }
1169 }
1170 
/* Build the 8x8 dequantization tables for every QP from the PPS 8x8 scaling
 * matrices. Matrices identical to an earlier one share its buffer instead of
 * being recomputed.
 * NOTE(review): the signature line (1171, presumably
 * static void init_dequant8_coeff_table(H264Context *h)) was lost in
 * extraction. */
1172 {
1173  int i, j, q, x;
1174  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1175 
1176  for (i = 0; i < 6; i++) {
1177  h->dequant8_coeff[i] = h->dequant8_buffer[i];
/* Alias this matrix to an earlier identical one to save the recompute. */
1178  for (j = 0; j < i; j++)
1179  if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
1180  64 * sizeof(uint8_t))) {
1181  h->dequant8_coeff[i] = h->dequant8_buffer[j];
1182  break;
1183  }
1184  if (j < i)
1185  continue;
1186 
/* For each QP: coefficient = init-table value * scaling-matrix entry,
 * scaled by 2^(qp/6); rem6/div6 split qp into period and shift. */
1187  for (q = 0; q < max_qp + 1; q++) {
1188  int shift = div6[q];
1189  int idx = rem6[q];
1190  for (x = 0; x < 64; x++)
1191  h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
1192  ((uint32_t)dequant8_coeff_init[idx][dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
1193  h->pps.scaling_matrix8[i][x]) << shift;
1194  }
1195  }
1196 }
1197 
/* Build the 4x4 dequantization tables for every QP from the PPS 4x4 scaling
 * matrices; mirrors init_dequant8_coeff_table() above.
 * NOTE(review): the signature line (1198, presumably
 * static void init_dequant4_coeff_table(H264Context *h)) was lost in
 * extraction. */
1199 {
1200  int i, j, q, x;
1201  const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
1202  for (i = 0; i < 6; i++) {
1203  h->dequant4_coeff[i] = h->dequant4_buffer[i];
/* Share the buffer of an earlier identical scaling matrix. */
1204  for (j = 0; j < i; j++)
1205  if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
1206  16 * sizeof(uint8_t))) {
1207  h->dequant4_coeff[i] = h->dequant4_buffer[j];
1208  break;
1209  }
1210  if (j < i)
1211  continue;
1212 
1213  for (q = 0; q < max_qp + 1; q++) {
1214  int shift = div6[q] + 2;
1215  int idx = rem6[q];
1216  for (x = 0; x < 16; x++)
1217  h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
1218  ((uint32_t)dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
1219  h->pps.scaling_matrix4[i][x]) << shift;
1220  }
1221  }
1222 }
1223 
/* Top-level dequant-table setup: builds the 4x4 (and, with 8x8 transform
 * mode, the 8x8) tables, then forces identity (1 << 6) coefficients for QP 0
 * when transform bypass (lossless) is enabled.
 * NOTE(review): extraction dropped lines 1224 (signature), 1227, 1229 and
 * 1234 — the calls to the two table builders and part of the bypass branch
 * are missing from this view. */
1225 {
1226  int i, x;
1228  if (h->pps.transform_8x8_mode)
1230  if (h->sps.transform_bypass) {
1231  for (i = 0; i < 6; i++)
1232  for (x = 0; x < 16; x++)
1233  h->dequant4_coeff[i][0][x] = 1 << 6;
1235  for (i = 0; i < 6; i++)
1236  for (x = 0; x < 64; x++)
1237  h->dequant8_coeff[i][0][x] = 1 << 6;
1238  }
1239 }
1240 
/* Allocate the per-stream macroblock tables (slice table, cbp, mvd, direct,
 * non-zero-count, mb2b/mb2br index maps) and the DPB; returns 0 on success,
 * negative on allocation failure (everything freed via free_tables()).
 * NOTE(review): extraction dropped the signature line (1241) and the first
 * line of most FF_ALLOCZ_OR_GOTO() invocations — only the size/fail argument
 * lines survive below. */
1242 {
1243  const int big_mb_num = h->mb_stride * (h->mb_height + 1);
1244  const int row_mb_num = 2*h->mb_stride*FFMAX(h->avctx->thread_count, 1);
1245  int x, y, i;
1246 
1248  row_mb_num * 8 * sizeof(uint8_t), fail)
1250  big_mb_num * 48 * sizeof(uint8_t), fail)
1252  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
1254  big_mb_num * sizeof(uint16_t), fail)
1256  big_mb_num * sizeof(uint8_t), fail)
1257  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
1258  16 * row_mb_num * sizeof(uint8_t), fail);
1259  FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
1260  16 * row_mb_num * sizeof(uint8_t), fail);
1262  4 * big_mb_num * sizeof(uint8_t), fail);
1264  big_mb_num * sizeof(uint8_t), fail)
1265 
/* -1 marks "no slice"; slice_table points past a guard band of one MB row
 * plus one column so neighbor lookups need no bounds checks. */
1266  memset(h->slice_table_base, -1,
1267  (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
1268  h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
1269 
1271  big_mb_num * sizeof(uint32_t), fail);
1273  big_mb_num * sizeof(uint32_t), fail);
/* Precompute MB-index -> block-index maps used throughout MV prediction. */
1274  for (y = 0; y < h->mb_height; y++)
1275  for (x = 0; x < h->mb_width; x++) {
1276  const int mb_xy = x + y * h->mb_stride;
1277  const int b_xy = 4 * x + 4 * y * h->b_stride;
1278 
1279  h->mb2b_xy[mb_xy] = b_xy;
1280  h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
1281  }
1282 
1283  if (!h->dequant4_coeff[0])
1285 
1286  if (!h->DPB) {
1287  h->DPB = av_mallocz_array(MAX_PICTURE_COUNT, sizeof(*h->DPB));
1288  if (!h->DPB)
1289  return AVERROR(ENOMEM);
1290  for (i = 0; i < MAX_PICTURE_COUNT; i++)
1293  }
1294 
1295  return 0;
1296 
1297 fail:
1298  free_tables(h, 1);
1299  return -1;
1300 }
1301 
1302 /**
1303  * Mimic alloc_tables(), but for every context thread.
1304  */
/* Share the master context's tables with slice-thread context i: most tables
 * are plain pointer copies; the per-row tables (intra4x4_pred_mode, mvd)
 * get an offset of i rows so each thread writes a disjoint slice.
 * Per-thread scratch buffers are reset to NULL so each thread allocates its
 * own. NOTE(review): lines 1313 and 1324 are missing from this view. */
1307  dst->intra4x4_pred_mode = src->intra4x4_pred_mode + i * 8 * 2 * src->mb_stride;
1308  dst->non_zero_count = src->non_zero_count;
1309  dst->slice_table = src->slice_table;
1310  dst->cbp_table = src->cbp_table;
1311  dst->mb2b_xy = src->mb2b_xy;
1312  dst->mb2br_xy = src->mb2br_xy;
1314  dst->mvd_table[0] = src->mvd_table[0] + i * 8 * 2 * src->mb_stride;
1315  dst->mvd_table[1] = src->mvd_table[1] + i * 8 * 2 * src->mb_stride;
1316  dst->direct_table = src->direct_table;
1317  dst->list_counts = src->list_counts;
1318  dst->DPB = src->DPB;
1319  dst->cur_pic_ptr = src->cur_pic_ptr;
1320  dst->cur_pic = src->cur_pic;
/* Thread-local scratchpads must not be shared — cleared for lazy alloc. */
1321  dst->bipred_scratchpad = NULL;
1322  dst->edge_emu_buffer = NULL;
1323  dst->me.scratchpad = NULL;
1325  src->sps.chroma_format_idc);
1326 }
1327 
1328 /**
1329  * Init context
1330  * Allocate buffers which are not shared amongst multiple threads.
1331  */
/* Allocate the buffers that are NOT shared between slice threads (top
 * borders, error-resilience state); returns 0 on success, -1 on allocation
 * failure (free_tables() performs the cleanup).
 * NOTE(review): extraction dropped the signature line (1332, presumably
 * static int context_init(H264Context *h)) and the first line of several
 * FF_ALLOCZ_OR_GOTO() invocations. */
1333 {
1334  ERContext *er = &h->er;
1335  int mb_array_size = h->mb_height * h->mb_stride;
1336  int y_size = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
1337  int c_size = h->mb_stride * (h->mb_height + 1);
1338  int yc_size = y_size + 2 * c_size;
1339  int x, y, i;
1340 
1342  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1344  h->mb_width * 16 * 3 * sizeof(uint8_t) * 2, fail)
1345 
/* Mark the reference-cache guard entries right of 8x8 partitions as
 * permanently unavailable. */
1346  h->ref_cache[0][scan8[5] + 1] =
1347  h->ref_cache[0][scan8[7] + 1] =
1348  h->ref_cache[0][scan8[13] + 1] =
1349  h->ref_cache[1][scan8[5] + 1] =
1350  h->ref_cache[1][scan8[7] + 1] =
1351  h->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
1352 
1353  if (CONFIG_ERROR_RESILIENCE) {
1354  /* init ER */
1355  er->avctx = h->avctx;
1356  er->dsp = &h->dsp;
1358  er->opaque = h;
1359  er->quarter_sample = 1;
1360 
1361  er->mb_num = h->mb_num;
1362  er->mb_width = h->mb_width;
1363  er->mb_height = h->mb_height;
1364  er->mb_stride = h->mb_stride;
1365  er->b8_stride = h->mb_width * 2 + 1;
1366 
1367  FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy, (h->mb_num + 1) * sizeof(int),
1368  fail); // error ressilience code looks cleaner with this
1369  for (y = 0; y < h->mb_height; y++)
1370  for (x = 0; x < h->mb_width; x++)
1371  er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
1372 
1373  er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
1374  h->mb_stride + h->mb_width;
1375 
1377  mb_array_size * sizeof(uint8_t), fail);
1378 
1379  FF_ALLOC_OR_GOTO(h->avctx, er->mbintra_table, mb_array_size, fail);
1380  memset(er->mbintra_table, 1, mb_array_size);
1381 
1382  FF_ALLOCZ_OR_GOTO(h->avctx, er->mbskip_table, mb_array_size + 2, fail);
1383 
1385  fail);
1386 
/* DC prediction values default to 1024 (mid-range for the ER code). */
1387  FF_ALLOCZ_OR_GOTO(h->avctx, h->dc_val_base, yc_size * sizeof(int16_t), fail);
1388  er->dc_val[0] = h->dc_val_base + h->mb_width * 2 + 2;
1389  er->dc_val[1] = h->dc_val_base + y_size + h->mb_stride + 1;
1390  er->dc_val[2] = er->dc_val[1] + c_size;
1391  for (i = 0; i < yc_size; i++)
1392  h->dc_val_base[i] = 1024;
1393  }
1394 
1395  return 0;
1396 
1397 fail:
1398  return -1; // free_tables will clean up for us
1399 }
1400 
1401 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
1402  int parse_extradata);
1403 
/* Parse codec extradata: either an avcC configuration record (first byte 1)
 * containing length-prefixed SPS/PPS NALs, or plain Annex-B data. Returns
 * the number of bytes consumed (size) on success, -1 on error. Side effects:
 * sets h->is_avc and h->nal_length_size.
 * NOTE(review): the signature line (1404, presumably
 * int ff_h264_decode_extradata(H264Context *h, const uint8_t *buf, int size))
 * was lost in extraction. */
1405 {
1406  AVCodecContext *avctx = h->avctx;
1407 
1408  if (!buf || size <= 0)
1409  return -1;
1410 
1411  if (buf[0] == 1) {
1412  int i, cnt, nalsize;
1413  const unsigned char *p = buf;
1414 
1415  h->is_avc = 1;
1416 
1417  if (size < 7) {
1418  av_log(avctx, AV_LOG_ERROR, "avcC too short\n");
1419  return -1;
1420  }
1421  /* sps and pps in the avcC always have length coded with 2 bytes,
1422  * so put a fake nal_length_size = 2 while parsing them */
1423  h->nal_length_size = 2;
1424  // Decode sps from avcC
1425  cnt = *(p + 5) & 0x1f; // Number of sps
1426  p += 6;
1427  for (i = 0; i < cnt; i++) {
/* +2: the 16-bit length prefix is passed through to decode_nal_units(). */
1428  nalsize = AV_RB16(p) + 2;
1429  if(nalsize > size - (p-buf))
1430  return -1;
1431  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1432  av_log(avctx, AV_LOG_ERROR,
1433  "Decoding sps %d from avcC failed\n", i);
1434  return -1;
1435  }
1436  p += nalsize;
1437  }
1438  // Decode pps from avcC
1439  cnt = *(p++); // Number of pps
1440  for (i = 0; i < cnt; i++) {
1441  nalsize = AV_RB16(p) + 2;
1442  if(nalsize > size - (p-buf))
1443  return -1;
1444  if (decode_nal_units(h, p, nalsize, 1) < 0) {
1445  av_log(avctx, AV_LOG_ERROR,
1446  "Decoding pps %d from avcC failed\n", i);
1447  return -1;
1448  }
1449  p += nalsize;
1450  }
1451  // Now store right nal length size, that will be used to parse all other nals
1452  h->nal_length_size = (buf[4] & 0x03) + 1;
1453  } else {
1454  h->is_avc = 0;
1455  if (decode_nal_units(h, buf, size, 1) < 0)
1456  return -1;
1457  }
1458  return size;
1459 }
1460 
/* Decoder init: set 8-bit/4:2:0 defaults, initialize the DSP/prediction
 * helpers, seed POC/frame-number state, adjust the time base for field
 * coding (ticks_per_frame = 2), and parse any extradata. Returns 0 on
 * success, -1 on extradata failure.
 * NOTE(review): extraction dropped the signature line (1461, presumably
 * av_cold int ff_h264_decode_init(AVCodecContext *avctx)) and several
 * interior lines (1476, 1491, 1501-1503, 1528, 1532-1534, 1538). */
1462 {
1463  H264Context *h = avctx->priv_data;
1464  int i;
1465 
1466  h->avctx = avctx;
1467 
/* Defaults until an SPS says otherwise: 8-bit luma, 4:2:0 chroma. */
1468  h->bit_depth_luma = 8;
1469  h->chroma_format_idc = 1;
1470 
1471  h->avctx->bits_per_raw_sample = 8;
1472  h->cur_chroma_format_idc = 1;
1473 
1474  ff_h264dsp_init(&h->h264dsp, 8, 1);
1475  av_assert0(h->sps.bit_depth_chroma == 0);
1477  ff_h264qpel_init(&h->h264qpel, 8);
1478  ff_h264_pred_init(&h->hpc, h->avctx->codec_id, 8, 1);
1479 
1480  h->dequant_coeff_pps = -1;
1481  h->current_sps_id = -1;
1482 
1483  /* needed so that IDCT permutation is known early */
1484  if (CONFIG_ERROR_RESILIENCE)
1485  ff_dsputil_init(&h->dsp, h->avctx);
1486  ff_videodsp_init(&h->vdsp, 8);
1487 
/* Flat (all-16) scaling matrices until a PPS provides real ones. */
1488  memset(h->pps.scaling_matrix4, 16, 6 * 16 * sizeof(uint8_t));
1489  memset(h->pps.scaling_matrix8, 16, 2 * 64 * sizeof(uint8_t));
1490 
1492  h->slice_context_count = 1;
1493  h->workaround_bugs = avctx->workaround_bugs;
1494  h->flags = avctx->flags;
1495 
1496  /* set defaults */
1497  // s->decode_mb = ff_h263_decode_mb;
1498  if (!avctx->has_b_frames)
1499  h->low_delay = 1;
1500 
1502 
1504 
1505  h->pixel_shift = 0;
1506  h->sps.bit_depth_luma = avctx->bits_per_raw_sample = 8;
1507 
1508  h->thread_context[0] = h;
1509  h->outputed_poc = h->next_outputed_poc = INT_MIN;
1510  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
1511  h->last_pocs[i] = INT_MIN;
1512  h->prev_poc_msb = 1 << 16;
1513  h->prev_frame_num = -1;
1514  h->x264_build = -1;
1515  ff_h264_reset_sei(h);
/* H.264 frames are coded as (up to) two fields: make ticks_per_frame 2 and
 * widen the time base accordingly, guarding against den overflow. */
1516  if (avctx->codec_id == AV_CODEC_ID_H264) {
1517  if (avctx->ticks_per_frame == 1) {
1518  if(h->avctx->time_base.den < INT_MAX/2) {
1519  h->avctx->time_base.den *= 2;
1520  } else
1521  h->avctx->time_base.num /= 2;
1522  }
1523  avctx->ticks_per_frame = 2;
1524  }
1525 
1526  if (avctx->extradata_size > 0 && avctx->extradata &&
1527  ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size) < 0) {
1529  return -1;
1530  }
1531 
1535  h->low_delay = 0;
1536  }
1537 
1539  avctx->internal->allocate_progress = 1;
1540 
1541  return 0;
1542 }
1543 
1544 #define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
1545 #undef REBASE_PICTURE
1546 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \
1547  ((pic && pic >= old_ctx->DPB && \
1548  pic < old_ctx->DPB + MAX_PICTURE_COUNT) ? \
1549  &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
1550 
/* Rebase an array of Picture pointers from the old context's DPB to the
 * new context's DPB (used when duplicating contexts for frame threading).
 * NOTE(review): the first signature line (1551, presumably
 * static void copy_picture_range(Picture **to, Picture **from, int count,)
 * was lost in extraction. */
1552  H264Context *new_base,
1553  H264Context *old_base)
1554 {
1555  int i;
1556 
1557  for (i = 0; i < count; i++) {
/* Each source pointer must be NULL, point into the old context itself, or
 * point into the old DPB — anything else would rebase to garbage. */
1558  assert((IN_RANGE(from[i], old_base, sizeof(*old_base)) ||
1559  IN_RANGE(from[i], old_base->DPB,
1560  sizeof(Picture) * MAX_PICTURE_COUNT) ||
1561  !from[i]));
1562  to[i] = REBASE_PICTURE(from[i], new_base, old_base);
1563  }
1564 }
1565 
/**
 * Deep-copy an array of parameter sets (SPS/PPS buffers) from one context
 * to another. Destination slots whose source is NULL are freed; slots that
 * exist only in the source are allocated, then every non-NULL source entry
 * is copied byte-for-byte.
 *
 * Fix: the av_malloc() result was previously used unchecked, so an OOM led
 * to memcpy() through a NULL pointer (undefined behavior). On allocation
 * failure the slot is now skipped and left NULL; the signature is void, so
 * the failure cannot be reported to the caller (matching the original
 * interface).
 *
 * @param to    destination pointer array (count entries)
 * @param from  source pointer array (count entries)
 * @param count number of slots
 * @param size  byte size of one parameter set
 */
static void copy_parameter_set(void **to, void **from, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        if (to[i] && !from[i]) {
            av_freep(&to[i]);
        } else if (from[i] && !to[i]) {
            to[i] = av_malloc(size);
            if (!to[i])
                continue; /* OOM: avoid memcpy(NULL, ...) below */
        }

        if (from[i])
            memcpy(to[i], from[i], size);
    }
}
1580 
/* Frame-thread copy init: on a freshly duplicated context, clear the SPS/PPS
 * pointer tables (they still alias the master's allocations) and mark the
 * context uninitialized so it is rebuilt on first use. No-op on the master.
 * NOTE(review): the signature line (1581, presumably
 * static int decode_init_thread_copy(AVCodecContext *avctx)) was lost in
 * extraction. */
1582 {
1583  H264Context *h = avctx->priv_data;
1584 
1585  if (!avctx->internal->is_copy)
1586  return 0;
1587  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1588  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1589 
1590  h->context_initialized = 0;
1591 
1592  return 0;
1593 }
1594 
1595 #define copy_fields(to, from, start_field, end_field) \
1596  memcpy(&to->start_field, &from->start_field, \
1597  (char *)&to->end_field - (char *)&to->start_field)
1598 
1599 static int h264_slice_header_init(H264Context *, int);
1600 
1602 
/* Synchronize decoder state from the previous frame thread (src/h1) into
 * this one (dst/h): reinitializes on stream-geometry change, performs a
 * one-time deep copy on first use, then copies per-frame state (DPB refs,
 * SPS/PPS, dequant tables, POC state, reference lists). Returns 0 or a
 * negative error code.
 * NOTE(review): extraction dropped the first signature line (1603,
 * presumably int ff_h264_update_thread_context(AVCodecContext *dst,) and
 * several interior lines (1626, 1628, 1652, 1697, 1785, 1791, 1795, 1797). */
1604  const AVCodecContext *src)
1605 {
1606  H264Context *h = dst->priv_data, *h1 = src->priv_data;
1607  int inited = h->context_initialized, err = 0;
1608  int context_reinitialized = 0;
1609  int i, ret;
1610 
1611  if (dst == src)
1612  return 0;
1613 
/* Stream geometry or format changed — rebuild the slice-header state. */
1614  if (inited &&
1615  (h->width != h1->width ||
1616  h->height != h1->height ||
1617  h->mb_width != h1->mb_width ||
1618  h->mb_height != h1->mb_height ||
1619  h->sps.bit_depth_luma != h1->sps.bit_depth_luma ||
1620  h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
1621  h->sps.colorspace != h1->sps.colorspace)) {
1622 
1623  /* set bits_per_raw_sample to the previous value. the check for changed
1624  * bit depth in h264_set_parameter_from_sps() uses it and sets it to
1625  * the current value */
1627 
1629 
1630  h->width = h1->width;
1631  h->height = h1->height;
1632  h->mb_height = h1->mb_height;
1633  h->mb_width = h1->mb_width;
1634  h->mb_num = h1->mb_num;
1635  h->mb_stride = h1->mb_stride;
1636  h->b_stride = h1->b_stride;
1637  // SPS/PPS
1638  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1639  MAX_SPS_COUNT, sizeof(SPS));
1640  h->sps = h1->sps;
1641  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1642  MAX_PPS_COUNT, sizeof(PPS));
1643  h->pps = h1->pps;
1644 
1645  if ((err = h264_slice_header_init(h, 1)) < 0) {
1646  av_log(h->avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
1647  return err;
1648  }
1649  context_reinitialized = 1;
1650 
1651 #if 0
1653  //Note we set context_reinitialized which will cause h264_set_parameter_from_sps to be reexecuted
1654  h->cur_chroma_format_idc = h1->cur_chroma_format_idc;
1655 #endif
1656  }
1657  /* update linesize on resize for h264. The h264 decoder doesn't
1658  * necessarily call ff_MPV_frame_start in the new thread */
1659  h->linesize = h1->linesize;
1660  h->uvlinesize = h1->uvlinesize;
1661 
1662  /* copy block_offset since frame_start may not be called */
1663  memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
1664 
/* First update of this thread: bulk-copy the source context, then reset
 * everything that must be thread-local (pools, scratchpads, rbsp buffers). */
1665  if (!inited) {
1666  for (i = 0; i < MAX_SPS_COUNT; i++)
1667  av_freep(h->sps_buffers + i);
1668 
1669  for (i = 0; i < MAX_PPS_COUNT; i++)
1670  av_freep(h->pps_buffers + i);
1671 
/* Copy in two chunks, skipping the large mb/cabac scratch area between
 * intra_pcm_ptr and cabac — it is zeroed below instead. */
1672  memcpy(h, h1, offsetof(H264Context, intra_pcm_ptr));
1673  memcpy(&h->cabac, &h1->cabac,
1674  sizeof(H264Context) - offsetof(H264Context, cabac));
1675  av_assert0((void*)&h->cabac == &h->mb_padding + 1);
1676 
1677  memset(h->sps_buffers, 0, sizeof(h->sps_buffers));
1678  memset(h->pps_buffers, 0, sizeof(h->pps_buffers));
1679 
1680  memset(&h->er, 0, sizeof(h->er));
1681  memset(&h->me, 0, sizeof(h->me));
1682  memset(&h->mb, 0, sizeof(h->mb));
1683  memset(&h->mb_luma_dc, 0, sizeof(h->mb_luma_dc));
1684  memset(&h->mb_padding, 0, sizeof(h->mb_padding));
1685 
1686  h->avctx = dst;
1687  h->DPB = NULL;
1688  h->qscale_table_pool = NULL;
1689  h->mb_type_pool = NULL;
1690  h->ref_index_pool = NULL;
1691  h->motion_val_pool = NULL;
1692 
1693  if (h1->context_initialized) {
1694  h->context_initialized = 0;
1695 
1696  memset(&h->cur_pic, 0, sizeof(h->cur_pic));
1698  h->cur_pic.tf.f = &h->cur_pic.f;
1699 
1700  if (ff_h264_alloc_tables(h) < 0) {
1701  av_log(dst, AV_LOG_ERROR, "Could not allocate memory for h264\n");
1702  return AVERROR(ENOMEM);
1703  }
1704  context_init(h);
1705  }
1706 
1707  for (i = 0; i < 2; i++) {
1708  h->rbsp_buffer[i] = NULL;
1709  h->rbsp_buffer_size[i] = 0;
1710  }
1711  h->bipred_scratchpad = NULL;
1712  h->edge_emu_buffer = NULL;
1713 
1714  h->thread_context[0] = h;
1715  h->context_initialized = h1->context_initialized;
1716  }
1717 
1718  h->avctx->coded_height = h1->avctx->coded_height;
1719  h->avctx->coded_width = h1->avctx->coded_width;
1720  h->avctx->width = h1->avctx->width;
1721  h->avctx->height = h1->avctx->height;
1722  h->coded_picture_number = h1->coded_picture_number;
1723  h->first_field = h1->first_field;
1724  h->picture_structure = h1->picture_structure;
1725  h->qscale = h1->qscale;
1726  h->droppable = h1->droppable;
1727  h->data_partitioning = h1->data_partitioning;
1728  h->low_delay = h1->low_delay;
1729 
/* Re-reference every source DPB frame into this thread's DPB. */
1730  for (i = 0; h->DPB && i < MAX_PICTURE_COUNT; i++) {
1731  unref_picture(h, &h->DPB[i]);
1732  if (h1->DPB[i].f.data[0] &&
1733  (ret = ref_picture(h, &h->DPB[i], &h1->DPB[i])) < 0)
1734  return ret;
1735  }
1736 
1737  h->cur_pic_ptr = REBASE_PICTURE(h1->cur_pic_ptr, h, h1);
1738  unref_picture(h, &h->cur_pic);
1739  if (h1->cur_pic.f.buf[0] && (ret = ref_picture(h, &h->cur_pic, &h1->cur_pic)) < 0)
1740  return ret;
1741 
1742  h->workaround_bugs = h1->workaround_bugs;
1743  h->low_delay = h1->low_delay;
1744  h->droppable = h1->droppable;
1745 
1746  // extradata/NAL handling
1747  h->is_avc = h1->is_avc;
1748 
1749  // SPS/PPS
1750  copy_parameter_set((void **)h->sps_buffers, (void **)h1->sps_buffers,
1751  MAX_SPS_COUNT, sizeof(SPS));
1752  h->sps = h1->sps;
1753  copy_parameter_set((void **)h->pps_buffers, (void **)h1->pps_buffers,
1754  MAX_PPS_COUNT, sizeof(PPS));
1755  h->pps = h1->pps;
1756 
1757  // Dequantization matrices
1758  // FIXME these are big - can they be only copied when PPS changes?
1759  copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
1760 
/* The coeff pointers alias the buffers — rebase them to this context
 * preserving the source's aliasing pattern. */
1761  for (i = 0; i < 6; i++)
1762  h->dequant4_coeff[i] = h->dequant4_buffer[0] +
1763  (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
1764 
1765  for (i = 0; i < 6; i++)
1766  h->dequant8_coeff[i] = h->dequant8_buffer[0] +
1767  (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
1768 
1769  h->dequant_coeff_pps = h1->dequant_coeff_pps;
1770 
1771  // POC timing
1772  copy_fields(h, h1, poc_lsb, redundant_pic_count);
1773 
1774  // reference lists
1775  copy_fields(h, h1, short_ref, cabac_init_idc);
1776 
1777  copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
1778  copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
1779  copy_picture_range(h->delayed_pic, h1->delayed_pic,
1780  MAX_DELAYED_PIC_COUNT + 2, h, h1);
1781 
1782  h->sync = h1->sync;
1783 
1784  if (context_reinitialized)
1786 
1787  if (!h->cur_pic_ptr)
1788  return 0;
1789 
1790  if (!h->droppable) {
1792  h->prev_poc_msb = h->poc_msb;
1793  h->prev_poc_lsb = h->poc_lsb;
1794  }
1796  h->prev_frame_num = h->frame_num;
1798 
1799  return err;
1800 }
1801 
/* Start decoding a new frame: pick a free DPB slot, allocate its buffers,
 * optionally pre-fill it with mid-gray until the stream is synced, make it
 * the current picture, start error resilience, and precompute the per-block
 * pixel offsets. Returns 0 on success, negative on error.
 * NOTE(review): extraction dropped the signature line (1802, presumably
 * static int h264_frame_start(H264Context *h)) and interior lines 1819,
 * 1830-1831 and 1845. */
1803 {
1804  Picture *pic;
1805  int i, ret;
1806  const int pixel_shift = h->pixel_shift;
/* Mid-range Y/Cb/Cr fill color for frames decoded before sync. */
1807  int c[4] = {
1808  1<<(h->sps.bit_depth_luma-1),
1809  1<<(h->sps.bit_depth_chroma-1),
1810  1<<(h->sps.bit_depth_chroma-1),
1811  -1
1812  };
1813 
1814  if (!ff_thread_can_start_frame(h->avctx)) {
1815  av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1816  return -1;
1817  }
1818 
1820  h->cur_pic_ptr = NULL;
1821 
1822  i = find_unused_picture(h);
1823  if (i < 0) {
1824  av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1825  return i;
1826  }
1827  pic = &h->DPB[i];
1828 
1829  pic->reference = h->droppable ? 0 : h->picture_structure;
1832 
1833  /*
1834  * Zero key_frame here; IDR markings per slice in frame or fields are ORed
1835  * in later.
1836  * See decode_nal_units().
1837  */
1838  pic->f.key_frame = 0;
1839  pic->sync = 0;
1840  pic->mmco_reset = 0;
1841 
1842  if ((ret = alloc_picture(h, pic)) < 0)
1843  return ret;
1844  if(!h->sync && !h->avctx->hwaccel &&
1846  avpriv_color_frame(&pic->f, c);
1847 
1848  h->cur_pic_ptr = pic;
1849  unref_picture(h, &h->cur_pic);
1850  if ((ret = ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
1851  return ret;
1852 
1853  if (CONFIG_ERROR_RESILIENCE) {
1854  ff_er_frame_start(&h->er);
1855  h->er.last_pic =
1856  h->er.next_pic = NULL;
1857  }
1858 
1859  assert(h->linesize && h->uvlinesize);
1860 
/* Precompute the byte offset of each 4x4 block inside the macroblock, for
 * frame (stride) and field (double stride, indices 48+) addressing. */
1861  for (i = 0; i < 16; i++) {
1862  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1863  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1864  }
1865  for (i = 0; i < 16; i++) {
1866  h->block_offset[16 + i] =
1867  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1868  h->block_offset[48 + 16 + i] =
1869  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1870  }
1871 
1872  // s->decode = (h->flags & CODEC_FLAG_PSNR) || !s->encoding ||
1873  // h->cur_pic.reference /* || h->contains_intra */ || 1;
1874 
1875  /* We mark the current picture as non-reference after allocating it, so
1876  * that if we break out due to an error it can be released automatically
1877  * in the next ff_MPV_frame_start().
1878  */
1879  h->cur_pic_ptr->reference = 0;
1880 
1881  h->cur_pic_ptr->field_poc[0] = h->cur_pic_ptr->field_poc[1] = INT_MAX;
1882 
1883  h->next_output_pic = NULL;
1884 
1885  assert(h->cur_pic_ptr->long_ref == 0);
1886 
1887  return 0;
1888 }
1889 
1890 /**
1891  * Run setup operations that must be run after slice header decoding.
1892  * This includes finding the next displayed frame.
1893  *
1894  * @param h h264 master context
1895  * @param setup_finished enough NALs have been read that we can call
1896  * ff_thread_finish_setup()
1897  */
/* NOTE(review): extraction dropped several interior lines of this function
 * (e.g. the SEI pic_struct case labels at 1930-1931, 1934-1935, 1940,
 * 1942-1943, 1949, 1952, and lines 1958, 1962, 1964, 1973-1974, 1990-1992,
 * 1996-1998, 2013, 2031, 2074). Comments below describe only the visible
 * code. */
1898 static void decode_postinit(H264Context *h, int setup_finished)
1899 {
1900  Picture *out = h->cur_pic_ptr;
1901  Picture *cur = h->cur_pic_ptr;
1902  int i, pics, out_of_order, out_idx;
1903 
1904  h->cur_pic_ptr->f.pict_type = h->pict_type;
1905 
1906  if (h->next_output_pic)
1907  return;
1908 
1909  if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
1910  /* FIXME: if we have two PAFF fields in one packet, we can't start
1911  * the next thread here. If we have one field per packet, we can.
1912  * The check in decode_nal_units() is not good enough to find this
1913  * yet, so we assume the worst for now. */
1914  // if (setup_finished)
1915  // ff_thread_finish_setup(h->avctx);
1916  return;
1917  }
1918 
1919  cur->f.interlaced_frame = 0;
1920  cur->f.repeat_pict = 0;
1921 
1922  /* Signal interlacing information externally. */
1923  /* Prioritize picture timing SEI information over used
1924  * decoding process if it exists. */
1925 
/* Map SEI pic_struct to interlaced/repeat flags (case labels for this
 * switch were lost in extraction). */
1926  if (h->sps.pic_struct_present_flag) {
1927  switch (h->sei_pic_struct) {
1928  case SEI_PIC_STRUCT_FRAME:
1929  break;
1932  cur->f.interlaced_frame = 1;
1933  break;
1936  if (FIELD_OR_MBAFF_PICTURE(h))
1937  cur->f.interlaced_frame = 1;
1938  else
1939  // try to flag soft telecine progressive
1941  break;
1944  /* Signal the possibility of telecined film externally
1945  * (pic_struct 5,6). From these hints, let the applications
1946  * decide if they apply deinterlacing. */
1947  cur->f.repeat_pict = 1;
1948  break;
1950  cur->f.repeat_pict = 2;
1951  break;
1953  cur->f.repeat_pict = 4;
1954  break;
1955  }
1956 
1957  if ((h->sei_ct_type & 3) &&
1959  cur->f.interlaced_frame = (h->sei_ct_type & (1 << 1)) != 0;
1960  } else {
1961  /* Derive interlacing flag from used decoding process. */
1963  }
1965 
1966  if (cur->field_poc[0] != cur->field_poc[1]) {
1967  /* Derive top_field_first from field pocs. */
1968  cur->f.top_field_first = cur->field_poc[0] < cur->field_poc[1];
1969  } else {
1970  if (cur->f.interlaced_frame || h->sps.pic_struct_present_flag) {
1971  /* Use picture timing SEI information. Even if it is a
1972  * information of a past frame, better than nothing. */
1975  cur->f.top_field_first = 1;
1976  else
1977  cur->f.top_field_first = 0;
1978  } else {
1979  /* Most likely progressive */
1980  cur->f.top_field_first = 0;
1981  }
1982  }
1983 
1984  cur->mmco_reset = h->mmco_reset;
1985  h->mmco_reset = 0;
1986  // FIXME do something with unavailable reference frames
1987 
1988  /* Sort B-frames into display order */
1989 
1993  h->low_delay = 0;
1994  }
1995 
1999  h->low_delay = 0;
2000  }
2001 
/* Maintain last_pocs as a sliding window of recent POCs to estimate how
 * much reordering the stream actually needs. */
2002  for (i = 0; 1; i++) {
2003  if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
2004  if(i)
2005  h->last_pocs[i-1] = cur->poc;
2006  break;
2007  } else if(i) {
2008  h->last_pocs[i-1]= h->last_pocs[i];
2009  }
2010  }
2011  out_of_order = MAX_DELAYED_PIC_COUNT - i;
2012  if( cur->f.pict_type == AV_PICTURE_TYPE_B
2014  out_of_order = FFMAX(out_of_order, 1);
2015  if (out_of_order == MAX_DELAYED_PIC_COUNT) {
2016  av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
2017  for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
2018  h->last_pocs[i] = INT_MIN;
2019  h->last_pocs[0] = cur->poc;
2020  cur->mmco_reset = 1;
2021  } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
2022  av_log(h->avctx, AV_LOG_VERBOSE, "Increasing reorder buffer to %d\n", out_of_order);
2023  h->avctx->has_b_frames = out_of_order;
2024  h->low_delay = 0;
2025  }
2026 
2027  pics = 0;
2028  while (h->delayed_pic[pics])
2029  pics++;
2030 
2032 
2033  h->delayed_pic[pics++] = cur;
2034  if (cur->reference == 0)
2035  cur->reference = DELAYED_PIC_REF;
2036 
/* Pick the lowest-POC delayed picture (stopping at keyframes/MMCO resets)
 * as the next output candidate. */
2037  out = h->delayed_pic[0];
2038  out_idx = 0;
2039  for (i = 1; h->delayed_pic[i] &&
2040  !h->delayed_pic[i]->f.key_frame &&
2041  !h->delayed_pic[i]->mmco_reset;
2042  i++)
2043  if (h->delayed_pic[i]->poc < out->poc) {
2044  out = h->delayed_pic[i];
2045  out_idx = i;
2046  }
2047  if (h->avctx->has_b_frames == 0 &&
2048  (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset))
2049  h->next_outputed_poc = INT_MIN;
2050  out_of_order = out->poc < h->next_outputed_poc;
2051 
2052  if (out_of_order || pics > h->avctx->has_b_frames) {
2053  out->reference &= ~DELAYED_PIC_REF;
2054  // for frame threading, the owner must be the second field's thread or
2055  // else the first thread can release the picture and reuse it unsafely
2056  for (i = out_idx; h->delayed_pic[i]; i++)
2057  h->delayed_pic[i] = h->delayed_pic[i + 1];
2058  }
2059  if (!out_of_order && pics > h->avctx->has_b_frames) {
2060  h->next_output_pic = out;
2061  if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f.key_frame || h->delayed_pic[0]->mmco_reset)) {
2062  h->next_outputed_poc = INT_MIN;
2063  } else
2064  h->next_outputed_poc = out->poc;
2065  } else {
2066  av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
2067  }
2068 
2069  if (h->next_output_pic && h->next_output_pic->sync) {
2070  h->sync |= 2;
2071  }
2072 
2073  if (setup_finished && !h->avctx->hwaccel)
2075 }
2076 
/* Save the bottom line(s) of the current macroblock row into
 * h->top_borders so deblocking of the next row can read the unfiltered
 * pixels. Handles MBAFF pairs and the 4:4:4 / 4:2:2 / 4:2:0 chroma layouts.
 * NOTE(review): the first signature line (2077, presumably
 * static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,)
 * was lost in extraction. */
2078  uint8_t *src_cb, uint8_t *src_cr,
2079  int linesize, int uvlinesize,
2080  int simple)
2081 {
2082  uint8_t *top_border;
2083  int top_idx = 1;
2084  const int pixel_shift = h->pixel_shift;
2085  int chroma444 = CHROMA444(h);
2086  int chroma422 = CHROMA422(h);
2087 
2088  src_y -= linesize;
2089  src_cb -= uvlinesize;
2090  src_cr -= uvlinesize;
2091 
/* MBAFF: bottom MB of a pair saves the line above it into slot 0; top MB
 * only adjusts top_idx, non-MBAFF bottom MBs have nothing to save here. */
2092  if (!simple && FRAME_MBAFF(h)) {
2093  if (h->mb_y & 1) {
2094  if (!MB_MBAFF(h)) {
2095  top_border = h->top_borders[0][h->mb_x];
2096  AV_COPY128(top_border, src_y + 15 * linesize);
2097  if (pixel_shift)
2098  AV_COPY128(top_border + 16, src_y + 15 * linesize + 16);
2099  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2100  if (chroma444) {
2101  if (pixel_shift) {
2102  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
2103  AV_COPY128(top_border + 48, src_cb + 15 * uvlinesize + 16);
2104  AV_COPY128(top_border + 64, src_cr + 15 * uvlinesize);
2105  AV_COPY128(top_border + 80, src_cr + 15 * uvlinesize + 16);
2106  } else {
2107  AV_COPY128(top_border + 16, src_cb + 15 * uvlinesize);
2108  AV_COPY128(top_border + 32, src_cr + 15 * uvlinesize);
2109  }
2110  } else if (chroma422) {
2111  if (pixel_shift) {
2112  AV_COPY128(top_border + 32, src_cb + 15 * uvlinesize);
2113  AV_COPY128(top_border + 48, src_cr + 15 * uvlinesize);
2114  } else {
2115  AV_COPY64(top_border + 16, src_cb + 15 * uvlinesize);
2116  AV_COPY64(top_border + 24, src_cr + 15 * uvlinesize);
2117  }
2118  } else {
2119  if (pixel_shift) {
2120  AV_COPY128(top_border + 32, src_cb + 7 * uvlinesize);
2121  AV_COPY128(top_border + 48, src_cr + 7 * uvlinesize);
2122  } else {
2123  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
2124  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
2125  }
2126  }
2127  }
2128  }
2129  } else if (MB_MBAFF(h)) {
2130  top_idx = 0;
2131  } else
2132  return;
2133  }
2134 
2135  top_border = h->top_borders[top_idx][h->mb_x];
2136  /* There are two lines saved, the line above the top macroblock
2137  * of a pair, and the line above the bottom macroblock. */
2138  AV_COPY128(top_border, src_y + 16 * linesize);
2139  if (pixel_shift)
2140  AV_COPY128(top_border + 16, src_y + 16 * linesize + 16);
2141 
2142  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2143  if (chroma444) {
2144  if (pixel_shift) {
2145  AV_COPY128(top_border + 32, src_cb + 16 * linesize);
2146  AV_COPY128(top_border + 48, src_cb + 16 * linesize + 16);
2147  AV_COPY128(top_border + 64, src_cr + 16 * linesize);
2148  AV_COPY128(top_border + 80, src_cr + 16 * linesize + 16);
2149  } else {
2150  AV_COPY128(top_border + 16, src_cb + 16 * linesize);
2151  AV_COPY128(top_border + 32, src_cr + 16 * linesize);
2152  }
2153  } else if (chroma422) {
2154  if (pixel_shift) {
2155  AV_COPY128(top_border + 32, src_cb + 16 * uvlinesize);
2156  AV_COPY128(top_border + 48, src_cr + 16 * uvlinesize);
2157  } else {
2158  AV_COPY64(top_border + 16, src_cb + 16 * uvlinesize);
2159  AV_COPY64(top_border + 24, src_cr + 16 * uvlinesize);
2160  }
2161  } else {
2162  if (pixel_shift) {
2163  AV_COPY128(top_border + 32, src_cb + 8 * uvlinesize);
2164  AV_COPY128(top_border + 48, src_cr + 8 * uvlinesize);
2165  } else {
2166  AV_COPY64(top_border + 16, src_cb + 8 * uvlinesize);
2167  AV_COPY64(top_border + 24, src_cr + 8 * uvlinesize);
2168  }
2169  }
2170  }
2171 }
2172 
/* NOTE(review): the first line of this definition (rendered line 2173) is
 * missing from this doxygen export; from the parameter list and body this
 * looks like the deblocking border-exchange helper (xchg_mb_border in
 * FFmpeg) — confirm against the original source. It swaps (or copies, via
 * the XCHG macro defined below) the line above the current macroblock
 * between the picture and the saved top_borders[] rows so the loop filter
 * operates on unfiltered top neighbours. */
2174  uint8_t *src_cb, uint8_t *src_cr,
2175  int linesize, int uvlinesize,
2176  int xchg, int chroma444,
2177  int simple, int pixel_shift)
2178 {
2179  int deblock_topleft;
2180  int deblock_top;
2181  int top_idx = 1;
2182  uint8_t *top_border_m1;
2183  uint8_t *top_border;
2184 
 /* MBAFF: only the appropriate macroblock of a field pair touches the
  * saved border rows; bottom MBs of non-MBAFF pairs bail out early. */
2185  if (!simple && FRAME_MBAFF(h)) {
2186  if (h->mb_y & 1) {
2187  if (!MB_MBAFF(h))
2188  return;
2189  } else {
2190  top_idx = MB_MBAFF(h) ? 0 : 1;
2191  }
2192  }
2193 
 /* deblocking_filter == 2 means filtering must not cross slice
  * boundaries, so neighbours only count if they belong to this slice. */
2194  if (h->deblocking_filter == 2) {
2195  deblock_topleft = h->slice_table[h->mb_xy - 1 - h->mb_stride] == h->slice_num;
2196  deblock_top = h->top_type;
2197  } else {
2198  deblock_topleft = (h->mb_x > 0);
2199  deblock_top = (h->mb_y > !!MB_FIELD(h));
2200  }
2201 
 /* Step the source pointers back one row (and one sample) so they
  * address the line above the macroblock. */
2202  src_y -= linesize + 1 + pixel_shift;
2203  src_cb -= uvlinesize + 1 + pixel_shift;
2204  src_cr -= uvlinesize + 1 + pixel_shift;
2205 
2206  top_border_m1 = h->top_borders[top_idx][h->mb_x - 1];
2207  top_border = h->top_borders[top_idx][h->mb_x];
2208 
 /* XCHG(a, b, xchg): swap 8 (or 16 for high bit depth) bytes when xchg
  * is set, otherwise just copy a into b. */
2209 #define XCHG(a, b, xchg) \
2210  if (pixel_shift) { \
2211  if (xchg) { \
2212  AV_SWAP64(b + 0, a + 0); \
2213  AV_SWAP64(b + 8, a + 8); \
2214  } else { \
2215  AV_COPY128(b, a); \
2216  } \
2217  } else if (xchg) \
2218  AV_SWAP64(b, a); \
2219  else \
2220  AV_COPY64(b, a);
2221 
2222  if (deblock_top) {
2223  if (deblock_topleft) {
2224  XCHG(top_border_m1 + (8 << pixel_shift),
2225  src_y - (7 << pixel_shift), 1);
2226  }
2227  XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
2228  XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
2229  if (h->mb_x + 1 < h->mb_width) {
2230  XCHG(h->top_borders[top_idx][h->mb_x + 1],
2231  src_y + (17 << pixel_shift), 1);
2232  }
 /* Chroma planes are skipped when decoding gray-only. */
2233  if (simple || !CONFIG_GRAY || !(h->flags & CODEC_FLAG_GRAY)) {
2234  if (chroma444) {
2235  if (deblock_topleft) {
2236  XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2237  XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2238  }
2239  XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
2240  XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
2241  XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
2242  XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
2243  if (h->mb_x + 1 < h->mb_width) {
2244  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
2245  XCHG(h->top_borders[top_idx][h->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
2246  }
2247  } else {
2248  if (deblock_topleft) {
2249  XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
2250  XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
2251  }
2252  XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
2253  XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
2254  }
2255  }
2256  }
2257 }
2258 
2259 static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
2260  int index)
2261 {
2262  if (high_bit_depth) {
2263  return AV_RN32A(((int32_t *)mb) + index);
2264  } else
2265  return AV_RN16A(mb + index);
2266 }
2267 
2268 static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
2269  int index, int value)
2270 {
2271  if (high_bit_depth) {
2272  AV_WN32A(((int32_t *)mb) + index, value);
2273  } else
2274  AV_WN16A(mb + index, value);
2275 }
2276 
/* NOTE(review): the signature line (rendered 2277) and several interior
 * lines (2295, 2298, 2320, 2323, 2368 — apparently the pred*_add/idct
 * function-pointer assignments and a condition) are missing from this
 * doxygen export; this appears to be hl_decode_mb_predict_luma() —
 * confirm against the original source. It performs intra prediction for
 * one luma (or 4:4:4 chroma, selected by p) plane and adds the residual:
 * 4x4/8x8 intra modes predict and IDCT per sub-block, intra16x16 predicts
 * the whole MB and dequantizes the DC coefficients separately. */
2278  int mb_type, int is_h264,
2279  int simple,
2280  int transform_bypass,
2281  int pixel_shift,
2282  int *block_offset,
2283  int linesize,
2284  uint8_t *dest_y, int p)
2285 {
2286  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
2287  void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
2288  int i;
 /* p == 0 is luma; p > 0 selects a 4:4:4 chroma plane with its own QP. */
2289  int qscale = p == 0 ? h->qscale : h->chroma_qp[p - 1];
2290  block_offset += 16 * p;
2291  if (IS_INTRA4x4(mb_type)) {
2292  if (IS_8x8DCT(mb_type)) {
2293  if (transform_bypass) {
 /* NOTE(review): the right-hand sides of these assignments (rendered
  * lines 2295/2298) are missing — presumably the add_pixels /
  * h264_idct8_add function pointers. */
2294  idct_dc_add =
2296  } else {
2297  idct_dc_add = h->h264dsp.h264_idct8_dc_add;
2299  }
2300  for (i = 0; i < 16; i += 4) {
2301  uint8_t *const ptr = dest_y + block_offset[i];
2302  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
 /* Lossless High 4:4:4 (profile 244) vertical/horizontal modes have
  * dedicated prediction+add variants in transform-bypass mode. */
2303  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2304  h->hpc.pred8x8l_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2305  } else {
2306  const int nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2307  h->hpc.pred8x8l[dir](ptr, (h->topleft_samples_available << i) & 0x8000,
2308  (h->topright_samples_available << i) & 0x4000, linesize);
2309  if (nnz) {
 /* Single nonzero coefficient in DC position: cheap DC-only IDCT. */
2310  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2311  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2312  else
2313  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2314  }
2315  }
2316  }
2317  } else {
2318  if (transform_bypass) {
2319  idct_dc_add =
2321  } else {
2322  idct_dc_add = h->h264dsp.h264_idct_dc_add;
2324  }
2325  for (i = 0; i < 16; i++) {
2326  uint8_t *const ptr = dest_y + block_offset[i];
2327  const int dir = h->intra4x4_pred_mode_cache[scan8[i]];
2328 
2329  if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
2330  h->hpc.pred4x4_add[dir](ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2331  } else {
2332  uint8_t *topright;
2333  int nnz, tr;
2334  uint64_t tr_high;
 /* Diagonal-down-left and vertical-left need the top-right samples;
  * when unavailable, replicate the last available top sample. */
2335  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
2336  const int topright_avail = (h->topright_samples_available << i) & 0x8000;
2337  av_assert2(h->mb_y || linesize <= block_offset[i]);
2338  if (!topright_avail) {
2339  if (pixel_shift) {
2340  tr_high = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
2341  topright = (uint8_t *)&tr_high;
2342  } else {
2343  tr = ptr[3 - linesize] * 0x01010101u;
2344  topright = (uint8_t *)&tr;
2345  }
2346  } else
2347  topright = ptr + (4 << pixel_shift) - linesize;
2348  } else
2349  topright = NULL;
2350 
2351  h->hpc.pred4x4[dir](ptr, topright, linesize);
2352  nnz = h->non_zero_count_cache[scan8[i + p * 16]];
2353  if (nnz) {
2354  if (is_h264) {
2355  if (nnz == 1 && dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2356  idct_dc_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2357  else
2358  idct_add(ptr, h->mb + (i * 16 + p * 256 << pixel_shift), linesize);
2359  } else if (CONFIG_SVQ3_DECODER)
2360  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize, qscale, 0);
2361  }
2362  }
2363  }
2364  }
2365  } else {
 /* Intra 16x16: predict the whole macroblock at once, then handle the
  * separately-transformed luma DC coefficients. */
2366  h->hpc.pred16x16[h->intra16x16_pred_mode](dest_y, linesize);
2367  if (is_h264) {
2368 
2369  if (!transform_bypass)
2370  h->h264dsp.h264_luma_dc_dequant_idct(h->mb + (p * 256 << pixel_shift),
2371  h->mb_luma_dc[p],
2372  h->dequant4_coeff[p][qscale][0]);
2373  else {
 /* Bypass: scatter the 16 DC values back to each 4x4 block's DC slot
  * (raster order of the Hadamard-scanned DCs). */
2374  static const uint8_t dc_mapping[16] = {
2375  0 * 16, 1 * 16, 4 * 16, 5 * 16,
2376  2 * 16, 3 * 16, 6 * 16, 7 * 16,
2377  8 * 16, 9 * 16, 12 * 16, 13 * 16,
2378  10 * 16, 11 * 16, 14 * 16, 15 * 16 };
2379  for (i = 0; i < 16; i++)
2380  dctcoef_set(h->mb + (p * 256 << pixel_shift),
2381  pixel_shift, dc_mapping[i],
2382  dctcoef_get(h->mb_luma_dc[p],
2383  pixel_shift, i));
2384  }
2385  }
2386  } else if (CONFIG_SVQ3_DECODER)
2387  ff_svq3_luma_dc_dequant_idct_c(h->mb + p * 256,
2388  h->mb_luma_dc[p], qscale);
2389  }
2390 }
2391 
/* NOTE(review): the signature line (rendered 2392) and interior lines
 * 2408-2409 and 2430-2431 (a condition continuation and an idct_add
 * function-pointer assignment) are missing from this export; this appears
 * to be hl_decode_mb_idct_luma() — confirm against the original source.
 * It adds the inverse-transformed residual for one luma (or 4:4:4 chroma,
 * selected by p) plane of a non-intra4x4 macroblock. */
2393  int is_h264, int simple,
2394  int transform_bypass,
2395  int pixel_shift,
2396  int *block_offset,
2397  int linesize,
2398  uint8_t *dest_y, int p)
2399 {
2400  void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
2401  int i;
2402  block_offset += 16 * p;
 /* Intra4x4 residuals were already added during prediction. */
2403  if (!IS_INTRA4x4(mb_type)) {
2404  if (is_h264) {
2405  if (IS_INTRA16x16(mb_type)) {
2406  if (transform_bypass) {
2407  if (h->sps.profile_idc == 244 &&
2410  h->hpc.pred16x16_add[h->intra16x16_pred_mode](dest_y, block_offset,
2411  h->mb + (p * 256 << pixel_shift),
2412  linesize);
2413  } else {
 /* Bypass without the special pred16x16_add path: add each 4x4
  * block that has coefficients (or a nonzero DC). */
2414  for (i = 0; i < 16; i++)
2415  if (h->non_zero_count_cache[scan8[i + p * 16]] ||
2416  dctcoef_get(h->mb, pixel_shift, i * 16 + p * 256))
2417  h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
2418  h->mb + (i * 16 + p * 256 << pixel_shift),
2419  linesize);
2420  }
2421  } else {
2422  h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
2423  h->mb + (p * 256 << pixel_shift),
2424  linesize,
2425  h->non_zero_count_cache + p * 5 * 8);
2426  }
 /* Inter (or non-16x16-intra) MB with coded luma residual. */
2427  } else if (h->cbp & 15) {
2428  if (transform_bypass) {
 /* 8x8 transform covers 4 sub-blocks per step, 4x4 covers 1. */
2429  const int di = IS_8x8DCT(mb_type) ? 4 : 1;
2432  for (i = 0; i < 16; i += di)
2433  if (h->non_zero_count_cache[scan8[i + p * 16]])
2434  idct_add(dest_y + block_offset[i],
2435  h->mb + (i * 16 + p * 256 << pixel_shift),
2436  linesize);
2437  } else {
2438  if (IS_8x8DCT(mb_type))
2439  h->h264dsp.h264_idct8_add4(dest_y, block_offset,
2440  h->mb + (p * 256 << pixel_shift),
2441  linesize,
2442  h->non_zero_count_cache + p * 5 * 8);
2443  else
2444  h->h264dsp.h264_idct_add16(dest_y, block_offset,
2445  h->mb + (p * 256 << pixel_shift),
2446  linesize,
2447  h->non_zero_count_cache + p * 5 * 8);
2448  }
2449  }
2450  } else if (CONFIG_SVQ3_DECODER) {
2451  for (i = 0; i < 16; i++)
2452  if (h->non_zero_count_cache[scan8[i + p * 16]] || h->mb[i * 16 + p * 256]) {
2453  // FIXME benchmark weird rule, & below
2454  uint8_t *const ptr = dest_y + block_offset[i];
2455  ff_svq3_add_idct_c(ptr, h->mb + i * 16 + p * 256, linesize,
2456  h->qscale, IS_INTRA(mb_type) ? 1 : 0);
2457  }
2458  }
2459  }
2460 }
2461 
2462 #define BITS 8
2463 #define SIMPLE 1
2464 #include "h264_mb_template.c"
2465 
2466 #undef BITS
2467 #define BITS 16
2468 #include "h264_mb_template.c"
2469 
2470 #undef SIMPLE
2471 #define SIMPLE 0
2472 #include "h264_mb_template.c"
2473 
/* NOTE(review): the signature line (rendered 2474) is missing from this
 * export; this is the top-level macroblock decode dispatcher
 * (ff_h264_hl_decode_mb in FFmpeg) — confirm against the original source.
 * It picks the fastest template instantiation that can handle the current
 * macroblock: the "simple" 8/16-bit paths for plain cases, the "complex"
 * paths for MBAFF/gray/PCM and other special cases. */
2475 {
2476  const int mb_xy = h->mb_xy;
2477  const int mb_type = h->cur_pic.mb_type[mb_xy];
 /* qscale == 0 is treated as complex to keep the simple path branchless. */
2478  int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
2479 
2480  if (CHROMA444(h)) {
2481  if (is_complex || h->pixel_shift)
2482  hl_decode_mb_444_complex(h);
2483  else
2484  hl_decode_mb_444_simple_8(h);
2485  } else if (is_complex) {
2486  hl_decode_mb_complex(h);
2487  } else if (h->pixel_shift) {
2488  hl_decode_mb_simple_16(h);
2489  } else
2490  hl_decode_mb_simple_8(h);
2491 }
2492 
2494 {
2495  int list, i;
2496  int luma_def, chroma_def;
2497 
2498  h->use_weight = 0;
2499  h->use_weight_chroma = 0;
2501  if (h->sps.chroma_format_idc)
2503  luma_def = 1 << h->luma_log2_weight_denom;
2504  chroma_def = 1 << h->chroma_log2_weight_denom;
2505 
2506  for (list = 0; list < 2; list++) {
2507  h->luma_weight_flag[list] = 0;
2508  h->chroma_weight_flag[list] = 0;
2509  for (i = 0; i < h->ref_count[list]; i++) {
2510  int luma_weight_flag, chroma_weight_flag;
2511 
2512  luma_weight_flag = get_bits1(&h->gb);
2513  if (luma_weight_flag) {
2514  h->luma_weight[i][list][0] = get_se_golomb(&h->gb);
2515  h->luma_weight[i][list][1] = get_se_golomb(&h->gb);
2516  if (h->luma_weight[i][list][0] != luma_def ||
2517  h->luma_weight[i][list][1] != 0) {
2518  h->use_weight = 1;
2519  h->luma_weight_flag[list] = 1;
2520  }
2521  } else {
2522  h->luma_weight[i][list][0] = luma_def;
2523  h->luma_weight[i][list][1] = 0;
2524  }
2525 
2526  if (h->sps.chroma_format_idc) {
2527  chroma_weight_flag = get_bits1(&h->gb);
2528  if (chroma_weight_flag) {
2529  int j;
2530  for (j = 0; j < 2; j++) {
2531  h->chroma_weight[i][list][j][0] = get_se_golomb(&h->gb);
2532  h->chroma_weight[i][list][j][1] = get_se_golomb(&h->gb);
2533  if (h->chroma_weight[i][list][j][0] != chroma_def ||
2534  h->chroma_weight[i][list][j][1] != 0) {
2535  h->use_weight_chroma = 1;
2536  h->chroma_weight_flag[list] = 1;
2537  }
2538  }
2539  } else {
2540  int j;
2541  for (j = 0; j < 2; j++) {
2542  h->chroma_weight[i][list][j][0] = chroma_def;
2543  h->chroma_weight[i][list][j][1] = 0;
2544  }
2545  }
2546  }
2547  }
2549  break;
2550  }
2551  h->use_weight = h->use_weight || h->use_weight_chroma;
2552  return 0;
2553 }
2554 
2555 /**
2556  * Initialize implicit_weight table.
2557  * @param field 0/1 initialize the weight for interlaced MBAFF
2558  * -1 initializes the rest
2559  */
2560 static void implicit_weight_table(H264Context *h, int field)
2561 {
2562  int ref0, ref1, i, cur_poc, ref_start, ref_count0, ref_count1;
2563 
2564  for (i = 0; i < 2; i++) {
2565  h->luma_weight_flag[i] = 0;
2566  h->chroma_weight_flag[i] = 0;
2567  }
2568 
2569  if (field < 0) {
2570  if (h->picture_structure == PICT_FRAME) {
2571  cur_poc = h->cur_pic_ptr->poc;
2572  } else {
2573  cur_poc = h->cur_pic_ptr->field_poc[h->picture_structure - 1];
2574  }
2575  if (h->ref_count[0] == 1 && h->ref_count[1] == 1 && !FRAME_MBAFF(h) &&
2576  h->ref_list[0][0].poc + h->ref_list[1][0].poc == 2 * cur_poc) {
2577  h->use_weight = 0;
2578  h->use_weight_chroma = 0;
2579  return;
2580  }
2581  ref_start = 0;
2582  ref_count0 = h->ref_count[0];
2583  ref_count1 = h->ref_count[1];
2584  } else {
2585  cur_poc = h->cur_pic_ptr->field_poc[field];
2586  ref_start = 16;
2587  ref_count0 = 16 + 2 * h->ref_count[0];
2588  ref_count1 = 16 + 2 * h->ref_count[1];
2589  }
2590 
2591  h->use_weight = 2;
2592  h->use_weight_chroma = 2;
2593  h->luma_log2_weight_denom = 5;
2594  h->chroma_log2_weight_denom = 5;
2595 
2596  for (ref0 = ref_start; ref0 < ref_count0; ref0++) {
2597  int poc0 = h->ref_list[0][ref0].poc;
2598  for (ref1 = ref_start; ref1 < ref_count1; ref1++) {
2599  int w = 32;
2600  if (!h->ref_list[0][ref0].long_ref && !h->ref_list[1][ref1].long_ref) {
2601  int poc1 = h->ref_list[1][ref1].poc;
2602  int td = av_clip(poc1 - poc0, -128, 127);
2603  if (td) {
2604  int tb = av_clip(cur_poc - poc0, -128, 127);
2605  int tx = (16384 + (FFABS(td) >> 1)) / td;
2606  int dist_scale_factor = (tb * tx + 32) >> 8;
2607  if (dist_scale_factor >= -64 && dist_scale_factor <= 128)
2608  w = 64 - dist_scale_factor;
2609  }
2610  }
2611  if (field < 0) {
2612  h->implicit_weight[ref0][ref1][0] =
2613  h->implicit_weight[ref0][ref1][1] = w;
2614  } else {
2615  h->implicit_weight[ref0][ref1][field] = w;
2616  }
2617  }
2618  }
2619 }
2620 
2621 /**
2622  * instantaneous decoder refresh.
2623  */
2624 static void idr(H264Context *h)
2625 {
2626  int i;
 /* NOTE(review): rendered line 2627 is missing from this export —
  * presumably a call that drops all reference pictures (an IDR clears
  * the DPB reference set); confirm against the original source. */
 /* Reset POC/frame_num prediction state so the next picture decodes as
  * if the stream started here. */
2628  h->prev_frame_num = 0;
2629  h->prev_frame_num_offset = 0;
2630  h->prev_poc_msb = 1<<16;
2631  h->prev_poc_lsb = 0;
2632  for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
2633  h->last_pocs[i] = INT_MIN;
2634 }
2635 
2636 /* forget old pics after a seek */
2637 static void flush_change(H264Context *h)
2638 {
2639  int i, j;
2640 
2641  h->outputed_poc = h->next_outputed_poc = INT_MIN;
2642  h->prev_interlaced_frame = 1;
2643  idr(h);
2644 
2645  h->prev_frame_num = -1;
2646  if (h->cur_pic_ptr) {
2647  h->cur_pic_ptr->reference = 0;
2648  for (j=i=0; h->delayed_pic[i]; i++)
2649  if (h->delayed_pic[i] != h->cur_pic_ptr)
2650  h->delayed_pic[j++] = h->delayed_pic[i];
2651  h->delayed_pic[j] = NULL;
2652  }
2653  h->first_field = 0;
2654  memset(h->ref_list[0], 0, sizeof(h->ref_list[0]));
2655  memset(h->ref_list[1], 0, sizeof(h->ref_list[1]));
2656  memset(h->default_ref_list[0], 0, sizeof(h->default_ref_list[0]));
2657  memset(h->default_ref_list[1], 0, sizeof(h->default_ref_list[1]));
2658  ff_h264_reset_sei(h);
2659  h->recovery_frame= -1;
2660  h->sync= 0;
2661  h->list_count = 0;
2662  h->current_slice = 0;
2663 }
2664 
2665 /* forget old pics after a seek */
2666 static void flush_dpb(AVCodecContext *avctx)
2667 {
2668  H264Context *h = avctx->priv_data;
2669  int i;
2670 
 /* Release every picture still queued for output.  The loop runs to
  * <= MAX_DELAYED_PIC_COUNT, i.e. one slot past the nominal count —
  * presumably because delayed_pic[] is declared with extra slots;
  * verify against the array declaration in h264.h. */
2671  for (i = 0; i <= MAX_DELAYED_PIC_COUNT; i++) {
2672  if (h->delayed_pic[i])
2673  h->delayed_pic[i]->reference = 0;
2674  h->delayed_pic[i] = NULL;
2675  }
2676 
2677  flush_change(h);
2678 
2679  if (h->DPB)
2680  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2681  unref_picture(h, &h->DPB[i]);
2682  h->cur_pic_ptr = NULL;
2683  unref_picture(h, &h->cur_pic);
2684 
2685  h->mb_x = h->mb_y = 0;
2686 
 /* NOTE(review): rendered lines 2688 and 2690 are missing from this
  * export — presumably two more parse_context field resets; confirm
  * against the original source. */
2687  h->parse_context.state = -1;
2689  h->parse_context.overread = 0;
2691  h->parse_context.index = 0;
2692  h->parse_context.last_index = 0;
2693 }
2694 
/**
 * Compute the picture order count (POC) of the current picture for all
 * three poc_type modes and store it into the supplied output slots.
 *
 * NOTE(review): rendered lines 2700, 2762 and 2764 are missing from this
 * export — presumably the frame_num_offset carry-over assignment and the
 * picture_structure guards in front of the two field_poc stores; confirm
 * against the original source.
 *
 * @param pic_field_poc output: per-field POC values
 * @param pic_poc       output: frame POC (min of the two fields); may be NULL
 * @return 0
 */
2695 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
2696 {
2697  const int max_frame_num = 1 << h->sps.log2_max_frame_num;
2698  int field_poc[2];
2699 
 /* frame_num wrapped -> advance the offset by one full period. */
2701  if (h->frame_num < h->prev_frame_num)
2702  h->frame_num_offset += max_frame_num;
2703 
2704  if (h->sps.poc_type == 0) {
2705  const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
2706 
 /* Reconstruct poc_msb from the lsb wrap direction (spec 8.2.1.1). */
2707  if (h->poc_lsb < h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
2708  h->poc_msb = h->prev_poc_msb + max_poc_lsb;
2709  else if (h->poc_lsb > h->prev_poc_lsb && h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
2710  h->poc_msb = h->prev_poc_msb - max_poc_lsb;
2711  else
2712  h->poc_msb = h->prev_poc_msb;
2713  field_poc[0] =
2714  field_poc[1] = h->poc_msb + h->poc_lsb;
2715  if (h->picture_structure == PICT_FRAME)
2716  field_poc[1] += h->delta_poc_bottom;
2717  } else if (h->sps.poc_type == 1) {
2718  int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
2719  int i;
2720 
2721  if (h->sps.poc_cycle_length != 0)
2722  abs_frame_num = h->frame_num_offset + h->frame_num;
2723  else
2724  abs_frame_num = 0;
2725 
2726  if (h->nal_ref_idc == 0 && abs_frame_num > 0)
2727  abs_frame_num--;
2728 
2729  expected_delta_per_poc_cycle = 0;
2730  for (i = 0; i < h->sps.poc_cycle_length; i++)
2731  // FIXME integrate during sps parse
2732  expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
2733 
2734  if (abs_frame_num > 0) {
2735  int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
2736  int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
2737 
2738  expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
2739  for (i = 0; i <= frame_num_in_poc_cycle; i++)
2740  expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
2741  } else
2742  expectedpoc = 0;
2743 
2744  if (h->nal_ref_idc == 0)
2745  expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
2746 
2747  field_poc[0] = expectedpoc + h->delta_poc[0];
2748  field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
2749 
2750  if (h->picture_structure == PICT_FRAME)
2751  field_poc[1] += h->delta_poc[1];
2752  } else {
 /* poc_type == 2: POC follows decoding order directly. */
2753  int poc = 2 * (h->frame_num_offset + h->frame_num);
2754 
2755  if (!h->nal_ref_idc)
2756  poc--;
2757 
2758  field_poc[0] = poc;
2759  field_poc[1] = poc;
2760  }
2761 
2763  pic_field_poc[0] = field_poc[0];
2765  pic_field_poc[1] = field_poc[1];
2766  if (pic_poc)
2767  *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
2768 
2769  return 0;
2770 }
2771 
2772 /**
2773  * initialize scan tables
2774  */
/* NOTE(review): the signature line (rendered 2775, init_scan_tables in
 * FFmpeg) and interior lines 2787/2789 and 2795/2798/2802/2805 — which by
 * the surrounding pattern look like the CAVLC-variant 8x8 scan tables and
 * their q0 memcpys — are missing from this export; confirm against the
 * original source.  The function builds the transposed zigzag/field scan
 * tables and the matching q0 (transform-bypass) copies. */
2776 {
2777  int i;
2778  for (i = 0; i < 16; i++) {
 /* T(): transpose a 4x4 raster index (swap the 2-bit row/column). */
2779 #define T(x) (x >> 2) | ((x << 2) & 0xF)
2780  h->zigzag_scan[i] = T(zigzag_scan[i]);
2781  h->field_scan[i] = T(field_scan[i]);
2782 #undef T
2783  }
2784  for (i = 0; i < 64; i++) {
 /* T(): transpose an 8x8 raster index (swap the 3-bit row/column). */
2785 #define T(x) (x >> 3) | ((x & 7) << 3)
2786  h->zigzag_scan8x8[i] = T(ff_zigzag_direct[i]);
2788  h->field_scan8x8[i] = T(field_scan8x8[i]);
2790 #undef T
2791  }
 /* q0 tables: with transform bypass QP 0 blocks use the untransposed
  * scan order, otherwise they alias the regular tables. */
2792  if (h->sps.transform_bypass) { // FIXME same ugly
2793  memcpy(h->zigzag_scan_q0 , zigzag_scan , sizeof(h->zigzag_scan_q0 ));
2794  memcpy(h->zigzag_scan8x8_q0 , ff_zigzag_direct , sizeof(h->zigzag_scan8x8_q0 ));
2796  memcpy(h->field_scan_q0 , field_scan , sizeof(h->field_scan_q0 ));
2797  memcpy(h->field_scan8x8_q0 , field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
2799  } else {
2800  memcpy(h->zigzag_scan_q0 , h->zigzag_scan , sizeof(h->zigzag_scan_q0 ));
2801  memcpy(h->zigzag_scan8x8_q0 , h->zigzag_scan8x8 , sizeof(h->zigzag_scan8x8_q0 ));
2803  memcpy(h->field_scan_q0 , h->field_scan , sizeof(h->field_scan_q0 ));
2804  memcpy(h->field_scan8x8_q0 , h->field_scan8x8 , sizeof(h->field_scan8x8_q0 ));
2806  }
2807 }
2808 
/**
 * Finish decoding of the current field/frame: commit POC/frame_num state,
 * notify hwaccel/VDPAU, run error resilience, and report frame completion.
 *
 * NOTE(review): several rendered lines are missing from this export
 * (2816-2817, 2821, 2825, 2827, 2837-2838, 2858-2859) — by context the
 * VDPAU set-reference / picture-complete calls, additional prev_* state
 * commits, and the thread progress report; confirm against the original
 * source before relying on this listing.
 */
2809 static int field_end(H264Context *h, int in_setup)
2810 {
2811  AVCodecContext *const avctx = h->avctx;
2812  int err = 0;
2813  h->mb_y = 0;
2814 
2815  if (CONFIG_H264_VDPAU_DECODER &&
2818 
 /* Only commit prediction state when not frame-threaded (the frame
  * thread path commits it during setup instead). */
2819  if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
2820  if (!h->droppable) {
2822  h->prev_poc_msb = h->poc_msb;
2823  h->prev_poc_lsb = h->poc_lsb;
2824  }
2826  h->prev_frame_num = h->frame_num;
2828  }
2829 
2830  if (avctx->hwaccel) {
2831  if (avctx->hwaccel->end_frame(avctx) < 0)
2832  av_log(avctx, AV_LOG_ERROR,
2833  "hardware accelerator failed to decode picture\n");
2834  }
2835 
2836  if (CONFIG_H264_VDPAU_DECODER &&
2839 
2840  /*
2841  * FIXME: Error handling code does not seem to support interlaced
2842  * when slices span multiple rows
2843  * The ff_er_add_slice calls don't work right for bottom
2844  * fields; they cause massive erroneous error concealing
2845  * Error marking covers both fields (top and bottom).
2846  * This causes a mismatched s->error_count
2847  * and a bad error table. Further, the error count goes to
2848  * INT_MAX when called for bottom field, because mb_y is
2849  * past end by one (callers fault) and resync_mb_y != 0
2850  * causes problems for the first MB line, too.
2851  */
2852  if (CONFIG_ERROR_RESILIENCE &&
2853  !FIELD_PICTURE(h) && h->current_slice && !h->sps.new) {
2854  h->er.cur_pic = h->cur_pic_ptr;
2855  ff_er_frame_end(&h->er);
2856  }
2857  if (!in_setup && !h->droppable)
2860  emms_c();
2861 
2862  h->current_slice = 0;
2863 
2864  return err;
2865 }
2866 
2867 /**
2868  * Replicate H264 "master" context to thread contexts.
2869  */
/* NOTE(review): the signature line (rendered 2870) is missing from this
 * export; by the dst/src parameter usage this is the helper that
 * replicates per-picture decoding state from the master context into a
 * slice-thread context (clone_slice in FFmpeg) — confirm against the
 * original source.  Rendered line 2881 is also missing (presumably the
 * prev_frame_num_offset copy between the two prev_* copies). */
2871 {
 /* Shallow-copy current-picture state and the geometry derived from it. */
2872  memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
2873  dst->cur_pic_ptr = src->cur_pic_ptr;
2874  dst->cur_pic = src->cur_pic;
2875  dst->linesize = src->linesize;
2876  dst->uvlinesize = src->uvlinesize;
2877  dst->first_field = src->first_field;
2878 
 /* POC / frame_num prediction state. */
2879  dst->prev_poc_msb = src->prev_poc_msb;
2880  dst->prev_poc_lsb = src->prev_poc_lsb;
2882  dst->prev_frame_num = src->prev_frame_num;
2883  dst->short_ref_count = src->short_ref_count;
2884 
 /* Reference lists and dequant tables are copied wholesale. */
2885  memcpy(dst->short_ref, src->short_ref, sizeof(dst->short_ref));
2886  memcpy(dst->long_ref, src->long_ref, sizeof(dst->long_ref));
2887  memcpy(dst->default_ref_list, src->default_ref_list, sizeof(dst->default_ref_list));
2888 
2889  memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
2890  memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
2891 
2892  return 0;
2893 }
2894 
2895 /**
2896  * Compute profile from profile_idc and constraint_set?_flags.
2897  *
2898  * @param sps SPS
2899  *
2900  * @return profile as defined by FF_PROFILE_H264_*
2901  */
/* NOTE(review): the signature line (rendered 2902, ff_h264_get_profile
 * per the doxygen comment above) and the case labels at rendered lines
 * 2907 and 2911-2913 are missing from this export; by the constraint-flag
 * logic those cases cover the baseline/main and high-bit-depth/4:4:4
 * profile_idc values respectively — confirm against the original source. */
2903 {
2904  int profile = sps->profile_idc;
2905 
2906  switch (sps->profile_idc) {
2908  // constraint_set1_flag set to 1
2909  profile |= (sps->constraint_set_flags & 1 << 1) ? FF_PROFILE_H264_CONSTRAINED : 0;
2910  break;
2914  // constraint_set3_flag set to 1
2915  profile |= (sps->constraint_set_flags & 1 << 3) ? FF_PROFILE_H264_INTRA : 0;
2916  break;
2917  }
2918 
2919  return profile;
2920 }
2921 
/* NOTE(review): the signature line (rendered 2922) and numerous interior
 * lines (2925, 2938, 2944, 2946, 2948, 2954-2955, 2958, 2960-2962, 2967)
 * are missing from this export; by the body this is the function that
 * derives decoder settings (low delay, bit depth, DSP function tables)
 * from a freshly activated SPS (h264_set_parameter_from_sps in FFmpeg) —
 * confirm against the original source. */
2923 {
 /* Low-delay operation can only be (re)enabled before any frame has
  * been queued for reordered output. */
2924  if (h->flags & CODEC_FLAG_LOW_DELAY ||
2926  !h->sps.num_reorder_frames)) {
2927  if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
2928  av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
2929  "Reenabling low delay requires a codec flush.\n");
2930  else
2931  h->low_delay = 1;
2932  }
2933 
2934  if (h->avctx->has_b_frames < 2)
2935  h->avctx->has_b_frames = !h->low_delay;
2936 
2937  if (h->sps.bit_depth_luma != h->sps.bit_depth_chroma) {
2939  "Different chroma and luma bit depth");
2940  return AVERROR_PATCHWELCOME;
2941  }
2942 
 /* Bit depth changed: re-init the bit-depth-dependent DSP contexts. */
2943  if (h->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
2945  if (h->avctx->codec &&
2947  (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
2949  "VDPAU decoding does not support video colorspace.\n");
2950  return AVERROR_INVALIDDATA;
2951  }
 /* Supported luma depths: 8..14 except 11 and 13. */
2952  if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 &&
2953  h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13) {
2956  h->pixel_shift = h->sps.bit_depth_luma > 8;
2957 
2958  h->sps.chroma_format_idc);
2963  h->sps.chroma_format_idc);
2964 
2965  if (CONFIG_ERROR_RESILIENCE)
2966  ff_dsputil_init(&h->dsp, h->avctx);
2968  } else {
2969  av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
2970  h->sps.bit_depth_luma);
2971  return AVERROR_INVALIDDATA;
2972  }
2973  }
2974  return 0;
2975 }
2976 
/**
 * Map the stream's bit depth / chroma format (and, for 8-bit, the signalled
 * colorspace) to an AVPixelFormat, going through ff_thread_get_format for
 * the ambiguous 8-bit 4:2:0/4:2:2 case so hwaccels can be negotiated.
 *
 * NOTE(review): rendered lines 3032-3033, 3035-3036, 3041-3043 and 3052
 * are missing from this export — by context the 8-bit 4:4:4/4:2:2 return
 * statements, the default pix_fmt candidate list, and the error log call;
 * confirm against the original source.
 *
 * @param force_callback when set, always renegotiate via get_format even
 *                       if the current pix_fmt already matches
 */
2977 static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
2978 {
2979  switch (h->sps.bit_depth_luma) {
2980  case 9:
2981  if (CHROMA444(h)) {
2982  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2983  return AV_PIX_FMT_GBRP9;
2984  } else
2985  return AV_PIX_FMT_YUV444P9;
2986  } else if (CHROMA422(h))
2987  return AV_PIX_FMT_YUV422P9;
2988  else
2989  return AV_PIX_FMT_YUV420P9;
2990  break;
2991  case 10:
2992  if (CHROMA444(h)) {
2993  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
2994  return AV_PIX_FMT_GBRP10;
2995  } else
2996  return AV_PIX_FMT_YUV444P10;
2997  } else if (CHROMA422(h))
2998  return AV_PIX_FMT_YUV422P10;
2999  else
3000  return AV_PIX_FMT_YUV420P10;
3001  break;
3002  case 12:
3003  if (CHROMA444(h)) {
3004  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3005  return AV_PIX_FMT_GBRP12;
3006  } else
3007  return AV_PIX_FMT_YUV444P12;
3008  } else if (CHROMA422(h))
3009  return AV_PIX_FMT_YUV422P12;
3010  else
3011  return AV_PIX_FMT_YUV420P12;
3012  break;
3013  case 14:
3014  if (CHROMA444(h)) {
3015  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3016  return AV_PIX_FMT_GBRP14;
3017  } else
3018  return AV_PIX_FMT_YUV444P14;
3019  } else if (CHROMA422(h))
3020  return AV_PIX_FMT_YUV422P14;
3021  else
3022  return AV_PIX_FMT_YUV420P14;
3023  break;
3024  case 8:
3025  if (CHROMA444(h)) {
3026  if (h->avctx->colorspace == AVCOL_SPC_RGB) {
3027  av_log(h->avctx, AV_LOG_DEBUG, "Detected GBR colorspace.\n");
3028  return AV_PIX_FMT_GBR24P;
3029  } else if (h->avctx->colorspace == AVCOL_SPC_YCGCO) {
3030  av_log(h->avctx, AV_LOG_WARNING, "Detected unsupported YCgCo colorspace.\n");
3031  }
3034  } else if (CHROMA422(h)) {
3037  } else {
 /* 8-bit 4:2:0: offer the codec's pix_fmt list (or a default list,
  * on the missing rendered lines) to the get_format negotiation. */
3038  int i;
3039  const enum AVPixelFormat * fmt = h->avctx->codec->pix_fmts ?
3040  h->avctx->codec->pix_fmts :
3044 
3045  for (i=0; fmt[i] != AV_PIX_FMT_NONE; i++)
3046  if (fmt[i] == h->avctx->pix_fmt && !force_callback)
3047  return fmt[i];
3048  return ff_thread_get_format(h->avctx, fmt);
3049  }
3050  break;
3051  default:
3053  "Unsupported bit depth: %d\n", h->sps.bit_depth_luma);
3054  return AVERROR_INVALIDDATA;
3055  }
3056 }
3057 
3058 /* export coded and cropped frame dimensions to AVCodecContext */
/* NOTE(review): the signature line (rendered 3059, init_dimensions per the
 * comment above) and rendered line 3077 are missing from this export.
 * Line 3077 almost certainly held the error-recognition condition that
 * decides between the hard failure (3078) and the ignore-cropping warning
 * path (3080-3085) — without it the return at 3078 would make 3080-3085
 * unreachable; confirm against the original source. */
3060 {
3061  int width = h->width - (h->sps.crop_right + h->sps.crop_left);
3062  int height = h->height - (h->sps.crop_top + h->sps.crop_bottom);
3063  av_assert0(h->sps.crop_right + h->sps.crop_left < (unsigned)h->width);
3064  av_assert0(h->sps.crop_top + h->sps.crop_bottom < (unsigned)h->height);
3065 
 /* If the SPS has no cropping but the container already specifies
  * dimensions consistent with the coded (16-aligned) size, honour the
  * container's cropping instead. */
3066  /* handle container cropping */
3067  if (!h->sps.crop &&
3068  FFALIGN(h->avctx->width, 16) == h->width &&
3069  FFALIGN(h->avctx->height, 16) == h->height) {
3070  width = h->avctx->width;
3071  height = h->avctx->height;
3072  }
3073 
3074  if (width <= 0 || height <= 0) {
3075  av_log(h->avctx, AV_LOG_ERROR, "Invalid cropped dimensions: %dx%d.\n",
3076  width, height);
3078  return AVERROR_INVALIDDATA;
3079 
3080  av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
3081  h->sps.crop_bottom = h->sps.crop_top = h->sps.crop_right = h->sps.crop_left = 0;
3082  h->sps.crop = 0;
3083 
3084  width = h->width;
3085  height = h->height;
3086  }
3087 
3088  h->avctx->coded_width = h->width;
3089  h->avctx->coded_height = h->height;
3090  h->avctx->width = width;
3091  h->avctx->height = height;
3092 
3093  return 0;
3094 }
3095 
/* NOTE(review): the signature line (rendered 3096) and many interior lines
 * (3099, 3104-3105, 3112, 3116, 3125, 3162, 3167-3168, 3171, 3178) are
 * missing from this export; by the body this is the function that
 * (re)initializes per-stream decoding state once the SPS geometry is known
 * (h264_slice_header_init in FFmpeg), including slice-threading contexts —
 * confirm against the original source. */
3097 {
 /* One slice context per thread when slice threading is active. */
3098  int nb_slices = (HAVE_THREADS &&
3100  h->avctx->thread_count : 1;
3101  int i;
3102 
3103  h->avctx->sample_aspect_ratio = h->sps.sar;
3106  &h->chroma_x_shift, &h->chroma_y_shift);
3107 
3108  if (h->sps.timing_info_present_flag) {
3109  int64_t den = h->sps.time_scale;
 /* Old x264 (< build 44) wrote half the correct time_scale. */
3110  if (h->x264_build < 44U)
3111  den *= 2;
3113  h->sps.num_units_in_tick, den, 1 << 30);
3114  }
3115 
3117 
3118  if (reinit)
3119  free_tables(h, 0);
3120  h->first_field = 0;
3121  h->prev_interlaced_frame = 1;
3122 
3123  init_scan_tables(h);
3124  if (ff_h264_alloc_tables(h) < 0) {
3126  "Could not allocate memory for h264\n");
3127  return AVERROR(ENOMEM);
3128  }
3129 
 /* Clamp the slice-thread count to something the frame can support. */
3130  if (nb_slices > MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
3131  int max_slices;
3132  if (h->mb_height)
3133  max_slices = FFMIN(MAX_THREADS, h->mb_height);
3134  else
3135  max_slices = MAX_THREADS;
3136  av_log(h->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
3137  " reducing to %d\n", nb_slices, max_slices);
3138  nb_slices = max_slices;
3139  }
3140  h->slice_context_count = nb_slices;
3141 
3142  if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_SLICE)) {
3143  if (context_init(h) < 0) {
3144  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
3145  return -1;
3146  }
3147  } else {
 /* Slice threading: clone the master context into per-thread copies
  * (thread 0 reuses the master context itself). */
3148  for (i = 1; i < h->slice_context_count; i++) {
3149  H264Context *c;
3150  c = h->thread_context[i] = av_mallocz(sizeof(H264Context));
3151  c->avctx = h->avctx;
3152  if (CONFIG_ERROR_RESILIENCE) {
3153  c->dsp = h->dsp;
3154  }
3155  c->vdsp = h->vdsp;
3156  c->h264dsp = h->h264dsp;
3157  c->h264qpel = h->h264qpel;
3158  c->h264chroma = h->h264chroma;
3159  c->sps = h->sps;
3160  c->pps = h->pps;
3161  c->pixel_shift = h->pixel_shift;
3163  c->width = h->width;
3164  c->height = h->height;
3165  c->linesize = h->linesize;
3166  c->uvlinesize = h->uvlinesize;
3169  c->qscale = h->qscale;
3170  c->droppable = h->droppable;
3172  c->low_delay = h->low_delay;
3173  c->mb_width = h->mb_width;
3174  c->mb_height = h->mb_height;
3175  c->mb_stride = h->mb_stride;
3176  c->mb_num = h->mb_num;
3177  c->flags = h->flags;
3179  c->pict_type = h->pict_type;
3180 
3181  init_scan_tables(c);
3182  clone_tables(c, h, i);
3183  c->context_initialized = 1;
3184  }
3185 
3186  for (i = 0; i < h->slice_context_count; i++)
3187  if (context_init(h->thread_context[i]) < 0) {
3188  av_log(h->avctx, AV_LOG_ERROR, "context_init() failed.\n");
3189  return -1;
3190  }
3191  }
3192 
3193  h->context_initialized = 1;
3194 
3195  return 0;
3196 }
3197 
3198 /**
3199  * Decode a slice header.
3200  * This will also call ff_MPV_common_init() and frame_start() as needed.
3201  *
3202  * @param h h264context
3203  * @param h0 h264 master context (differs from 'h' when doing sliced based
3204  * parallel decoding)
3205  *
3206  * @return 0 if okay, <0 if an error occurred, 1 if decoding must not be multithreaded
3207  */
3209 {
3210  unsigned int first_mb_in_slice;
3211  unsigned int pps_id;
3212  int num_ref_idx_active_override_flag, ret;
3213  unsigned int slice_type, tmp, i, j;
3214  int last_pic_structure, last_pic_droppable;
3215  int must_reinit;
3216  int needs_reinit = 0;
3217 
3220 
3221  first_mb_in_slice = get_ue_golomb_long(&h->gb);
3222 
3223  if (first_mb_in_slice == 0) { // FIXME better field boundary detection
3224  if (h0->current_slice && FIELD_PICTURE(h)) {
3225  field_end(h, 1);
3226  }
3227 
3228  h0->current_slice = 0;
3229  if (!h0->first_field) {
3230  if (h->cur_pic_ptr && !h->droppable) {
3233  }
3234  h->cur_pic_ptr = NULL;
3235  }
3236  }
3237 
3238  slice_type = get_ue_golomb_31(&h->gb);
3239  if (slice_type > 9) {
3241  "slice type too large (%d) at %d %d\n",
3242  slice_type, h->mb_x, h->mb_y);
3243  return -1;
3244  }
3245  if (slice_type > 4) {
3246  slice_type -= 5;
3247  h->slice_type_fixed = 1;
3248  } else
3249  h->slice_type_fixed = 0;
3250 
3251  slice_type = golomb_to_pict_type[slice_type];
3252  h->slice_type = slice_type;
3253  h->slice_type_nos = slice_type & 3;
3254 
3255  // to make a few old functions happy, it's wrong though
3256  h->pict_type = h->slice_type;
3257 
3258  pps_id = get_ue_golomb(&h->gb);
3259  if (pps_id >= MAX_PPS_COUNT) {
3260  av_log(h->avctx, AV_LOG_ERROR, "pps_id %d out of range\n", pps_id);
3261  return -1;
3262  }
3263  if (!h0->pps_buffers[pps_id]) {
3265  "non-existing PPS %u referenced\n",
3266  pps_id);
3267  return -1;
3268  }
3269  h->pps = *h0->pps_buffers[pps_id];
3270 
3271  if (!h0->sps_buffers[h->pps.sps_id]) {
3273  "non-existing SPS %u referenced\n",
3274  h->pps.sps_id);
3275  return -1;
3276  }
3277 
3278  if (h->pps.sps_id != h->current_sps_id ||
3279  h0->sps_buffers[h->pps.sps_id]->new) {
3280  h0->sps_buffers[h->pps.sps_id]->new = 0;
3281 
3282  h->current_sps_id = h->pps.sps_id;
3283  h->sps = *h0->sps_buffers[h->pps.sps_id];
3284 
3285  if (h->mb_width != h->sps.mb_width ||
3286  h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) ||
3289  )
3290  needs_reinit = 1;
3291 
3292  if (h->bit_depth_luma != h->sps.bit_depth_luma ||
3296  needs_reinit = 1;
3297  }
3298  if ((ret = h264_set_parameter_from_sps(h)) < 0)
3299  return ret;
3300  }
3301 
3302  h->avctx->profile = ff_h264_get_profile(&h->sps);
3303  h->avctx->level = h->sps.level_idc;
3304  h->avctx->refs = h->sps.ref_frame_count;
3305 
3306  must_reinit = (h->context_initialized &&
3307  ( 16*h->sps.mb_width != h->avctx->coded_width
3308  || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
3312  || h->mb_width != h->sps.mb_width
3313  || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
3314  ));
3315  if (h0->avctx->pix_fmt != get_pixel_format(h0, 0))
3316  must_reinit = 1;
3317 
3318  h->mb_width = h->sps.mb_width;
3319  h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
3320  h->mb_num = h->mb_width * h->mb_height;
3321  h->mb_stride = h->mb_width + 1;
3322 
3323  h->b_stride = h->mb_width * 4;
3324 
3325  h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
3326 
3327  h->width = 16 * h->mb_width;
3328  h->height = 16 * h->mb_height;
3329 
3330  ret = init_dimensions(h);
3331  if (ret < 0)
3332  return ret;
3333 
3336  : AVCOL_RANGE_MPEG;
3338  if (h->avctx->colorspace != h->sps.colorspace)
3339  needs_reinit = 1;
3341  h->avctx->color_trc = h->sps.color_trc;
3342  h->avctx->colorspace = h->sps.colorspace;
3343  }
3344  }
3345 
3346  if (h->context_initialized &&
3347  (h->width != h->avctx->coded_width ||
3348  h->height != h->avctx->coded_height ||
3349  must_reinit ||
3350  needs_reinit)) {
3351 
3352  if (h != h0) {
3353  av_log(h->avctx, AV_LOG_ERROR, "changing width/height on "
3354  "slice %d\n", h0->current_slice + 1);
3355  return AVERROR_INVALIDDATA;
3356  }
3357 
3358  flush_change(h);
3359 
3360  if ((ret = get_pixel_format(h, 1)) < 0)
3361  return ret;
3362  h->avctx->pix_fmt = ret;
3363 
3364  av_log(h->avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
3365  "pix_fmt: %d\n", h->width, h->height, h->avctx->pix_fmt);
3366 
3367  if ((ret = h264_slice_header_init(h, 1)) < 0) {
3369  "h264_slice_header_init() failed\n");
3370  return ret;
3371  }
3372  }
3373  if (!h->context_initialized) {
3374  if (h != h0) {
3376  "Cannot (re-)initialize context during parallel decoding.\n");
3377  return -1;
3378  }
3379 
3380  if ((ret = get_pixel_format(h, 1)) < 0)
3381  return ret;
3382  h->avctx->pix_fmt = ret;
3383 
3384  if ((ret = h264_slice_header_init(h, 0)) < 0) {
3386  "h264_slice_header_init() failed\n");
3387  return ret;
3388  }
3389  }
3390 
3391  if (h == h0 && h->dequant_coeff_pps != pps_id) {
3392  h->dequant_coeff_pps = pps_id;
3394  }
3395 
3396  h->frame_num = get_bits(&h->gb, h->sps.log2_max_frame_num);
3397 
3398  h->mb_mbaff = 0;
3399  h->mb_aff_frame = 0;
3400  last_pic_structure = h0->picture_structure;
3401  last_pic_droppable = h0->droppable;
3402  h->droppable = h->nal_ref_idc == 0;
3403  if (h->sps.frame_mbs_only_flag) {
3405  } else {
3406  if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
3407  av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
3408  return -1;
3409  }
3410  if (get_bits1(&h->gb)) { // field_pic_flag
3411  h->picture_structure = PICT_TOP_FIELD + get_bits1(&h->gb); // bottom_field_flag
3412  } else {
3414  h->mb_aff_frame = h->sps.mb_aff;
3415  }
3416  }
3418 
3419  if (h0->current_slice != 0) {
3420  if (last_pic_structure != h->picture_structure ||
3421  last_pic_droppable != h->droppable) {
3423  "Changing field mode (%d -> %d) between slices is not allowed\n",
3424  last_pic_structure, h->picture_structure);
3425  h->picture_structure = last_pic_structure;
3426  h->droppable = last_pic_droppable;
3427  return AVERROR_INVALIDDATA;
3428  } else if (!h0->cur_pic_ptr) {
3430  "unset cur_pic_ptr on %d. slice\n",
3431  h0->current_slice + 1);
3432  return AVERROR_INVALIDDATA;
3433  }
3434  } else {
3435  /* Shorten frame num gaps so we don't have to allocate reference
3436  * frames just to throw them away */
3437  if (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0) {
3438  int unwrap_prev_frame_num = h->prev_frame_num;
3439  int max_frame_num = 1 << h->sps.log2_max_frame_num;
3440 
3441  if (unwrap_prev_frame_num > h->frame_num)
3442  unwrap_prev_frame_num -= max_frame_num;
3443 
3444  if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
3445  unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
3446  if (unwrap_prev_frame_num < 0)
3447  unwrap_prev_frame_num += max_frame_num;
3448 
3449  h->prev_frame_num = unwrap_prev_frame_num;
3450  }
3451  }
3452 
3453  /* See if we have a decoded first field looking for a pair...
3454  * Here, we're using that to see if we should mark previously
3455  * decode frames as "finished".
3456  * We have to do that before the "dummy" in-between frame allocation,
3457  * since that can modify h->cur_pic_ptr. */
3458  if (h0->first_field) {
3459  assert(h0->cur_pic_ptr);
3460  assert(h0->cur_pic_ptr->f.data[0]);
3461  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3462 
3463  /* Mark old field/frame as completed */
3464  if (!last_pic_droppable && h0->cur_pic_ptr->tf.owner == h0->avctx) {
3465  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3466  last_pic_structure == PICT_BOTTOM_FIELD);
3467  }
3468 
3469  /* figure out if we have a complementary field pair */
3470  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3471  /* Previous field is unmatched. Don't display it, but let it
3472  * remain for reference if marked as such. */
3473  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3474  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3475  last_pic_structure == PICT_TOP_FIELD);
3476  }
3477  } else {
3478  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3479  /* This and previous field were reference, but had
3480  * different frame_nums. Consider this field first in
3481  * pair. Throw away previous field except for reference
3482  * purposes. */
3483  if (!last_pic_droppable && last_pic_structure != PICT_FRAME) {
3484  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3485  last_pic_structure == PICT_TOP_FIELD);
3486  }
3487  } else {
3488  /* Second field in complementary pair */
3489  if (!((last_pic_structure == PICT_TOP_FIELD &&
3491  (last_pic_structure == PICT_BOTTOM_FIELD &&
3494  "Invalid field mode combination %d/%d\n",
3495  last_pic_structure, h->picture_structure);
3496  h->picture_structure = last_pic_structure;
3497  h->droppable = last_pic_droppable;
3498  return AVERROR_INVALIDDATA;
3499  } else if (last_pic_droppable != h->droppable) {
3501  "Found reference and non-reference fields in the same frame, which");
3502  h->picture_structure = last_pic_structure;
3503  h->droppable = last_pic_droppable;
3504  return AVERROR_PATCHWELCOME;
3505  }
3506  }
3507  }
3508  }
3509 
3510  while (h->frame_num != h->prev_frame_num && h->prev_frame_num >= 0 && !h0->first_field &&
3511  h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
3512  Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
3513  av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
3514  h->frame_num, h->prev_frame_num);
3516  for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
3517  h->last_pocs[i] = INT_MIN;
3518  if (h264_frame_start(h) < 0)
3519  return -1;
3520  h->prev_frame_num++;
3521  h->prev_frame_num %= 1 << h->sps.log2_max_frame_num;
3523  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
3524  ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
3525  if ((ret = ff_generate_sliding_window_mmcos(h, 1)) < 0 &&
3527  return ret;
3528  if (ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index) < 0 &&
3530  return AVERROR_INVALIDDATA;
3531  /* Error concealment: if a ref is missing, copy the previous ref in its place.
3532  * FIXME: avoiding a memcpy would be nice, but ref handling makes many assumptions
3533  * about there being no actual duplicates.
3534  * FIXME: this doesn't copy padding for out-of-frame motion vectors. Given we're
3535  * concealing a lost frame, this probably isn't noticeable by comparison, but it should
3536  * be fixed. */
3537  if (h->short_ref_count) {
3538  if (prev) {
3539  av_image_copy(h->short_ref[0]->f.data, h->short_ref[0]->f.linesize,
3540  (const uint8_t **)prev->f.data, prev->f.linesize,
3541  h->avctx->pix_fmt, h->mb_width * 16, h->mb_height * 16);
3542  h->short_ref[0]->poc = prev->poc + 2;
3543  }
3544  h->short_ref[0]->frame_num = h->prev_frame_num;
3545  }
3546  }
3547 
3548  /* See if we have a decoded first field looking for a pair...
3549  * We're using that to see whether to continue decoding in that
3550  * frame, or to allocate a new one. */
3551  if (h0->first_field) {
3552  assert(h0->cur_pic_ptr);
3553  assert(h0->cur_pic_ptr->f.data[0]);
3554  assert(h0->cur_pic_ptr->reference != DELAYED_PIC_REF);
3555 
3556  /* figure out if we have a complementary field pair */
3557  if (!FIELD_PICTURE(h) || h->picture_structure == last_pic_structure) {
3558  /* Previous field is unmatched. Don't display it, but let it
3559  * remain for reference if marked as such. */
3560  h0->cur_pic_ptr = NULL;
3561  h0->first_field = FIELD_PICTURE(h);
3562  } else {
3563  if (h0->cur_pic_ptr->frame_num != h->frame_num) {
3564  ff_thread_report_progress(&h0->cur_pic_ptr->tf, INT_MAX,
3566  /* This and the previous field had different frame_nums.
3567  * Consider this field first in pair. Throw away previous
3568  * one except for reference purposes. */
3569  h0->first_field = 1;
3570  h0->cur_pic_ptr = NULL;
3571  } else {
3572  /* Second field in complementary pair */
3573  h0->first_field = 0;
3574  }
3575  }
3576  } else {
3577  /* Frame or first field in a potentially complementary pair */
3578  h0->first_field = FIELD_PICTURE(h);
3579  }
3580 
3581  if (!FIELD_PICTURE(h) || h0->first_field) {
3582  if (h264_frame_start(h) < 0) {
3583  h0->first_field = 0;
3584  return -1;
3585  }
3586  } else {
3588  }
3589  /* Some macroblocks can be accessed before they're available in case
3590  * of lost slices, MBAFF or threading. */
3591  if (FIELD_PICTURE(h)) {
3592  for(i = (h->picture_structure == PICT_BOTTOM_FIELD); i<h->mb_height; i++)
3593  memset(h->slice_table + i*h->mb_stride, -1, (h->mb_stride - (i+1==h->mb_height)) * sizeof(*h->slice_table));
3594  } else {
3595  memset(h->slice_table, -1,
3596  (h->mb_height * h->mb_stride - 1) * sizeof(*h->slice_table));
3597  }
3598  h0->last_slice_type = -1;
3599  }
3600  if (h != h0 && (ret = clone_slice(h, h0)) < 0)
3601  return ret;
3602 
3603  /* can't be in alloc_tables because linesize isn't known there.
3604  * FIXME: redo bipred weight to not require extra buffer? */
3605  for (i = 0; i < h->slice_context_count; i++)
3606  if (h->thread_context[i]) {
3608  if (ret < 0)
3609  return ret;
3610  }
3611 
3612  h->cur_pic_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
3613 
3614  av_assert1(h->mb_num == h->mb_width * h->mb_height);
3615  if (first_mb_in_slice << FIELD_OR_MBAFF_PICTURE(h) >= h->mb_num ||
3616  first_mb_in_slice >= h->mb_num) {
3617  av_log(h->avctx, AV_LOG_ERROR, "first_mb_in_slice overflow\n");
3618  return -1;
3619  }
3620  h->resync_mb_x = h->mb_x = first_mb_in_slice % h->mb_width;
3621  h->resync_mb_y = h->mb_y = (first_mb_in_slice / h->mb_width) << FIELD_OR_MBAFF_PICTURE(h);
3623  h->resync_mb_y = h->mb_y = h->mb_y + 1;
3624  av_assert1(h->mb_y < h->mb_height);
3625 
3626  if (h->picture_structure == PICT_FRAME) {
3627  h->curr_pic_num = h->frame_num;
3628  h->max_pic_num = 1 << h->sps.log2_max_frame_num;
3629  } else {
3630  h->curr_pic_num = 2 * h->frame_num + 1;
3631  h->max_pic_num = 1 << (h->sps.log2_max_frame_num + 1);
3632  }
3633 
3634  if (h->nal_unit_type == NAL_IDR_SLICE)
3635  get_ue_golomb(&h->gb); /* idr_pic_id */
3636 
3637  if (h->sps.poc_type == 0) {
3638  h->poc_lsb = get_bits(&h->gb, h->sps.log2_max_poc_lsb);
3639 
3640  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3641  h->delta_poc_bottom = get_se_golomb(&h->gb);
3642  }
3643 
3644  if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
3645  h->delta_poc[0] = get_se_golomb(&h->gb);
3646 
3647  if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME)
3648  h->delta_poc[1] = get_se_golomb(&h->gb);
3649  }
3650 
3652 
3655 
3656  // set defaults, might be overridden a few lines later
3657  h->ref_count[0] = h->pps.ref_count[0];
3658  h->ref_count[1] = h->pps.ref_count[1];
3659 
3660  if (h->slice_type_nos != AV_PICTURE_TYPE_I) {
3661  unsigned max[2];
3662  max[0] = max[1] = h->picture_structure == PICT_FRAME ? 15 : 31;
3663 
3666  num_ref_idx_active_override_flag = get_bits1(&h->gb);
3667 
3668  if (num_ref_idx_active_override_flag) {
3669  h->ref_count[0] = get_ue_golomb(&h->gb) + 1;
3670  if (h->slice_type_nos == AV_PICTURE_TYPE_B) {
3671  h->ref_count[1] = get_ue_golomb(&h->gb) + 1;
3672  } else
3673  // full range is spec-ok in this case, even for frames
3674  h->ref_count[1] = 1;
3675  }
3676 
3677  if (h->ref_count[0]-1 > max[0] || h->ref_count[1]-1 > max[1]){
3678  av_log(h->avctx, AV_LOG_ERROR, "reference overflow %u > %u or %u > %u\n", h->ref_count[0]-1, max[0], h->ref_count[1]-1, max[1]);
3679  h->ref_count[0] = h->ref_count[1] = 0;
3680  return AVERROR_INVALIDDATA;
3681  }
3682 
3684  h->list_count = 2;
3685  else
3686  h->list_count = 1;
3687  } else {
3688  h->list_count = 0;
3689  h->ref_count[0] = h->ref_count[1] = 0;
3690  }
3691  if (slice_type != AV_PICTURE_TYPE_I &&
3692  (h0->current_slice == 0 ||
3693  slice_type != h0->last_slice_type ||
3694  memcmp(h0->last_ref_count, h0->ref_count, sizeof(h0->ref_count)))) {
3696  }
3697 
3698  if (h->slice_type_nos != AV_PICTURE_TYPE_I &&
3700  h->ref_count[1] = h->ref_count[0] = 0;
3701  return -1;
3702  }
3703 
3704  if ((h->pps.weighted_pred && h->slice_type_nos == AV_PICTURE_TYPE_P) ||
3705  (h->pps.weighted_bipred_idc == 1 &&
3707  pred_weight_table(h);
3708  else if (h->pps.weighted_bipred_idc == 2 &&
3710  implicit_weight_table(h, -1);
3711  } else {
3712  h->use_weight = 0;
3713  for (i = 0; i < 2; i++) {
3714  h->luma_weight_flag[i] = 0;
3715  h->chroma_weight_flag[i] = 0;
3716  }
3717  }
3718 
3719  // If frame-mt is enabled, only update mmco tables for the first slice
3720  // in a field. Subsequent slices can temporarily clobber h->mmco_index
3721  // or h->mmco, which will cause ref list mix-ups and decoding errors
3722  // further down the line. This may break decoding if the first slice is
3723  // corrupt, thus we only do this if frame-mt is enabled.
3724  if (h->nal_ref_idc &&
3727  h0->current_slice == 0) < 0 &&
3729  return AVERROR_INVALIDDATA;
3730 
3731  if (FRAME_MBAFF(h)) {
3733 
3735  implicit_weight_table(h, 0);
3736  implicit_weight_table(h, 1);
3737  }
3738  }
3739 
3743 
3744  if (h->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
3745  tmp = get_ue_golomb_31(&h->gb);
3746  if (tmp > 2) {
3747  av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc overflow\n");
3748  return -1;
3749  }
3750  h->cabac_init_idc = tmp;
3751  }
3752 
3753  h->last_qscale_diff = 0;
3754  tmp = h->pps.init_qp + get_se_golomb(&h->gb);
3755  if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
3756  av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
3757  return -1;
3758  }
3759  h->qscale = tmp;
3760  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
3761  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
3762  // FIXME qscale / qp ... stuff
3763  if (h->slice_type == AV_PICTURE_TYPE_SP)
3764  get_bits1(&h->gb); /* sp_for_switch_flag */
3765  if (h->slice_type == AV_PICTURE_TYPE_SP ||
3767  get_se_golomb(&h->gb); /* slice_qs_delta */
3768 
3769  h->deblocking_filter = 1;
3770  h->slice_alpha_c0_offset = 52;
3771  h->slice_beta_offset = 52;
3773  tmp = get_ue_golomb_31(&h->gb);
3774  if (tmp > 2) {
3776  "deblocking_filter_idc %u out of range\n", tmp);
3777  return -1;
3778  }
3779  h->deblocking_filter = tmp;
3780  if (h->deblocking_filter < 2)
3781  h->deblocking_filter ^= 1; // 1<->0
3782 
3783  if (h->deblocking_filter) {
3784  h->slice_alpha_c0_offset += get_se_golomb(&h->gb) << 1;
3785  h->slice_beta_offset += get_se_golomb(&h->gb) << 1;
3786  if (h->slice_alpha_c0_offset > 104U ||
3787  h->slice_beta_offset > 104U) {
3789  "deblocking filter parameters %d %d out of range\n",
3791  return -1;
3792  }
3793  }
3794  }
3795 
3796  if (h->avctx->skip_loop_filter >= AVDISCARD_ALL ||
3802  h->nal_ref_idc == 0))
3803  h->deblocking_filter = 0;
3804 
3805  if (h->deblocking_filter == 1 && h0->max_contexts > 1) {
3806  if (h->avctx->flags2 & CODEC_FLAG2_FAST) {
3807  /* Cheat slightly for speed:
3808  * Do not bother to deblock across slices. */
3809  h->deblocking_filter = 2;
3810  } else {
3811  h0->max_contexts = 1;
3812  if (!h0->single_decode_warning) {
3813  av_log(h->avctx, AV_LOG_INFO,
3814  "Cannot parallelize deblocking type 1, decoding such frames in sequential order\n");
3815  h0->single_decode_warning = 1;
3816  }
3817  if (h != h0) {
3819  "Deblocking switched inside frame.\n");
3820  return 1;
3821  }
3822  }
3823  }
3824  h->qp_thresh = 15 + 52 -
3826  FFMAX3(0,
3828  h->pps.chroma_qp_index_offset[1]) +
3829  6 * (h->sps.bit_depth_luma - 8);
3830 
3831  h0->last_slice_type = slice_type;
3832  memcpy(h0->last_ref_count, h0->ref_count, sizeof(h0->last_ref_count));
3833  h->slice_num = ++h0->current_slice;
3834 
3835  if (h->slice_num)
3836  h0->slice_row[(h->slice_num-1)&(MAX_SLICES-1)]= h->resync_mb_y;
3837  if ( h0->slice_row[h->slice_num&(MAX_SLICES-1)] + 3 >= h->resync_mb_y
3838  && h0->slice_row[h->slice_num&(MAX_SLICES-1)] <= h->resync_mb_y
3839  && h->slice_num >= MAX_SLICES) {
3840  //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
3841  av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", h->slice_num, MAX_SLICES);
3842  }
3843 
3844  for (j = 0; j < 2; j++) {
3845  int id_list[16];
3846  int *ref2frm = h->ref2frm[h->slice_num & (MAX_SLICES - 1)][j];
3847  for (i = 0; i < 16; i++) {
3848  id_list[i] = 60;
3849  if (j < h->list_count && i < h->ref_count[j] && h->ref_list[j][i].f.buf[0]) {
3850  int k;
3851  AVBuffer *buf = h->ref_list[j][i].f.buf[0]->buffer;
3852  for (k = 0; k < h->short_ref_count; k++)
3853  if (h->short_ref[k]->f.buf[0]->buffer == buf) {
3854  id_list[i] = k;
3855  break;
3856  }
3857  for (k = 0; k < h->long_ref_count; k++)
3858  if (h->long_ref[k] && h->long_ref[k]->f.buf[0]->buffer == buf) {
3859  id_list[i] = h->short_ref_count + k;
3860  break;
3861  }
3862  }
3863  }
3864 
3865  ref2frm[0] =
3866  ref2frm[1] = -1;
3867  for (i = 0; i < 16; i++)
3868  ref2frm[i + 2] = 4 * id_list[i] +
3869  (h->ref_list[j][i].reference & 3);
3870  ref2frm[18 + 0] =
3871  ref2frm[18 + 1] = -1;
3872  for (i = 16; i < 48; i++)
3873  ref2frm[i + 4] = 4 * id_list[(i - 16) >> 1] +
3874  (h->ref_list[j][i].reference & 3);
3875  }
3876 
3877  if (h->ref_count[0]) h->er.last_pic = &h->ref_list[0][0];
3878  if (h->ref_count[1]) h->er.next_pic = &h->ref_list[1][0];
3879 
3880  if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
3882  "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
3883  h->slice_num,
3884  (h->picture_structure == PICT_FRAME ? "F" : h->picture_structure == PICT_TOP_FIELD ? "T" : "B"),
3885  first_mb_in_slice,
3887  h->slice_type_fixed ? " fix" : "",
3888  h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
3889  pps_id, h->frame_num,
3890  h->cur_pic_ptr->field_poc[0],
3891  h->cur_pic_ptr->field_poc[1],
3892  h->ref_count[0], h->ref_count[1],
3893  h->qscale,
3894  h->deblocking_filter,
3895  h->slice_alpha_c0_offset / 2 - 26, h->slice_beta_offset / 2 - 26,
3896  h->use_weight,
3897  h->use_weight == 1 && h->use_weight_chroma ? "c" : "",
3898  h->slice_type == AV_PICTURE_TYPE_B ? (h->direct_spatial_mv_pred ? "SPAT" : "TEMP") : "");
3899  }
3900 
3901  return 0;
3902 }
3903 
3905 {
3906  switch (h->slice_type) {
3907  case AV_PICTURE_TYPE_P:
3908  return 0;
3909  case AV_PICTURE_TYPE_B:
3910  return 1;
3911  case AV_PICTURE_TYPE_I:
3912  return 2;
3913  case AV_PICTURE_TYPE_SP:
3914  return 3;
3915  case AV_PICTURE_TYPE_SI:
3916  return 4;
3917  default:
3918  return -1;
3919  }
3920 }
3921 
3923  int mb_type, int top_xy,
3924  int left_xy[LEFT_MBS],
3925  int top_type,
3926  int left_type[LEFT_MBS],
3927  int mb_xy, int list)
3928 {
3929  int b_stride = h->b_stride;
3930  int16_t(*mv_dst)[2] = &h->mv_cache[list][scan8[0]];
3931  int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
3932  if (IS_INTER(mb_type) || IS_DIRECT(mb_type)) {
3933  if (USES_LIST(top_type, list)) {
3934  const int b_xy = h->mb2b_xy[top_xy] + 3 * b_stride;
3935  const int b8_xy = 4 * top_xy + 2;
3936  int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_table[top_xy] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3937  AV_COPY128(mv_dst - 1 * 8, h->cur_pic.motion_val[list][b_xy + 0]);
3938  ref_cache[0 - 1 * 8] =
3939  ref_cache[1 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 0]];
3940  ref_cache[2 - 1 * 8] =
3941  ref_cache[3 - 1 * 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 1]];
3942  } else {
3943  AV_ZERO128(mv_dst - 1 * 8);
3944  AV_WN32A(&ref_cache[0 - 1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3945  }
3946 
3947  if (!IS_INTERLACED(mb_type ^ left_type[LTOP])) {
3948  if (USES_LIST(left_type[LTOP], list)) {
3949  const int b_xy = h->mb2b_xy[left_xy[LTOP]] + 3;
3950  const int b8_xy = 4 * left_xy[LTOP] + 1;
3951  int (*ref2frm)[64] =(void*)( h->ref2frm[h->slice_table[left_xy[LTOP]] & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3952  AV_COPY32(mv_dst - 1 + 0, h->cur_pic.motion_val[list][b_xy + b_stride * 0]);
3953  AV_COPY32(mv_dst - 1 + 8, h->cur_pic.motion_val[list][b_xy + b_stride * 1]);
3954  AV_COPY32(mv_dst - 1 + 16, h->cur_pic.motion_val[list][b_xy + b_stride * 2]);
3955  AV_COPY32(mv_dst - 1 + 24, h->cur_pic.motion_val[list][b_xy + b_stride * 3]);
3956  ref_cache[-1 + 0] =
3957  ref_cache[-1 + 8] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 0]];
3958  ref_cache[-1 + 16] =
3959  ref_cache[-1 + 24] = ref2frm[list][h->cur_pic.ref_index[list][b8_xy + 2 * 1]];
3960  } else {
3961  AV_ZERO32(mv_dst - 1 + 0);
3962  AV_ZERO32(mv_dst - 1 + 8);
3963  AV_ZERO32(mv_dst - 1 + 16);
3964  AV_ZERO32(mv_dst - 1 + 24);
3965  ref_cache[-1 + 0] =
3966  ref_cache[-1 + 8] =
3967  ref_cache[-1 + 16] =
3968  ref_cache[-1 + 24] = LIST_NOT_USED;
3969  }
3970  }
3971  }
3972 
3973  if (!USES_LIST(mb_type, list)) {
3974  fill_rectangle(mv_dst, 4, 4, 8, pack16to32(0, 0), 4);
3975  AV_WN32A(&ref_cache[0 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3976  AV_WN32A(&ref_cache[1 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3977  AV_WN32A(&ref_cache[2 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3978  AV_WN32A(&ref_cache[3 * 8], ((LIST_NOT_USED) & 0xFF) * 0x01010101u);
3979  return;
3980  }
3981 
3982  {
3983  int8_t *ref = &h->cur_pic.ref_index[list][4 * mb_xy];
3984  int (*ref2frm)[64] = (void*)(h->ref2frm[h->slice_num & (MAX_SLICES - 1)][0] + (MB_MBAFF(h) ? 20 : 2));
3985  uint32_t ref01 = (pack16to32(ref2frm[list][ref[0]], ref2frm[list][ref[1]]) & 0x00FF00FF) * 0x0101;
3986  uint32_t ref23 = (pack16to32(ref2frm[list][ref[2]], ref2frm[list][ref[3]]) & 0x00FF00FF) * 0x0101;
3987  AV_WN32A(&ref_cache[0 * 8], ref01);
3988  AV_WN32A(&ref_cache[1 * 8], ref01);
3989  AV_WN32A(&ref_cache[2 * 8], ref23);
3990  AV_WN32A(&ref_cache[3 * 8], ref23);
3991  }
3992 
3993  {
3994  int16_t(*mv_src)[2] = &h->cur_pic.motion_val[list][4 * h->mb_x + 4 * h->mb_y * b_stride];
3995  AV_COPY128(mv_dst + 8 * 0, mv_src + 0 * b_stride);
3996  AV_COPY128(mv_dst + 8 * 1, mv_src + 1 * b_stride);
3997  AV_COPY128(mv_dst + 8 * 2, mv_src + 2 * b_stride);
3998  AV_COPY128(mv_dst + 8 * 3, mv_src + 3 * b_stride);
3999  }
4000 }
4001 
/**
 * Fill the neighbour-type, motion-vector/reference and non-zero-count caches
 * that the loop filter reads for the macroblock at h->mb_xy.
 *
 * @param h       decoder context, positioned at the current macroblock
 * @param mb_type macroblock type of the current macroblock
 * @return non zero if the loop filter can be skipped
 */
static int fill_filter_caches(H264Context *h, int mb_type)
{
    const int mb_xy = h->mb_xy;
    int top_xy, left_xy[LEFT_MBS];
    int top_type, left_type[LEFT_MBS];
    uint8_t *nnz;
    uint8_t *nnz_cache;

    /* In field mode the vertical neighbour is one MB-pair row up, hence the
     * extra stride shift. */
    top_xy = mb_xy - (h->mb_stride << MB_FIELD(h));

    /* Wow, what a mess, why didn't they simplify the interlacing & intra
     * stuff, I can't imagine that these complex rules are worth it. */

    left_xy[LBOT] = left_xy[LTOP] = mb_xy - 1;
    if (FRAME_MBAFF(h)) {
        const int left_mb_field_flag = IS_INTERLACED(h->cur_pic.mb_type[mb_xy - 1]);
        const int curr_mb_field_flag = IS_INTERLACED(mb_type);
        if (h->mb_y & 1) {
            /* bottom MB of a pair: a field/frame mismatch with the left pair
             * moves the top-left neighbour up one row */
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LTOP] -= h->mb_stride;
        } else {
            /* top MB of a pair: bit 7 of the neighbour's mb_type selects
             * whether top_xy stays on the pair boundary or moves a row down */
            if (curr_mb_field_flag)
                top_xy += h->mb_stride &
                          (((h->cur_pic.mb_type[top_xy] >> 7) & 1) - 1);
            if (left_mb_field_flag != curr_mb_field_flag)
                left_xy[LBOT] += h->mb_stride;
        }
    }

    h->top_mb_xy        = top_xy;
    h->left_mb_xy[LTOP] = left_xy[LTOP];
    h->left_mb_xy[LBOT] = left_xy[LBOT];
    {
        /* For sufficiently low qp, filtering wouldn't do anything.
         * This is a conservative estimate: could also check beta_offset
         * and more accurate chroma_qp. */
        int qp_thresh = h->qp_thresh; // FIXME strictly we should store qp_thresh for each mb of a slice
        int qp = h->cur_pic.qscale_table[mb_xy];
        if (qp <= qp_thresh &&
            (left_xy[LTOP] < 0 ||
             ((qp + h->cur_pic.qscale_table[left_xy[LTOP]] + 1) >> 1) <= qp_thresh) &&
            (top_xy < 0 ||
             ((qp + h->cur_pic.qscale_table[top_xy] + 1) >> 1) <= qp_thresh)) {
            if (!FRAME_MBAFF(h))
                return 1;
            /* MBAFF: also check the bottom-left and the MB one further row up */
            if ((left_xy[LTOP] < 0 ||
                 ((qp + h->cur_pic.qscale_table[left_xy[LBOT]] + 1) >> 1) <= qp_thresh) &&
                (top_xy < h->mb_stride ||
                 ((qp + h->cur_pic.qscale_table[top_xy - h->mb_stride] + 1) >> 1) <= qp_thresh))
                return 1;
        }
    }

    top_type        = h->cur_pic.mb_type[top_xy];
    left_type[LTOP] = h->cur_pic.mb_type[left_xy[LTOP]];
    left_type[LBOT] = h->cur_pic.mb_type[left_xy[LBOT]];
    if (h->deblocking_filter == 2) {
        /* filter type 2: never filter across slice boundaries, so drop
         * neighbours that belong to a different slice */
        if (h->slice_table[top_xy] != h->slice_num)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] != h->slice_num)
            left_type[LTOP] = left_type[LBOT] = 0;
    } else {
        /* 0xFFFF in slice_table: neighbour has no slice assigned, treat it
         * as unavailable */
        if (h->slice_table[top_xy] == 0xFFFF)
            top_type = 0;
        if (h->slice_table[left_xy[LBOT]] == 0xFFFF)
            left_type[LTOP] = left_type[LBOT] = 0;
    }
    h->top_type        = top_type;
    h->left_type[LTOP] = left_type[LTOP];
    h->left_type[LBOT] = left_type[LBOT];

    /* intra MBs are always filtered, and need no mv/ref caches */
    if (IS_INTRA(mb_type))
        return 0;

    fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
                             top_type, left_type, mb_xy, 0);
    if (h->list_count == 2)
        fill_filter_caches_inter(h, mb_type, top_xy, left_xy,
                                 top_type, left_type, mb_xy, 1);

    /* copy the current MB's 4x4 nnz values into rows 1..4 of the cache */
    nnz       = h->non_zero_count[mb_xy];
    nnz_cache = h->non_zero_count_cache;
    AV_COPY32(&nnz_cache[4 + 8 * 1], &nnz[0]);
    AV_COPY32(&nnz_cache[4 + 8 * 2], &nnz[4]);
    AV_COPY32(&nnz_cache[4 + 8 * 3], &nnz[8]);
    AV_COPY32(&nnz_cache[4 + 8 * 4], &nnz[12]);
    h->cbp = h->cbp_table[mb_xy];

    if (top_type) {
        /* bottom row of the top neighbour becomes cache row 0 */
        nnz = h->non_zero_count[top_xy];
        AV_COPY32(&nnz_cache[4 + 8 * 0], &nnz[3 * 4]);
    }

    if (left_type[LTOP]) {
        /* right column of the left neighbour becomes cache column -1 */
        nnz = h->non_zero_count[left_xy[LTOP]];
        nnz_cache[3 + 8 * 1] = nnz[3 + 0 * 4];
        nnz_cache[3 + 8 * 2] = nnz[3 + 1 * 4];
        nnz_cache[3 + 8 * 3] = nnz[3 + 2 * 4];
        nnz_cache[3 + 8 * 4] = nnz[3 + 3 * 4];
    }

    /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
     * from what the loop filter needs */
    if (!CABAC(h) && h->pps.transform_8x8_mode) {
        if (IS_8x8DCT(top_type)) {
            nnz_cache[4 + 8 * 0] =
            nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
            nnz_cache[6 + 8 * 0] =
            nnz_cache[7 + 8 * 0] = (h->cbp_table[top_xy] & 0x8000) >> 12;
        }
        if (IS_8x8DCT(left_type[LTOP])) {
            nnz_cache[3 + 8 * 1] =
            nnz_cache[3 + 8 * 2] = (h->cbp_table[left_xy[LTOP]] & 0x2000) >> 12; // FIXME check MBAFF
        }
        if (IS_8x8DCT(left_type[LBOT])) {
            nnz_cache[3 + 8 * 3] =
            nnz_cache[3 + 8 * 4] = (h->cbp_table[left_xy[LBOT]] & 0x8000) >> 12; // FIXME check MBAFF
        }

        /* replicate each 8x8 cbp bit across its four 4x4 cache entries */
        if (IS_8x8DCT(mb_type)) {
            nnz_cache[scan8[0]] =
            nnz_cache[scan8[1]] =
            nnz_cache[scan8[2]] =
            nnz_cache[scan8[3]] = (h->cbp & 0x1000) >> 12;

            nnz_cache[scan8[0 + 4]] =
            nnz_cache[scan8[1 + 4]] =
            nnz_cache[scan8[2 + 4]] =
            nnz_cache[scan8[3 + 4]] = (h->cbp & 0x2000) >> 12;

            nnz_cache[scan8[0 + 8]] =
            nnz_cache[scan8[1 + 8]] =
            nnz_cache[scan8[2 + 8]] =
            nnz_cache[scan8[3 + 8]] = (h->cbp & 0x4000) >> 12;

            nnz_cache[scan8[0 + 12]] =
            nnz_cache[scan8[1 + 12]] =
            nnz_cache[scan8[2 + 12]] =
            nnz_cache[scan8[3 + 12]] = (h->cbp & 0x8000) >> 12;
        }
    }

    return 0;
}
4150 
4151 static void loop_filter(H264Context *h, int start_x, int end_x)
4152 {
4153  uint8_t *dest_y, *dest_cb, *dest_cr;
4154  int linesize, uvlinesize, mb_x, mb_y;
4155  const int end_mb_y = h->mb_y + FRAME_MBAFF(h);
4156  const int old_slice_type = h->slice_type;
4157  const int pixel_shift = h->pixel_shift;
4158  const int block_h = 16 >> h->chroma_y_shift;
4159 
4160  if (h->deblocking_filter) {
4161  for (mb_x = start_x; mb_x < end_x; mb_x++)
4162  for (mb_y = end_mb_y - FRAME_MBAFF(h); mb_y <= end_mb_y; mb_y++) {
4163  int mb_xy, mb_type;
4164  mb_xy = h->mb_xy = mb_x + mb_y * h->mb_stride;
4165  h->slice_num = h->slice_table[mb_xy];
4166  mb_type = h->cur_pic.mb_type[mb_xy];
4167  h->list_count = h->list_counts[mb_xy];
4168 
4169  if (FRAME_MBAFF(h))
4170  h->mb_mbaff =
4171  h->mb_field_decoding_flag = !!IS_INTERLACED(mb_type);
4172 
4173  h->mb_x = mb_x;
4174  h->mb_y = mb_y;
4175  dest_y = h->cur_pic.f.data[0] +
4176  ((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
4177  dest_cb = h->cur_pic.f.data[1] +
4178  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4179  mb_y * h->uvlinesize * block_h;
4180  dest_cr = h->cur_pic.f.data[2] +
4181  (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
4182  mb_y * h->uvlinesize * block_h;
4183  // FIXME simplify above
4184 
4185  if (MB_FIELD(h)) {
4186  linesize = h->mb_linesize = h->linesize * 2;
4187  uvlinesize = h->mb_uvlinesize = h->uvlinesize * 2;
4188  if (mb_y & 1) { // FIXME move out of this function?
4189  dest_y -= h->linesize * 15;
4190  dest_cb -= h->uvlinesize * (block_h - 1);
4191  dest_cr -= h->uvlinesize * (block_h - 1);
4192  }
4193  } else {
4194  linesize = h->mb_linesize = h->linesize;
4195  uvlinesize = h->mb_uvlinesize = h->uvlinesize;
4196  }
4197  backup_mb_border(h, dest_y, dest_cb, dest_cr, linesize,
4198  uvlinesize, 0);
4199  if (fill_filter_caches(h, mb_type))
4200  continue;
4201  h->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
4202  h->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
4203 
4204  if (FRAME_MBAFF(h)) {
4205  ff_h264_filter_mb(h, mb_x, mb_y, dest_y, dest_cb, dest_cr,
4206  linesize, uvlinesize);
4207  } else {
4208  ff_h264_filter_mb_fast(h, mb_x, mb_y, dest_y, dest_cb,
4209  dest_cr, linesize, uvlinesize);
4210  }
4211  }
4212  }
4213  h->slice_type = old_slice_type;
4214  h->mb_x = end_x;
4215  h->mb_y = end_mb_y - FRAME_MBAFF(h);
4216  h->chroma_qp[0] = get_chroma_qp(h, 0, h->qscale);
4217  h->chroma_qp[1] = get_chroma_qp(h, 1, h->qscale);
4218 }
4219 
4221 {
4222  const int mb_xy = h->mb_x + h->mb_y * h->mb_stride;
4223  int mb_type = (h->slice_table[mb_xy - 1] == h->slice_num) ?
4224  h->cur_pic.mb_type[mb_xy - 1] :
4225  (h->slice_table[mb_xy - h->mb_stride] == h->slice_num) ?
4226  h->cur_pic.mb_type[mb_xy - h->mb_stride] : 0;
4227  h->mb_mbaff = h->mb_field_decoding_flag = IS_INTERLACED(mb_type) ? 1 : 0;
4228 }
4229 
4230 /**
4231  * Draw edges and report progress for the last MB row.
4232  */
4234 {
4235  int top = 16 * (h->mb_y >> FIELD_PICTURE(h));
4236  int pic_height = 16 * h->mb_height >> FIELD_PICTURE(h);
4237  int height = 16 << FRAME_MBAFF(h);
4238  int deblock_border = (16 + 4) << FRAME_MBAFF(h);
4239 
4240  if (h->deblocking_filter) {
4241  if ((top + height) >= pic_height)
4242  height += deblock_border;
4243  top -= deblock_border;
4244  }
4245 
4246  if (top >= pic_height || (top + height) < 0)
4247  return;
4248 
4249  height = FFMIN(height, pic_height - top);
4250  if (top < 0) {
4251  height = top + height;
4252  top = 0;
4253  }
4254 
4255  ff_h264_draw_horiz_band(h, top, height);
4256 
4257  if (h->droppable || h->er.error_occurred)
4258  return;
4259 
4260  ff_thread_report_progress(&h->cur_pic_ptr->tf, top + height - 1,
4262 }
4263 
4264 static void er_add_slice(H264Context *h, int startx, int starty,
4265  int endx, int endy, int status)
4266 {
4267  if (CONFIG_ERROR_RESILIENCE) {
4268  ERContext *er = &h->er;
4269 
4270  er->ref_count = h->ref_count[0];
4271  ff_er_add_slice(er, startx, starty, endx, endy, status);
4272  }
4273 }
4274 
4275 static int decode_slice(struct AVCodecContext *avctx, void *arg)
4276 {
4277  H264Context *h = *(void **)arg;
4278  int lf_x_start = h->mb_x;
4279 
4280  h->mb_skip_run = -1;
4281 
4282  av_assert0(h->block_offset[15] == (4 * ((scan8[15] - scan8[0]) & 7) << h->pixel_shift) + 4 * h->linesize * ((scan8[15] - scan8[0]) >> 3));
4283 
4285  avctx->codec_id != AV_CODEC_ID_H264 ||
4286  (CONFIG_GRAY && (h->flags & CODEC_FLAG_GRAY));
4287 
4289  const int start_i = av_clip(h->resync_mb_x + h->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
4290  if (start_i) {
4291  int prev_status = h->er.error_status_table[h->er.mb_index2xy[start_i - 1]];
4292  prev_status &= ~ VP_START;
4293  if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
4294  h->er.error_occurred = 1;
4295  }
4296  }
4297 
4298  if (h->pps.cabac) {
4299  /* realign */
4300  align_get_bits(&h->gb);
4301 
4302  /* init cabac */
4304  h->gb.buffer + get_bits_count(&h->gb) / 8,
4305  (get_bits_left(&h->gb) + 7) / 8);
4306 
4308 
4309  for (;;) {
4310  // START_TIMER
4311  int ret = ff_h264_decode_mb_cabac(h);
4312  int eos;
4313  // STOP_TIMER("decode_mb_cabac")
4314 
4315  if (ret >= 0)
4317 
4318  // FIXME optimal? or let mb_decode decode 16x32 ?
4319  if (ret >= 0 && FRAME_MBAFF(h)) {
4320  h->mb_y++;
4321 
4322  ret = ff_h264_decode_mb_cabac(h);
4323 
4324  if (ret >= 0)
4326  h->mb_y--;
4327  }
4328  eos = get_cabac_terminate(&h->cabac);
4329 
4330  if ((h->workaround_bugs & FF_BUG_TRUNCATED) &&
4331  h->cabac.bytestream > h->cabac.bytestream_end + 2) {
4332  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4333  h->mb_y, ER_MB_END);
4334  if (h->mb_x >= lf_x_start)
4335  loop_filter(h, lf_x_start, h->mb_x + 1);
4336  return 0;
4337  }
4338  if (h->cabac.bytestream > h->cabac.bytestream_end + 2 )
4339  av_log(h->avctx, AV_LOG_DEBUG, "bytestream overread %td\n", h->cabac.bytestream_end - h->cabac.bytestream);
4340  if (ret < 0 || h->cabac.bytestream > h->cabac.bytestream_end + 4) {
4342  "error while decoding MB %d %d, bytestream (%td)\n",
4343  h->mb_x, h->mb_y,
4345  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4346  h->mb_y, ER_MB_ERROR);
4347  return -1;
4348  }
4349 
4350  if (++h->mb_x >= h->mb_width) {
4351  loop_filter(h, lf_x_start, h->mb_x);
4352  h->mb_x = lf_x_start = 0;
4353  decode_finish_row(h);
4354  ++h->mb_y;
4355  if (FIELD_OR_MBAFF_PICTURE(h)) {
4356  ++h->mb_y;
4357  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4359  }
4360  }
4361 
4362  if (eos || h->mb_y >= h->mb_height) {
4363  tprintf(h->avctx, "slice end %d %d\n",
4364  get_bits_count(&h->gb), h->gb.size_in_bits);
4365  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x - 1,
4366  h->mb_y, ER_MB_END);
4367  if (h->mb_x > lf_x_start)
4368  loop_filter(h, lf_x_start, h->mb_x);
4369  return 0;
4370  }
4371  }
4372  } else {
4373  for (;;) {
4374  int ret = ff_h264_decode_mb_cavlc(h);
4375 
4376  if (ret >= 0)
4378 
4379  // FIXME optimal? or let mb_decode decode 16x32 ?
4380  if (ret >= 0 && FRAME_MBAFF(h)) {
4381  h->mb_y++;
4382  ret = ff_h264_decode_mb_cavlc(h);
4383 
4384  if (ret >= 0)
4386  h->mb_y--;
4387  }
4388 
4389  if (ret < 0) {
4391  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
4392  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4393  h->mb_y, ER_MB_ERROR);
4394  return -1;
4395  }
4396 
4397  if (++h->mb_x >= h->mb_width) {
4398  loop_filter(h, lf_x_start, h->mb_x);
4399  h->mb_x = lf_x_start = 0;
4400  decode_finish_row(h);
4401  ++h->mb_y;
4402  if (FIELD_OR_MBAFF_PICTURE(h)) {
4403  ++h->mb_y;
4404  if (FRAME_MBAFF(h) && h->mb_y < h->mb_height)
4406  }
4407  if (h->mb_y >= h->mb_height) {
4408  tprintf(h->avctx, "slice end %d %d\n",
4409  get_bits_count(&h->gb), h->gb.size_in_bits);
4410 
4411  if ( get_bits_left(&h->gb) == 0
4412  || get_bits_left(&h->gb) > 0 && !(h->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
4414  h->mb_x - 1, h->mb_y,
4415  ER_MB_END);
4416 
4417  return 0;
4418  } else {
4420  h->mb_x, h->mb_y,
4421  ER_MB_END);
4422 
4423  return -1;
4424  }
4425  }
4426  }
4427 
4428  if (get_bits_left(&h->gb) <= 0 && h->mb_skip_run <= 0) {
4429  tprintf(h->avctx, "slice end %d %d\n",
4430  get_bits_count(&h->gb), h->gb.size_in_bits);
4431  if (get_bits_left(&h->gb) == 0) {
4433  h->mb_x - 1, h->mb_y,
4434  ER_MB_END);
4435  if (h->mb_x > lf_x_start)
4436  loop_filter(h, lf_x_start, h->mb_x);
4437 
4438  return 0;
4439  } else {
4440  er_add_slice(h, h->resync_mb_x, h->resync_mb_y, h->mb_x,
4441  h->mb_y, ER_MB_ERROR);
4442 
4443  return -1;
4444  }
4445  }
4446  }
4447  }
4448 }
4449 
4450 /**
4451  * Call decode_slice() for each context.
4452  *
4453  * @param h h264 master context
4454  * @param context_count number of contexts to execute
4455  */
4456 static int execute_decode_slices(H264Context *h, int context_count)
4457 {
4458  AVCodecContext *const avctx = h->avctx;
4459  H264Context *hx;
4460  int i;
4461 
4462  if (h->avctx->hwaccel ||
4464  return 0;
4465  if (context_count == 1) {
4466  return decode_slice(avctx, &h);
4467  } else {
4468  av_assert0(context_count > 0);
4469  for (i = 1; i < context_count; i++) {
4470  hx = h->thread_context[i];
4471  if (CONFIG_ERROR_RESILIENCE) {
4472  hx->er.error_count = 0;
4473  }
4474  hx->x264_build = h->x264_build;
4475  }
4476 
4477  avctx->execute(avctx, decode_slice, h->thread_context,
4478  NULL, context_count, sizeof(void *));
4479 
4480  /* pull back stuff from slices to master context */
4481  hx = h->thread_context[context_count - 1];
4482  h->mb_x = hx->mb_x;
4483  h->mb_y = hx->mb_y;
4484  h->droppable = hx->droppable;
4486  if (CONFIG_ERROR_RESILIENCE) {
4487  for (i = 1; i < context_count; i++)
4489  }
4490  }
4491 
4492  return 0;
4493 }
4494 
4495 static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size,
4496  int parse_extradata)
4497 {
4498  AVCodecContext *const avctx = h->avctx;
4499  H264Context *hx; ///< thread context
4500  int buf_index;
4501  int context_count;
4502  int next_avc;
4503  int pass = !(avctx->active_thread_type & FF_THREAD_FRAME);
4504  int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
4505  int nal_index;
4506  int idr_cleared=0;
4507  int first_slice = 0;
4508 
4509  h->nal_unit_type= 0;
4510 
4511  if(!h->slice_context_count)
4512  h->slice_context_count= 1;
4514  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS)) {
4515  h->current_slice = 0;
4516  if (!h->first_field)
4517  h->cur_pic_ptr = NULL;
4518  ff_h264_reset_sei(h);
4519  }
4520 
4521  if (h->nal_length_size == 4) {
4522  if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
4523  h->is_avc = 0;
4524  }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
4525  h->is_avc = 1;
4526  }
4527 
4528  for (; pass <= 1; pass++) {
4529  buf_index = 0;
4530  context_count = 0;
4531  next_avc = h->is_avc ? 0 : buf_size;
4532  nal_index = 0;
4533  for (;;) {
4534  int consumed;
4535  int dst_length;
4536  int bit_length;
4537  const uint8_t *ptr;
4538  int i, nalsize = 0;
4539  int err;
4540 
4541  if (buf_index >= next_avc) {
4542  if (buf_index >= buf_size - h->nal_length_size)
4543  break;
4544  nalsize = 0;
4545  for (i = 0; i < h->nal_length_size; i++)
4546  nalsize = (nalsize << 8) | buf[buf_index++];
4547  if (nalsize <= 0 || nalsize > buf_size - buf_index) {
4549  "AVC: nal size %d\n", nalsize);
4550  break;
4551  }
4552  next_avc = buf_index + nalsize;
4553  } else {
4554  // start code prefix search
4555  for (; buf_index + 3 < next_avc; buf_index++)
4556  // This should always succeed in the first iteration.
4557  if (buf[buf_index] == 0 &&
4558  buf[buf_index + 1] == 0 &&
4559  buf[buf_index + 2] == 1)
4560  break;
4561 
4562  if (buf_index + 3 >= buf_size) {
4563  buf_index = buf_size;
4564  break;
4565  }
4566 
4567  buf_index += 3;
4568  if (buf_index >= next_avc)
4569  continue;
4570  }
4571 
4572  hx = h->thread_context[context_count];
4573 
4574  ptr = ff_h264_decode_nal(hx, buf + buf_index, &dst_length,
4575  &consumed, next_avc - buf_index);
4576  if (ptr == NULL || dst_length < 0) {
4577  buf_index = -1;
4578  goto end;
4579  }
4580  i = buf_index + consumed;
4581  if ((h->workaround_bugs & FF_BUG_AUTODETECT) && i + 3 < next_avc &&
4582  buf[i] == 0x00 && buf[i + 1] == 0x00 &&
4583  buf[i + 2] == 0x01 && buf[i + 3] == 0xE0)
4585 
4586  if (!(h->workaround_bugs & FF_BUG_TRUNCATED))
4587  while(dst_length > 0 && ptr[dst_length - 1] == 0)
4588  dst_length--;
4589  bit_length = !dst_length ? 0
4590  : (8 * dst_length -
4591  decode_rbsp_trailing(h, ptr + dst_length - 1));
4592 
4593  if (h->avctx->debug & FF_DEBUG_STARTCODE)
4594  av_log(h->avctx, AV_LOG_DEBUG, "NAL %d/%d at %d/%d length %d pass %d\n", hx->nal_unit_type, hx->nal_ref_idc, buf_index, buf_size, dst_length, pass);
4595 
4596  if (h->is_avc && (nalsize != consumed) && nalsize)
4598  "AVC: Consumed only %d bytes instead of %d\n",
4599  consumed, nalsize);
4600 
4601  buf_index += consumed;
4602  nal_index++;
4603 
4604  if (pass == 0) {
4605  /* packets can sometimes contain multiple PPS/SPS,
4606  * e.g. two PAFF field pictures in one packet, or a demuxer
4607  * which splits NALs strangely if so, when frame threading we
4608  * can't start the next thread until we've read all of them */
4609  switch (hx->nal_unit_type) {
4610  case NAL_SPS:
4611  case NAL_PPS:
4612  nals_needed = nal_index;
4613  break;
4614  case NAL_DPA:
4615  case NAL_IDR_SLICE:
4616  case NAL_SLICE:
4617  init_get_bits(&hx->gb, ptr, bit_length);
4618  if (!get_ue_golomb(&hx->gb) || !first_slice)
4619  nals_needed = nal_index;
4620  if (!first_slice)
4621  first_slice = hx->nal_unit_type;
4622  }
4623  continue;
4624  }
4625 
4626  if (!first_slice)
4627  switch (hx->nal_unit_type) {
4628  case NAL_DPA:
4629  case NAL_IDR_SLICE:
4630  case NAL_SLICE:
4631  first_slice = hx->nal_unit_type;
4632  }
4633 
4634  // FIXME do not discard SEI id
4635  if (avctx->skip_frame >= AVDISCARD_NONREF && h->nal_ref_idc == 0)
4636  continue;
4637 
4638 again:
4639  /* Ignore per frame NAL unit type during extradata
4640  * parsing. Decoding slices is not possible in codec init
4641  * with frame-mt */
4642  if (parse_extradata) {
4643  switch (hx->nal_unit_type) {
4644  case NAL_IDR_SLICE:
4645  case NAL_SLICE:
4646  case NAL_DPA:
4647  case NAL_DPB:
4648  case NAL_DPC:
4649  case NAL_AUXILIARY_SLICE:
4650  av_log(h->avctx, AV_LOG_WARNING, "Ignoring NAL %d in global header/extradata\n", hx->nal_unit_type);
4652  }
4653  }
4654 
4655  err = 0;
4656 
4657  switch (hx->nal_unit_type) {
4658  case NAL_IDR_SLICE:
4659  if (first_slice != NAL_IDR_SLICE) {
4661  "Invalid mix of idr and non-idr slices\n");
4662  buf_index = -1;
4663  goto end;
4664  }
4665  if(!idr_cleared)
4666  idr(h); // FIXME ensure we don't lose some frames if there is reordering
4667  idr_cleared = 1;
4668  case NAL_SLICE:
4669  init_get_bits(&hx->gb, ptr, bit_length);
4670  hx->intra_gb_ptr =
4671  hx->inter_gb_ptr = &hx->gb;
4672  hx->data_partitioning = 0;
4673 
4674  if ((err = decode_slice_header(hx, h)))
4675  break;
4676 
4678  h->valid_recovery_point = 1;
4679 
4680  if ( h->sei_recovery_frame_cnt >= 0
4681  && ( h->recovery_frame<0
4682  || ((h->recovery_frame - h->frame_num) & ((1 << h->sps.log2_max_frame_num)-1)) > h->sei_recovery_frame_cnt)) {
4684  (1 << h->sps.log2_max_frame_num);
4685 
4686  if (!h->valid_recovery_point)
4687  h->recovery_frame = h->frame_num;
4688  }
4689 
4690  h->cur_pic_ptr->f.key_frame |=
4691  (hx->nal_unit_type == NAL_IDR_SLICE);
4692 
4693  if (h->recovery_frame == h->frame_num) {
4694  h->cur_pic_ptr->sync |= 1;
4695  h->recovery_frame = -1;
4696  }
4697 
4698  h->sync |= !!h->cur_pic_ptr->f.key_frame;
4699  h->sync |= 3*!!(avctx->flags2 & CODEC_FLAG2_SHOW_ALL);
4700  h->cur_pic_ptr->sync |= h->sync;
4701 
4702  if (h->current_slice == 1) {
4703  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS))
4704  decode_postinit(h, nal_index >= nals_needed);
4705 
4706  if (h->avctx->hwaccel &&
4707  h->avctx->hwaccel->start_frame(h->avctx, NULL, 0) < 0)
4708  return -1;
4709  if (CONFIG_H264_VDPAU_DECODER &&
4712  }
4713 
4714  if (hx->redundant_pic_count == 0 &&
4715  (avctx->skip_frame < AVDISCARD_NONREF ||
4716  hx->nal_ref_idc) &&
4717  (avctx->skip_frame < AVDISCARD_BIDIR ||
4719  (avctx->skip_frame < AVDISCARD_NONKEY ||
4721  avctx->skip_frame < AVDISCARD_ALL) {
4722  if (avctx->hwaccel) {
4723  if (avctx->hwaccel->decode_slice(avctx,
4724  &buf[buf_index - consumed],
4725  consumed) < 0)
4726  return -1;
4727  } else if (CONFIG_H264_VDPAU_DECODER &&
4729  static const uint8_t start_code[] = {
4730  0x00, 0x00, 0x01 };
4731  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], start_code,
4732  sizeof(start_code));
4733  ff_vdpau_add_data_chunk(h->cur_pic_ptr->f.data[0], &buf[buf_index - consumed],
4734  consumed);
4735  } else
4736  context_count++;
4737  }
4738  break;
4739  case NAL_DPA:
4740  init_get_bits(&hx->gb, ptr, bit_length);
4741  hx->intra_gb_ptr =
4742  hx->inter_gb_ptr = NULL;
4743 
4744  if ((err = decode_slice_header(hx, h)) < 0)
4745  break;
4746 
4747  hx->data_partitioning = 1;
4748  break;
4749  case NAL_DPB:
4750  init_get_bits(&hx->intra_gb, ptr, bit_length);
4751  hx->intra_gb_ptr = &hx->intra_gb;
4752  break;
4753  case NAL_DPC:
4754  init_get_bits(&hx->inter_gb, ptr, bit_length);
4755  hx->inter_gb_ptr = &hx->inter_gb;
4756 
4757  av_log(h->avctx, AV_LOG_ERROR, "Partitioned H.264 support is incomplete\n");
4758  break;
4759 
4760  if (hx->redundant_pic_count == 0 &&
4761  hx->intra_gb_ptr &&
4762  hx->data_partitioning &&
4763  h->cur_pic_ptr && h->context_initialized &&
4764  (avctx->skip_frame < AVDISCARD_NONREF || hx->nal_ref_idc) &&
4765  (avctx->skip_frame < AVDISCARD_BIDIR ||
4767  (avctx->skip_frame < AVDISCARD_NONKEY ||
4769  avctx->skip_frame < AVDISCARD_ALL)
4770  context_count++;
4771  break;
4772  case NAL_SEI:
4773  init_get_bits(&h->gb, ptr, bit_length);
4774  ff_h264_decode_sei(h);
4775  break;
4776  case NAL_SPS:
4777  init_get_bits(&h->gb, ptr, bit_length);
4778  if (ff_h264_decode_seq_parameter_set(h) < 0 && (h->is_avc ? nalsize : 1)) {
4780  "SPS decoding failure, trying again with the complete NAL\n");
4781  if (h->is_avc)
4782  av_assert0(next_avc - buf_index + consumed == nalsize);
4783  if ((next_avc - buf_index + consumed - 1) >= INT_MAX/8)
4784  break;
4785  init_get_bits(&h->gb, &buf[buf_index + 1 - consumed],
4786  8*(next_avc - buf_index + consumed - 1));
4788  }
4789 
4790  break;
4791  case NAL_PPS:
4792  init_get_bits(&h->gb, ptr, bit_length);
4793  ff_h264_decode_picture_parameter_set(h, bit_length);
4794  break;
4795  case NAL_AUD:
4796  case NAL_END_SEQUENCE:
4797  case NAL_END_STREAM:
4798  case NAL_FILLER_DATA:
4799  case NAL_SPS_EXT:
4800  case NAL_AUXILIARY_SLICE:
4801  break;
4802  case NAL_FF_IGNORE:
4803  break;
4804  default:
4805  av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
4806  hx->nal_unit_type, bit_length);
4807  }
4808 
4809  if (context_count == h->max_contexts) {
4810  execute_decode_slices(h, context_count);
4811  context_count = 0;
4812  }
4813 
4814  if (err < 0)
4815  av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
4816  else if (err == 1) {
4817  /* Slice could not be decoded in parallel mode, copy down
4818  * NAL unit stuff to context 0 and restart. Note that
4819  * rbsp_buffer is not transferred, but since we no longer
4820  * run in parallel mode this should not be an issue. */
4821  h->nal_unit_type = hx->nal_unit_type;
4822  h->nal_ref_idc = hx->nal_ref_idc;
4823  hx = h;
4824  goto again;
4825  }
4826  }
4827  }
4828  if (context_count)
4829  execute_decode_slices(h, context_count);
4830 
4831 end:
4832  /* clean up */
4833  if (h->cur_pic_ptr && !h->droppable) {
4836  }
4837 
4838  return buf_index;
4839 }
4840 
4841 /**
4842  * Return the number of bytes consumed for building the current frame.
4843  */
/* Clamp the reported consumed-byte count: never 0 (would stall the caller's
 * loop) and snap to buf_size when within 10 bytes of the end.
 * (Fix: stripped doxygen listing numbers embedded by the HTML extraction.) */
static int get_consumed_bytes(int pos, int buf_size)
{
    if (pos == 0)
        pos = 1; // avoid infinite loops (i doubt that is needed but ...)
    if (pos + 10 > buf_size)
        pos = buf_size; // oops ;)

    return pos;
}
4853 
4855 {
4856  AVFrame *src = &srcp->f;
4857  int i;
4858  int ret = av_frame_ref(dst, src);
4859  if (ret < 0)
4860  return ret;
4861 
4862  if (!srcp->crop)
4863  return 0;
4864 
4865  for (i = 0; i < 3; i++) {
4866  int hshift = (i > 0) ? h->chroma_x_shift : 0;
4867  int vshift = (i > 0) ? h->chroma_y_shift : 0;
4868  int off = ((srcp->crop_left >> hshift) << h->pixel_shift) +
4869  (srcp->crop_top >> vshift) * dst->linesize[i];
4870  dst->data[i] += off;
4871  }
4872  return 0;
4873 }
4874 
4875 static int decode_frame(AVCodecContext *avctx, void *data,
4876  int *got_frame, AVPacket *avpkt)
4877 {
4878  const uint8_t *buf = avpkt->data;
4879  int buf_size = avpkt->size;
4880  H264Context *h = avctx->priv_data;
4881  AVFrame *pict = data;
4882  int buf_index = 0;
4883  Picture *out;
4884  int i, out_idx;
4885  int ret;
4886 
4887  h->flags = avctx->flags;
4888 
4889  /* end of stream, output what is still in the buffers */
4890  if (buf_size == 0) {
4891  out:
4892 
4893  h->cur_pic_ptr = NULL;
4894  h->first_field = 0;
4895 
4896  // FIXME factorize this with the output code below
4897  out = h->delayed_pic[0];
4898  out_idx = 0;
4899  for (i = 1;
4900  h->delayed_pic[i] &&
4901  !h->delayed_pic[i]->f.key_frame &&
4902  !h->delayed_pic[i]->mmco_reset;
4903  i++)
4904  if (h->delayed_pic[i]->poc < out->poc) {
4905  out = h->delayed_pic[i];
4906  out_idx = i;
4907  }
4908 
4909  for (i = out_idx; h->delayed_pic[i]; i++)
4910  h->delayed_pic[i] = h->delayed_pic[i + 1];
4911 
4912  if (out) {
4913  out->reference &= ~DELAYED_PIC_REF;
4914  ret = output_frame(h, pict, out);
4915  if (ret < 0)
4916  return ret;
4917  *got_frame = 1;
4918  }
4919 
4920  return buf_index;
4921  }
4922  if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
4923  int cnt= buf[5]&0x1f;
4924  const uint8_t *p= buf+6;
4925  while(cnt--){
4926  int nalsize= AV_RB16(p) + 2;
4927  if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
4928  goto not_extra;
4929  p += nalsize;
4930  }
4931  cnt = *(p++);
4932  if(!cnt)
4933  goto not_extra;
4934  while(cnt--){
4935  int nalsize= AV_RB16(p) + 2;
4936  if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
4937  goto not_extra;
4938  p += nalsize;
4939  }
4940 
4941  return ff_h264_decode_extradata(h, buf, buf_size);
4942  }
4943 not_extra:
4944 
4945  buf_index = decode_nal_units(h, buf, buf_size, 0);
4946  if (buf_index < 0)
4947  return -1;
4948 
4949  if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
4950  av_assert0(buf_index <= buf_size);
4951  goto out;
4952  }
4953 
4954  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
4955  if (avctx->skip_frame >= AVDISCARD_NONREF ||
4956  buf_size >= 4 && !memcmp("Q264", buf, 4))
4957  return buf_size;
4958  av_log(avctx, AV_LOG_ERROR, "no frame!\n");
4959  return -1;
4960  }
4961 
4962  if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
4963  (h->mb_y >= h->mb_height && h->mb_height)) {
4964  if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
4965  decode_postinit(h, 1);
4966 
4967  field_end(h, 0);
4968 
4969  /* Wait for second field. */
4970  *got_frame = 0;
4971  if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
4972  ret = output_frame(h, pict, h->next_output_pic);
4973  if (ret < 0)
4974  return ret;
4975  *got_frame = 1;
4976  if (CONFIG_MPEGVIDEO) {
4978  &h->low_delay,
4979  h->mb_width, h->mb_height, h->mb_stride, 1);
4980  }
4981  }
4982  }
4983 
4984  assert(pict->data[0] || !*got_frame);
4985 
4986  return get_consumed_bytes(buf_index, buf_size);
4987 }
4988 
4990 {
4991  int i;
4992 
4993  free_tables(h, 1); // FIXME cleanup init stuff perhaps
4994 
4995  for (i = 0; i < MAX_SPS_COUNT; i++)
4996  av_freep(h->sps_buffers + i);
4997 
4998  for (i = 0; i < MAX_PPS_COUNT; i++)
4999  av_freep(h->pps_buffers + i);
5000 }
5001 
5003 {
5004  H264Context *h = avctx->priv_data;
5005 
5008 
5009  unref_picture(h, &h->cur_pic);
5010 
5011  return 0;
5012 }
5013 
5014 static const AVProfile profiles[] = {
5015  { FF_PROFILE_H264_BASELINE, "Baseline" },
5016  { FF_PROFILE_H264_CONSTRAINED_BASELINE, "Constrained Baseline" },
5017  { FF_PROFILE_H264_MAIN, "Main" },
5018  { FF_PROFILE_H264_EXTENDED, "Extended" },
5019  { FF_PROFILE_H264_HIGH, "High" },
5020  { FF_PROFILE_H264_HIGH_10, "High 10" },
5021  { FF_PROFILE_H264_HIGH_10_INTRA, "High 10 Intra" },
5022  { FF_PROFILE_H264_HIGH_422, "High 4:2:2" },
5023  { FF_PROFILE_H264_HIGH_422_INTRA, "High 4:2:2 Intra" },
5024  { FF_PROFILE_H264_HIGH_444, "High 4:4:4" },
5025  { FF_PROFILE_H264_HIGH_444_PREDICTIVE, "High 4:4:4 Predictive" },
5026  { FF_PROFILE_H264_HIGH_444_INTRA, "High 4:4:4 Intra" },
5027  { FF_PROFILE_H264_CAVLC_444, "CAVLC 4:4:4" },
5028  { FF_PROFILE_UNKNOWN },
5029 };
5030 
5031 static const AVOption h264_options[] = {
5032  {"is_avc", "is avc", offsetof(H264Context, is_avc), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 1, 0},
5033  {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), FF_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
5034  {NULL}
5035 };
5036 
5037 static const AVClass h264_class = {
5038  .class_name = "H264 Decoder",
5039  .item_name = av_default_item_name,
5040  .option = h264_options,
5041  .version = LIBAVUTIL_VERSION_INT,
5042 };
5043 
5044 static const AVClass h264_vdpau_class = {
5045  .class_name = "H264 VDPAU Decoder",
5046  .item_name = av_default_item_name,
5047  .option = h264_options,
5048  .version = LIBAVUTIL_VERSION_INT,
5049 };
5050 
5052  .name = "h264",
5053  .type = AVMEDIA_TYPE_VIDEO,
5054  .id = AV_CODEC_ID_H264,
5055  .priv_data_size = sizeof(H264Context),
5058  .decode = decode_frame,
5059  .capabilities = /*CODEC_CAP_DRAW_HORIZ_BAND |*/ CODEC_CAP_DR1 |
5062  .flush = flush_dpb,
5063  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
5064  .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
5065  .update_thread_context = ONLY_IF_THREADS_ENABLED(decode_update_thread_context),
5066  .profiles = NULL_IF_CONFIG_SMALL(profiles),
5067  .priv_class = &h264_class,
5068 };
5069 
5070 #if CONFIG_H264_VDPAU_DECODER
5071 AVCodec ff_h264_vdpau_decoder = {
5072  .name = "h264_vdpau",
5073  .type = AVMEDIA_TYPE_VIDEO,
5074  .id = AV_CODEC_ID_H264,
5075  .priv_data_size = sizeof(H264Context),
5078  .decode = decode_frame,
5080  .flush = flush_dpb,
5081  .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
5082  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
5083  AV_PIX_FMT_NONE},
5084  .profiles = NULL_IF_CONFIG_SMALL(profiles),
5085  .priv_class = &h264_vdpau_class,
5086 };
5087 #endif