FFmpeg
hevcdec.c
Go to the documentation of this file.
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
30 #include "libavutil/internal.h"
32 #include "libavutil/md5.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/stereo3d.h"
36 #include "libavutil/timecode.h"
37 
38 #include "bswapdsp.h"
39 #include "bytestream.h"
40 #include "cabac_functions.h"
41 #include "golomb.h"
42 #include "hevc.h"
43 #include "hevc_data.h"
44 #include "hevc_parse.h"
45 #include "hevcdec.h"
46 #include "hwconfig.h"
47 #include "profiles.h"
48 
49 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
50 
51 /**
52  * NOTE: Each function hls_foo correspond to the function foo in the
53  * specification (HLS stands for High Level Syntax).
54  */
55 
56 /**
57  * Section 5.7
58  */
59 
60 /* free everything allocated by pic_arrays_init() */
62 {
63  av_freep(&s->sao);
64  av_freep(&s->deblock);
65 
66  av_freep(&s->skip_flag);
67  av_freep(&s->tab_ct_depth);
68 
69  av_freep(&s->tab_ipm);
70  av_freep(&s->cbf_luma);
71  av_freep(&s->is_pcm);
72 
73  av_freep(&s->qp_y_tab);
74  av_freep(&s->tab_slice_address);
75  av_freep(&s->filter_slice_edges);
76 
77  av_freep(&s->horizontal_bs);
78  av_freep(&s->vertical_bs);
79 
80  av_freep(&s->sh.entry_point_offset);
81  av_freep(&s->sh.size);
82  av_freep(&s->sh.offset);
83 
84  av_buffer_pool_uninit(&s->tab_mvf_pool);
85  av_buffer_pool_uninit(&s->rpl_tab_pool);
86 }
87 
88 /* allocate arrays that depend on frame dimensions */
89 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
90 {
91  int log2_min_cb_size = sps->log2_min_cb_size;
92  int width = sps->width;
93  int height = sps->height;
94  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
95  ((height >> log2_min_cb_size) + 1);
96  int ctb_count = sps->ctb_width * sps->ctb_height;
97  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
98 
99  s->bs_width = (width >> 2) + 1;
100  s->bs_height = (height >> 2) + 1;
101 
102  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
103  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
104  if (!s->sao || !s->deblock)
105  goto fail;
106 
107  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
108  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
109  if (!s->skip_flag || !s->tab_ct_depth)
110  goto fail;
111 
112  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
113  s->tab_ipm = av_mallocz(min_pu_size);
114  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
115  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
116  goto fail;
117 
118  s->filter_slice_edges = av_mallocz(ctb_count);
119  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
120  sizeof(*s->tab_slice_address));
121  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
122  sizeof(*s->qp_y_tab));
123  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
124  goto fail;
125 
126  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
127  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
128  if (!s->horizontal_bs || !s->vertical_bs)
129  goto fail;
130 
131  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
133  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
135  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
136  goto fail;
137 
138  return 0;
139 
140 fail:
142  return AVERROR(ENOMEM);
143 }
144 
146 {
147  int i = 0;
148  int j = 0;
149  uint8_t luma_weight_l0_flag[16];
150  uint8_t chroma_weight_l0_flag[16];
151  uint8_t luma_weight_l1_flag[16];
152  uint8_t chroma_weight_l1_flag[16];
153  int luma_log2_weight_denom;
154 
155  luma_log2_weight_denom = get_ue_golomb_long(gb);
156  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
157  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
158  return AVERROR_INVALIDDATA;
159  }
160  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
161  if (s->ps.sps->chroma_format_idc != 0) {
162  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
163  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
164  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
165  return AVERROR_INVALIDDATA;
166  }
167  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
168  }
169 
170  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
171  luma_weight_l0_flag[i] = get_bits1(gb);
172  if (!luma_weight_l0_flag[i]) {
173  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
174  s->sh.luma_offset_l0[i] = 0;
175  }
176  }
177  if (s->ps.sps->chroma_format_idc != 0) {
178  for (i = 0; i < s->sh.nb_refs[L0]; i++)
179  chroma_weight_l0_flag[i] = get_bits1(gb);
180  } else {
181  for (i = 0; i < s->sh.nb_refs[L0]; i++)
182  chroma_weight_l0_flag[i] = 0;
183  }
184  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
185  if (luma_weight_l0_flag[i]) {
186  int delta_luma_weight_l0 = get_se_golomb(gb);
187  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
188  return AVERROR_INVALIDDATA;
189  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
190  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
191  }
192  if (chroma_weight_l0_flag[i]) {
193  for (j = 0; j < 2; j++) {
194  int delta_chroma_weight_l0 = get_se_golomb(gb);
195  int delta_chroma_offset_l0 = get_se_golomb(gb);
196 
197  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
198  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
199  return AVERROR_INVALIDDATA;
200  }
201 
202  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
203  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
204  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
205  }
206  } else {
207  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
208  s->sh.chroma_offset_l0[i][0] = 0;
209  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
210  s->sh.chroma_offset_l0[i][1] = 0;
211  }
212  }
213  if (s->sh.slice_type == HEVC_SLICE_B) {
214  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
215  luma_weight_l1_flag[i] = get_bits1(gb);
216  if (!luma_weight_l1_flag[i]) {
217  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
218  s->sh.luma_offset_l1[i] = 0;
219  }
220  }
221  if (s->ps.sps->chroma_format_idc != 0) {
222  for (i = 0; i < s->sh.nb_refs[L1]; i++)
223  chroma_weight_l1_flag[i] = get_bits1(gb);
224  } else {
225  for (i = 0; i < s->sh.nb_refs[L1]; i++)
226  chroma_weight_l1_flag[i] = 0;
227  }
228  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
229  if (luma_weight_l1_flag[i]) {
230  int delta_luma_weight_l1 = get_se_golomb(gb);
231  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
232  return AVERROR_INVALIDDATA;
233  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
234  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
235  }
236  if (chroma_weight_l1_flag[i]) {
237  for (j = 0; j < 2; j++) {
238  int delta_chroma_weight_l1 = get_se_golomb(gb);
239  int delta_chroma_offset_l1 = get_se_golomb(gb);
240 
241  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
242  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
243  return AVERROR_INVALIDDATA;
244  }
245 
246  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
247  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
248  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
249  }
250  } else {
251  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
252  s->sh.chroma_offset_l1[i][0] = 0;
253  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
254  s->sh.chroma_offset_l1[i][1] = 0;
255  }
256  }
257  }
258  return 0;
259 }
260 
262 {
263  const HEVCSPS *sps = s->ps.sps;
264  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
265  int prev_delta_msb = 0;
266  unsigned int nb_sps = 0, nb_sh;
267  int i;
268 
269  rps->nb_refs = 0;
270  if (!sps->long_term_ref_pics_present_flag)
271  return 0;
272 
273  if (sps->num_long_term_ref_pics_sps > 0)
274  nb_sps = get_ue_golomb_long(gb);
275  nb_sh = get_ue_golomb_long(gb);
276 
277  if (nb_sps > sps->num_long_term_ref_pics_sps)
278  return AVERROR_INVALIDDATA;
279  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
280  return AVERROR_INVALIDDATA;
281 
282  rps->nb_refs = nb_sh + nb_sps;
283 
284  for (i = 0; i < rps->nb_refs; i++) {
285 
286  if (i < nb_sps) {
287  uint8_t lt_idx_sps = 0;
288 
289  if (sps->num_long_term_ref_pics_sps > 1)
290  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
291 
292  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
293  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
294  } else {
295  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
296  rps->used[i] = get_bits1(gb);
297  }
298 
299  rps->poc_msb_present[i] = get_bits1(gb);
300  if (rps->poc_msb_present[i]) {
301  int64_t delta = get_ue_golomb_long(gb);
302  int64_t poc;
303 
304  if (i && i != nb_sps)
305  delta += prev_delta_msb;
306 
307  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
308  if (poc != (int32_t)poc)
309  return AVERROR_INVALIDDATA;
310  rps->poc[i] = poc;
311  prev_delta_msb = delta;
312  }
313  }
314 
315  return 0;
316 }
317 
319 {
320  AVCodecContext *avctx = s->avctx;
321  const HEVCParamSets *ps = &s->ps;
322  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
323  const HEVCWindow *ow = &sps->output_window;
324  unsigned int num = 0, den = 0;
325 
326  avctx->pix_fmt = sps->pix_fmt;
327  avctx->coded_width = sps->width;
328  avctx->coded_height = sps->height;
329  avctx->width = sps->width - ow->left_offset - ow->right_offset;
330  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
331  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
332  avctx->profile = sps->ptl.general_ptl.profile_idc;
333  avctx->level = sps->ptl.general_ptl.level_idc;
334 
335  ff_set_sar(avctx, sps->vui.sar);
336 
337  if (sps->vui.video_signal_type_present_flag)
338  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
340  else
341  avctx->color_range = AVCOL_RANGE_MPEG;
342 
343  if (sps->vui.colour_description_present_flag) {
344  avctx->color_primaries = sps->vui.colour_primaries;
345  avctx->color_trc = sps->vui.transfer_characteristic;
346  avctx->colorspace = sps->vui.matrix_coeffs;
347  } else {
351  }
352 
354  if (sps->chroma_format_idc == 1) {
355  if (sps->vui.chroma_loc_info_present_flag) {
356  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
357  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
358  } else
360  }
361 
362  if (vps->vps_timing_info_present_flag) {
363  num = vps->vps_num_units_in_tick;
364  den = vps->vps_time_scale;
365  } else if (sps->vui.vui_timing_info_present_flag) {
366  num = sps->vui.vui_num_units_in_tick;
367  den = sps->vui.vui_time_scale;
368  }
369 
370  if (num != 0 && den != 0)
371  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
372  num, den, 1 << 30);
373 }
374 
376 {
377  AVCodecContext *avctx = s->avctx;
378 
379  if (s->sei.a53_caption.buf_ref)
380  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
381 
382  if (s->sei.alternative_transfer.present &&
383  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
384  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
385  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
386  }
387 
388  return 0;
389 }
390 
392 {
393 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
394  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
395  CONFIG_HEVC_NVDEC_HWACCEL + \
396  CONFIG_HEVC_VAAPI_HWACCEL + \
397  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
398  CONFIG_HEVC_VDPAU_HWACCEL)
399  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
400 
401  switch (sps->pix_fmt) {
402  case AV_PIX_FMT_YUV420P:
403  case AV_PIX_FMT_YUVJ420P:
404 #if CONFIG_HEVC_DXVA2_HWACCEL
405  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
406 #endif
407 #if CONFIG_HEVC_D3D11VA_HWACCEL
408  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
409  *fmt++ = AV_PIX_FMT_D3D11;
410 #endif
411 #if CONFIG_HEVC_VAAPI_HWACCEL
412  *fmt++ = AV_PIX_FMT_VAAPI;
413 #endif
414 #if CONFIG_HEVC_VDPAU_HWACCEL
415  *fmt++ = AV_PIX_FMT_VDPAU;
416 #endif
417 #if CONFIG_HEVC_NVDEC_HWACCEL
418  *fmt++ = AV_PIX_FMT_CUDA;
419 #endif
420 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
421  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
422 #endif
423  break;
425 #if CONFIG_HEVC_DXVA2_HWACCEL
426  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
427 #endif
428 #if CONFIG_HEVC_D3D11VA_HWACCEL
429  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
430  *fmt++ = AV_PIX_FMT_D3D11;
431 #endif
432 #if CONFIG_HEVC_VAAPI_HWACCEL
433  *fmt++ = AV_PIX_FMT_VAAPI;
434 #endif
435 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
436  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
437 #endif
438 #if CONFIG_HEVC_VDPAU_HWACCEL
439  *fmt++ = AV_PIX_FMT_VDPAU;
440 #endif
441 #if CONFIG_HEVC_NVDEC_HWACCEL
442  *fmt++ = AV_PIX_FMT_CUDA;
443 #endif
444  break;
445  case AV_PIX_FMT_YUV444P:
446 #if CONFIG_HEVC_VDPAU_HWACCEL
447  *fmt++ = AV_PIX_FMT_VDPAU;
448 #endif
449 #if CONFIG_HEVC_NVDEC_HWACCEL
450  *fmt++ = AV_PIX_FMT_CUDA;
451 #endif
452  break;
453  case AV_PIX_FMT_YUV422P:
455 #if CONFIG_HEVC_VAAPI_HWACCEL
456  *fmt++ = AV_PIX_FMT_VAAPI;
457 #endif
458  break;
462 #if CONFIG_HEVC_VDPAU_HWACCEL
463  *fmt++ = AV_PIX_FMT_VDPAU;
464 #endif
465 #if CONFIG_HEVC_NVDEC_HWACCEL
466  *fmt++ = AV_PIX_FMT_CUDA;
467 #endif
468  break;
469  }
470 
471  *fmt++ = sps->pix_fmt;
472  *fmt = AV_PIX_FMT_NONE;
473 
474  return ff_thread_get_format(s->avctx, pix_fmts);
475 }
476 
477 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
478  enum AVPixelFormat pix_fmt)
479 {
480  int ret, i;
481 
483  s->ps.sps = NULL;
484  s->ps.vps = NULL;
485 
486  if (!sps)
487  return 0;
488 
489  ret = pic_arrays_init(s, sps);
490  if (ret < 0)
491  goto fail;
492 
494 
495  s->avctx->pix_fmt = pix_fmt;
496 
497  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
498  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
499  ff_videodsp_init (&s->vdsp, sps->bit_depth);
500 
501  for (i = 0; i < 3; i++) {
502  av_freep(&s->sao_pixel_buffer_h[i]);
503  av_freep(&s->sao_pixel_buffer_v[i]);
504  }
505 
506  if (sps->sao_enabled && !s->avctx->hwaccel) {
507  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
508  int c_idx;
509 
510  for(c_idx = 0; c_idx < c_count; c_idx++) {
511  int w = sps->width >> sps->hshift[c_idx];
512  int h = sps->height >> sps->vshift[c_idx];
513  s->sao_pixel_buffer_h[c_idx] =
514  av_malloc((w * 2 * sps->ctb_height) <<
515  sps->pixel_shift);
516  s->sao_pixel_buffer_v[c_idx] =
517  av_malloc((h * 2 * sps->ctb_width) <<
518  sps->pixel_shift);
519  if (!s->sao_pixel_buffer_h[c_idx] ||
520  !s->sao_pixel_buffer_v[c_idx])
521  goto fail;
522  }
523  }
524 
525  s->ps.sps = sps;
526  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
527 
528  return 0;
529 
530 fail:
532  for (i = 0; i < 3; i++) {
533  av_freep(&s->sao_pixel_buffer_h[i]);
534  av_freep(&s->sao_pixel_buffer_v[i]);
535  }
536  s->ps.sps = NULL;
537  return ret;
538 }
539 
541 {
542  GetBitContext *gb = &s->HEVClc->gb;
543  SliceHeader *sh = &s->sh;
544  int i, ret;
545 
546  // Coded parameters
548  if (s->ref && sh->first_slice_in_pic_flag) {
549  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
550  return 1; // This slice will be skipped later, do not corrupt state
551  }
552 
553  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
554  s->seq_decode = (s->seq_decode + 1) & 0xff;
555  s->max_ra = INT_MAX;
556  if (IS_IDR(s))
558  }
560  if (IS_IRAP(s))
562 
563  sh->pps_id = get_ue_golomb_long(gb);
564  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
565  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
566  return AVERROR_INVALIDDATA;
567  }
568  if (!sh->first_slice_in_pic_flag &&
569  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
570  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
571  return AVERROR_INVALIDDATA;
572  }
573  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
574  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
576 
577  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
578  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
579  const HEVCSPS *last_sps = s->ps.sps;
580  enum AVPixelFormat pix_fmt;
581 
582  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
583  if (sps->width != last_sps->width || sps->height != last_sps->height ||
584  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
585  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
587  }
589 
590  ret = set_sps(s, sps, sps->pix_fmt);
591  if (ret < 0)
592  return ret;
593 
594  pix_fmt = get_format(s, sps);
595  if (pix_fmt < 0)
596  return pix_fmt;
597  s->avctx->pix_fmt = pix_fmt;
598 
599  s->seq_decode = (s->seq_decode + 1) & 0xff;
600  s->max_ra = INT_MAX;
601  }
602 
604  if (ret < 0)
605  return ret;
606 
608  if (!sh->first_slice_in_pic_flag) {
609  int slice_address_length;
610 
611  if (s->ps.pps->dependent_slice_segments_enabled_flag)
613 
614  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
615  s->ps.sps->ctb_height);
616  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
617  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
618  av_log(s->avctx, AV_LOG_ERROR,
619  "Invalid slice segment address: %u.\n",
620  sh->slice_segment_addr);
621  return AVERROR_INVALIDDATA;
622  }
623 
624  if (!sh->dependent_slice_segment_flag) {
625  sh->slice_addr = sh->slice_segment_addr;
626  s->slice_idx++;
627  }
628  } else {
629  sh->slice_segment_addr = sh->slice_addr = 0;
630  s->slice_idx = 0;
631  s->slice_initialized = 0;
632  }
633 
634  if (!sh->dependent_slice_segment_flag) {
635  s->slice_initialized = 0;
636 
637  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
638  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
639 
640  sh->slice_type = get_ue_golomb_long(gb);
641  if (!(sh->slice_type == HEVC_SLICE_I ||
642  sh->slice_type == HEVC_SLICE_P ||
643  sh->slice_type == HEVC_SLICE_B)) {
644  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
645  sh->slice_type);
646  return AVERROR_INVALIDDATA;
647  }
648  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
649  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
650  return AVERROR_INVALIDDATA;
651  }
652 
653  // when flag is not present, picture is inferred to be output
654  sh->pic_output_flag = 1;
655  if (s->ps.pps->output_flag_present_flag)
656  sh->pic_output_flag = get_bits1(gb);
657 
658  if (s->ps.sps->separate_colour_plane_flag)
659  sh->colour_plane_id = get_bits(gb, 2);
660 
661  if (!IS_IDR(s)) {
662  int poc, pos;
663 
664  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
665  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
666  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
667  av_log(s->avctx, AV_LOG_WARNING,
668  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
669  if (s->avctx->err_recognition & AV_EF_EXPLODE)
670  return AVERROR_INVALIDDATA;
671  poc = s->poc;
672  }
673  s->poc = poc;
674 
676  pos = get_bits_left(gb);
678  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
679  if (ret < 0)
680  return ret;
681 
682  sh->short_term_rps = &sh->slice_rps;
683  } else {
684  int numbits, rps_idx;
685 
686  if (!s->ps.sps->nb_st_rps) {
687  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
688  return AVERROR_INVALIDDATA;
689  }
690 
691  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
692  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
693  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
694  }
696 
697  pos = get_bits_left(gb);
698  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
699  if (ret < 0) {
700  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
701  if (s->avctx->err_recognition & AV_EF_EXPLODE)
702  return AVERROR_INVALIDDATA;
703  }
705 
706  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
708  else
710  } else {
711  s->sh.short_term_rps = NULL;
712  s->poc = 0;
713  }
714 
715  /* 8.3.1 */
716  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
717  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
718  s->nal_unit_type != HEVC_NAL_TSA_N &&
719  s->nal_unit_type != HEVC_NAL_STSA_N &&
720  s->nal_unit_type != HEVC_NAL_RADL_N &&
721  s->nal_unit_type != HEVC_NAL_RADL_R &&
722  s->nal_unit_type != HEVC_NAL_RASL_N &&
723  s->nal_unit_type != HEVC_NAL_RASL_R)
724  s->pocTid0 = s->poc;
725 
726  if (s->ps.sps->sao_enabled) {
728  if (s->ps.sps->chroma_format_idc) {
731  }
732  } else {
736  }
737 
738  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
739  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
740  int nb_refs;
741 
742  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
743  if (sh->slice_type == HEVC_SLICE_B)
744  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
745 
746  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
747  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
748  if (sh->slice_type == HEVC_SLICE_B)
749  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
750  }
751  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
752  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
753  sh->nb_refs[L0], sh->nb_refs[L1]);
754  return AVERROR_INVALIDDATA;
755  }
756 
757  sh->rpl_modification_flag[0] = 0;
758  sh->rpl_modification_flag[1] = 0;
759  nb_refs = ff_hevc_frame_nb_refs(s);
760  if (!nb_refs) {
761  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
762  return AVERROR_INVALIDDATA;
763  }
764 
765  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
766  sh->rpl_modification_flag[0] = get_bits1(gb);
767  if (sh->rpl_modification_flag[0]) {
768  for (i = 0; i < sh->nb_refs[L0]; i++)
769  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
770  }
771 
772  if (sh->slice_type == HEVC_SLICE_B) {
773  sh->rpl_modification_flag[1] = get_bits1(gb);
774  if (sh->rpl_modification_flag[1] == 1)
775  for (i = 0; i < sh->nb_refs[L1]; i++)
776  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
777  }
778  }
779 
780  if (sh->slice_type == HEVC_SLICE_B)
781  sh->mvd_l1_zero_flag = get_bits1(gb);
782 
783  if (s->ps.pps->cabac_init_present_flag)
784  sh->cabac_init_flag = get_bits1(gb);
785  else
786  sh->cabac_init_flag = 0;
787 
788  sh->collocated_ref_idx = 0;
790  sh->collocated_list = L0;
791  if (sh->slice_type == HEVC_SLICE_B)
792  sh->collocated_list = !get_bits1(gb);
793 
794  if (sh->nb_refs[sh->collocated_list] > 1) {
796  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
797  av_log(s->avctx, AV_LOG_ERROR,
798  "Invalid collocated_ref_idx: %d.\n",
799  sh->collocated_ref_idx);
800  return AVERROR_INVALIDDATA;
801  }
802  }
803  }
804 
805  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
806  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
807  int ret = pred_weight_table(s, gb);
808  if (ret < 0)
809  return ret;
810  }
811 
813  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
814  av_log(s->avctx, AV_LOG_ERROR,
815  "Invalid number of merging MVP candidates: %d.\n",
816  sh->max_num_merge_cand);
817  return AVERROR_INVALIDDATA;
818  }
819  }
820 
821  sh->slice_qp_delta = get_se_golomb(gb);
822 
823  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
826  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
827  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
828  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
829  return AVERROR_INVALIDDATA;
830  }
831  } else {
832  sh->slice_cb_qp_offset = 0;
833  sh->slice_cr_qp_offset = 0;
834  }
835 
836  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
838  else
840 
841  if (s->ps.pps->deblocking_filter_control_present_flag) {
842  int deblocking_filter_override_flag = 0;
843 
844  if (s->ps.pps->deblocking_filter_override_enabled_flag)
845  deblocking_filter_override_flag = get_bits1(gb);
846 
847  if (deblocking_filter_override_flag) {
850  int beta_offset_div2 = get_se_golomb(gb);
851  int tc_offset_div2 = get_se_golomb(gb) ;
852  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
853  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
854  av_log(s->avctx, AV_LOG_ERROR,
855  "Invalid deblock filter offsets: %d, %d\n",
856  beta_offset_div2, tc_offset_div2);
857  return AVERROR_INVALIDDATA;
858  }
859  sh->beta_offset = beta_offset_div2 * 2;
860  sh->tc_offset = tc_offset_div2 * 2;
861  }
862  } else {
863  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
864  sh->beta_offset = s->ps.pps->beta_offset;
865  sh->tc_offset = s->ps.pps->tc_offset;
866  }
867  } else {
869  sh->beta_offset = 0;
870  sh->tc_offset = 0;
871  }
872 
873  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
878  } else {
879  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
880  }
881  } else if (!s->slice_initialized) {
882  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
883  return AVERROR_INVALIDDATA;
884  }
885 
886  sh->num_entry_point_offsets = 0;
887  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
888  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
889  // It would be possible to bound this tighter but this here is simpler
890  if (num_entry_point_offsets > get_bits_left(gb)) {
891  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
892  return AVERROR_INVALIDDATA;
893  }
894 
895  sh->num_entry_point_offsets = num_entry_point_offsets;
896  if (sh->num_entry_point_offsets > 0) {
897  int offset_len = get_ue_golomb_long(gb) + 1;
898 
899  if (offset_len < 1 || offset_len > 32) {
900  sh->num_entry_point_offsets = 0;
901  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
902  return AVERROR_INVALIDDATA;
903  }
904 
906  av_freep(&sh->offset);
907  av_freep(&sh->size);
908  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
909  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
910  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
911  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
912  sh->num_entry_point_offsets = 0;
913  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
914  return AVERROR(ENOMEM);
915  }
916  for (i = 0; i < sh->num_entry_point_offsets; i++) {
917  unsigned val = get_bits_long(gb, offset_len);
918  sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
919  }
920  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
921  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
922  s->threads_number = 1;
923  } else
924  s->enable_parallel_tiles = 0;
925  } else
926  s->enable_parallel_tiles = 0;
927  }
928 
929  if (s->ps.pps->slice_header_extension_present_flag) {
930  unsigned int length = get_ue_golomb_long(gb);
931  if (length*8LL > get_bits_left(gb)) {
932  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
933  return AVERROR_INVALIDDATA;
934  }
935  for (i = 0; i < length; i++)
936  skip_bits(gb, 8); // slice_header_extension_data_byte
937  }
938 
939  // Inferred parameters
940  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
941  if (sh->slice_qp > 51 ||
942  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
943  av_log(s->avctx, AV_LOG_ERROR,
944  "The slice_qp %d is outside the valid range "
945  "[%d, 51].\n",
946  sh->slice_qp,
947  -s->ps.sps->qp_bd_offset);
948  return AVERROR_INVALIDDATA;
949  }
950 
952 
953  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
954  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
955  return AVERROR_INVALIDDATA;
956  }
957 
958  if (get_bits_left(gb) < 0) {
959  av_log(s->avctx, AV_LOG_ERROR,
960  "Overread slice header by %d bits\n", -get_bits_left(gb));
961  return AVERROR_INVALIDDATA;
962  }
963 
964  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
965 
966  if (!s->ps.pps->cu_qp_delta_enabled_flag)
967  s->HEVClc->qp_y = s->sh.slice_qp;
968 
969  s->slice_initialized = 1;
970  s->HEVClc->tu.cu_qp_offset_cb = 0;
971  s->HEVClc->tu.cu_qp_offset_cr = 0;
972 
973  return 0;
974 }
975 
976 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
977 
978 #define SET_SAO(elem, value) \
979 do { \
980  if (!sao_merge_up_flag && !sao_merge_left_flag) \
981  sao->elem = value; \
982  else if (sao_merge_left_flag) \
983  sao->elem = CTB(s->sao, rx-1, ry).elem; \
984  else if (sao_merge_up_flag) \
985  sao->elem = CTB(s->sao, rx, ry-1).elem; \
986  else \
987  sao->elem = 0; \
988 } while (0)
989 
990 static void hls_sao_param(HEVCContext *s, int rx, int ry)
991 {
992  HEVCLocalContext *lc = s->HEVClc;
993  int sao_merge_left_flag = 0;
994  int sao_merge_up_flag = 0;
995  SAOParams *sao = &CTB(s->sao, rx, ry);
996  int c_idx, i;
997 
998  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
999  s->sh.slice_sample_adaptive_offset_flag[1]) {
1000  if (rx > 0) {
1001  if (lc->ctb_left_flag)
1002  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
1003  }
1004  if (ry > 0 && !sao_merge_left_flag) {
1005  if (lc->ctb_up_flag)
1006  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
1007  }
1008  }
1009 
1010  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1011  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1012  s->ps.pps->log2_sao_offset_scale_chroma;
1013 
1014  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1015  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1016  continue;
1017  }
1018 
1019  if (c_idx == 2) {
1020  sao->type_idx[2] = sao->type_idx[1];
1021  sao->eo_class[2] = sao->eo_class[1];
1022  } else {
1023  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
1024  }
1025 
1026  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1027  continue;
1028 
1029  for (i = 0; i < 4; i++)
1030  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
1031 
1032  if (sao->type_idx[c_idx] == SAO_BAND) {
1033  for (i = 0; i < 4; i++) {
1034  if (sao->offset_abs[c_idx][i]) {
1035  SET_SAO(offset_sign[c_idx][i],
1037  } else {
1038  sao->offset_sign[c_idx][i] = 0;
1039  }
1040  }
1041  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
1042  } else if (c_idx != 2) {
1043  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1044  }
1045 
1046  // Inferred parameters
1047  sao->offset_val[c_idx][0] = 0;
1048  for (i = 0; i < 4; i++) {
1049  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1050  if (sao->type_idx[c_idx] == SAO_EDGE) {
1051  if (i > 1)
1052  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1053  } else if (sao->offset_sign[c_idx][i]) {
1054  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1055  }
1056  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1057  }
1058  }
1059 }
1060 
1061 #undef SET_SAO
1062 #undef CTB
1063 
1064 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1065  HEVCLocalContext *lc = s->HEVClc;
1066  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1067 
1068  if (log2_res_scale_abs_plus1 != 0) {
1069  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1070  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1071  (1 - 2 * res_scale_sign_flag);
1072  } else {
1073  lc->tu.res_scale_val = 0;
1074  }
1075 
1076 
1077  return 0;
1078 }
1079 
1080 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1081  int xBase, int yBase, int cb_xBase, int cb_yBase,
1082  int log2_cb_size, int log2_trafo_size,
1083  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1084 {
1085  HEVCLocalContext *lc = s->HEVClc;
1086  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1087  int i;
1088 
1089  if (lc->cu.pred_mode == MODE_INTRA) {
1090  int trafo_size = 1 << log2_trafo_size;
1091  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1092 
1093  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1094  }
1095 
1096  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1097  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1098  int scan_idx = SCAN_DIAG;
1099  int scan_idx_c = SCAN_DIAG;
1100  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1101  (s->ps.sps->chroma_format_idc == 2 &&
1102  (cbf_cb[1] || cbf_cr[1]));
1103 
1104  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1106  if (lc->tu.cu_qp_delta != 0)
1107  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1108  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1109  lc->tu.is_cu_qp_delta_coded = 1;
1110 
1111  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1112  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1113  av_log(s->avctx, AV_LOG_ERROR,
1114  "The cu_qp_delta %d is outside the valid range "
1115  "[%d, %d].\n",
1116  lc->tu.cu_qp_delta,
1117  -(26 + s->ps.sps->qp_bd_offset / 2),
1118  (25 + s->ps.sps->qp_bd_offset / 2));
1119  return AVERROR_INVALIDDATA;
1120  }
1121 
1122  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1123  }
1124 
1125  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1127  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1128  if (cu_chroma_qp_offset_flag) {
1129  int cu_chroma_qp_offset_idx = 0;
1130  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1131  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1132  av_log(s->avctx, AV_LOG_ERROR,
1133  "cu_chroma_qp_offset_idx not yet tested.\n");
1134  }
1135  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1136  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1137  } else {
1138  lc->tu.cu_qp_offset_cb = 0;
1139  lc->tu.cu_qp_offset_cr = 0;
1140  }
1142  }
1143 
1144  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1145  if (lc->tu.intra_pred_mode >= 6 &&
1146  lc->tu.intra_pred_mode <= 14) {
1147  scan_idx = SCAN_VERT;
1148  } else if (lc->tu.intra_pred_mode >= 22 &&
1149  lc->tu.intra_pred_mode <= 30) {
1150  scan_idx = SCAN_HORIZ;
1151  }
1152 
1153  if (lc->tu.intra_pred_mode_c >= 6 &&
1154  lc->tu.intra_pred_mode_c <= 14) {
1155  scan_idx_c = SCAN_VERT;
1156  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1157  lc->tu.intra_pred_mode_c <= 30) {
1158  scan_idx_c = SCAN_HORIZ;
1159  }
1160  }
1161 
1162  lc->tu.cross_pf = 0;
1163 
1164  if (cbf_luma)
1165  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1166  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1167  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1168  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1169  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1170  (lc->cu.pred_mode == MODE_INTER ||
1171  (lc->tu.chroma_mode_c == 4)));
1172 
1173  if (lc->tu.cross_pf) {
1175  }
1176  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1177  if (lc->cu.pred_mode == MODE_INTRA) {
1178  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1179  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1180  }
1181  if (cbf_cb[i])
1182  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1183  log2_trafo_size_c, scan_idx_c, 1);
1184  else
1185  if (lc->tu.cross_pf) {
1186  ptrdiff_t stride = s->frame->linesize[1];
1187  int hshift = s->ps.sps->hshift[1];
1188  int vshift = s->ps.sps->vshift[1];
1189  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1190  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1191  int size = 1 << log2_trafo_size_c;
1192 
1193  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1194  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1195  for (i = 0; i < (size * size); i++) {
1196  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1197  }
1198  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1199  }
1200  }
1201 
1202  if (lc->tu.cross_pf) {
1204  }
1205  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1206  if (lc->cu.pred_mode == MODE_INTRA) {
1207  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1208  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1209  }
1210  if (cbf_cr[i])
1211  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1212  log2_trafo_size_c, scan_idx_c, 2);
1213  else
1214  if (lc->tu.cross_pf) {
1215  ptrdiff_t stride = s->frame->linesize[2];
1216  int hshift = s->ps.sps->hshift[2];
1217  int vshift = s->ps.sps->vshift[2];
1218  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1219  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1220  int size = 1 << log2_trafo_size_c;
1221 
1222  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1223  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1224  for (i = 0; i < (size * size); i++) {
1225  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1226  }
1227  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1228  }
1229  }
1230  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1231  int trafo_size_h = 1 << (log2_trafo_size + 1);
1232  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1233  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1234  if (lc->cu.pred_mode == MODE_INTRA) {
1235  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1236  trafo_size_h, trafo_size_v);
1237  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1238  }
1239  if (cbf_cb[i])
1240  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1241  log2_trafo_size, scan_idx_c, 1);
1242  }
1243  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1244  if (lc->cu.pred_mode == MODE_INTRA) {
1245  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1246  trafo_size_h, trafo_size_v);
1247  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1248  }
1249  if (cbf_cr[i])
1250  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1251  log2_trafo_size, scan_idx_c, 2);
1252  }
1253  }
1254  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1255  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1256  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1257  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1258  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1259  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1260  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1261  if (s->ps.sps->chroma_format_idc == 2) {
1262  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1263  trafo_size_h, trafo_size_v);
1264  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1265  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1266  }
1267  } else if (blk_idx == 3) {
1268  int trafo_size_h = 1 << (log2_trafo_size + 1);
1269  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1270  ff_hevc_set_neighbour_available(s, xBase, yBase,
1271  trafo_size_h, trafo_size_v);
1272  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1273  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1274  if (s->ps.sps->chroma_format_idc == 2) {
1275  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1276  trafo_size_h, trafo_size_v);
1277  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1278  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1279  }
1280  }
1281  }
1282 
1283  return 0;
1284 }
1285 
1286 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1287 {
1288  int cb_size = 1 << log2_cb_size;
1289  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1290 
1291  int min_pu_width = s->ps.sps->min_pu_width;
1292  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1293  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1294  int i, j;
1295 
1296  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1297  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1298  s->is_pcm[i + j * min_pu_width] = 2;
1299 }
1300 
1301 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1302  int xBase, int yBase, int cb_xBase, int cb_yBase,
1303  int log2_cb_size, int log2_trafo_size,
1304  int trafo_depth, int blk_idx,
1305  const int *base_cbf_cb, const int *base_cbf_cr)
1306 {
1307  HEVCLocalContext *lc = s->HEVClc;
1308  uint8_t split_transform_flag;
1309  int cbf_cb[2];
1310  int cbf_cr[2];
1311  int ret;
1312 
1313  cbf_cb[0] = base_cbf_cb[0];
1314  cbf_cb[1] = base_cbf_cb[1];
1315  cbf_cr[0] = base_cbf_cr[0];
1316  cbf_cr[1] = base_cbf_cr[1];
1317 
1318  if (lc->cu.intra_split_flag) {
1319  if (trafo_depth == 1) {
1320  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1321  if (s->ps.sps->chroma_format_idc == 3) {
1322  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1323  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1324  } else {
1326  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1327  }
1328  }
1329  } else {
1330  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1332  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1333  }
1334 
1335  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1336  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1337  trafo_depth < lc->cu.max_trafo_depth &&
1338  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1339  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1340  } else {
1341  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1342  lc->cu.pred_mode == MODE_INTER &&
1343  lc->cu.part_mode != PART_2Nx2N &&
1344  trafo_depth == 0;
1345 
1346  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1347  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1348  inter_split;
1349  }
1350 
1351  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1352  if (trafo_depth == 0 || cbf_cb[0]) {
1353  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1354  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1355  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1356  }
1357  }
1358 
1359  if (trafo_depth == 0 || cbf_cr[0]) {
1360  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1361  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1362  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1363  }
1364  }
1365  }
1366 
1367  if (split_transform_flag) {
1368  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1369  const int x1 = x0 + trafo_size_split;
1370  const int y1 = y0 + trafo_size_split;
1371 
1372 #define SUBDIVIDE(x, y, idx) \
1373 do { \
1374  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1375  log2_trafo_size - 1, trafo_depth + 1, idx, \
1376  cbf_cb, cbf_cr); \
1377  if (ret < 0) \
1378  return ret; \
1379 } while (0)
1380 
1381  SUBDIVIDE(x0, y0, 0);
1382  SUBDIVIDE(x1, y0, 1);
1383  SUBDIVIDE(x0, y1, 2);
1384  SUBDIVIDE(x1, y1, 3);
1385 
1386 #undef SUBDIVIDE
1387  } else {
1388  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1389  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1390  int min_tu_width = s->ps.sps->min_tb_width;
1391  int cbf_luma = 1;
1392 
1393  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1394  cbf_cb[0] || cbf_cr[0] ||
1395  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1396  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1397  }
1398 
1399  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1400  log2_cb_size, log2_trafo_size,
1401  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1402  if (ret < 0)
1403  return ret;
1404  // TODO: store cbf_luma somewhere else
1405  if (cbf_luma) {
1406  int i, j;
1407  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1408  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1409  int x_tu = (x0 + j) >> log2_min_tu_size;
1410  int y_tu = (y0 + i) >> log2_min_tu_size;
1411  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1412  }
1413  }
1414  if (!s->sh.disable_deblocking_filter_flag) {
1415  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1416  if (s->ps.pps->transquant_bypass_enable_flag &&
1418  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1419  }
1420  }
1421  return 0;
1422 }
1423 
1424 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1425 {
1426  HEVCLocalContext *lc = s->HEVClc;
1427  GetBitContext gb;
1428  int cb_size = 1 << log2_cb_size;
1429  ptrdiff_t stride0 = s->frame->linesize[0];
1430  ptrdiff_t stride1 = s->frame->linesize[1];
1431  ptrdiff_t stride2 = s->frame->linesize[2];
1432  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1433  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1434  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1435 
1436  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1437  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1438  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1439  s->ps.sps->pcm.bit_depth_chroma;
1440  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1441  int ret;
1442 
1443  if (!s->sh.disable_deblocking_filter_flag)
1444  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1445 
1446  ret = init_get_bits(&gb, pcm, length);
1447  if (ret < 0)
1448  return ret;
1449 
1450  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1451  if (s->ps.sps->chroma_format_idc) {
1452  s->hevcdsp.put_pcm(dst1, stride1,
1453  cb_size >> s->ps.sps->hshift[1],
1454  cb_size >> s->ps.sps->vshift[1],
1455  &gb, s->ps.sps->pcm.bit_depth_chroma);
1456  s->hevcdsp.put_pcm(dst2, stride2,
1457  cb_size >> s->ps.sps->hshift[2],
1458  cb_size >> s->ps.sps->vshift[2],
1459  &gb, s->ps.sps->pcm.bit_depth_chroma);
1460  }
1461 
1462  return 0;
1463 }
1464 
1465 /**
1466  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1467  *
1468  * @param s HEVC decoding context
1469  * @param dst target buffer for block data at block position
1470  * @param dststride stride of the dst buffer
1471  * @param ref reference picture buffer at origin (0, 0)
1472  * @param mv motion vector (relative to block position) to get pixel data from
1473  * @param x_off horizontal position of block from origin (0, 0)
1474  * @param y_off vertical position of block from origin (0, 0)
1475  * @param block_w width of block
1476  * @param block_h height of block
1477  * @param luma_weight weighting factor applied to the luma prediction
1478  * @param luma_offset additive offset applied to the luma prediction value
1479  */
1480 
1481 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1482  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1483  int block_w, int block_h, int luma_weight, int luma_offset)
1484 {
1485  HEVCLocalContext *lc = s->HEVClc;
1486  uint8_t *src = ref->data[0];
1487  ptrdiff_t srcstride = ref->linesize[0];
1488  int pic_width = s->ps.sps->width;
1489  int pic_height = s->ps.sps->height;
1490  int mx = mv->x & 3;
1491  int my = mv->y & 3;
1492  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1493  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1494  int idx = ff_hevc_pel_weight[block_w];
1495 
1496  x_off += mv->x >> 2;
1497  y_off += mv->y >> 2;
1498  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1499 
1500  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1501  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1502  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1503  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1504  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1505  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1506 
1507  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1508  edge_emu_stride, srcstride,
1509  block_w + QPEL_EXTRA,
1510  block_h + QPEL_EXTRA,
1511  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1512  pic_width, pic_height);
1513  src = lc->edge_emu_buffer + buf_offset;
1514  srcstride = edge_emu_stride;
1515  }
1516 
1517  if (!weight_flag)
1518  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1519  block_h, mx, my, block_w);
1520  else
1521  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1522  block_h, s->sh.luma_log2_weight_denom,
1523  luma_weight, luma_offset, mx, my, block_w);
1524 }
1525 
1526 /**
1527  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1528  *
1529  * @param s HEVC decoding context
1530  * @param dst target buffer for block data at block position
1531  * @param dststride stride of the dst buffer
1532  * @param ref0 reference picture0 buffer at origin (0, 0)
1533  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1534  * @param x_off horizontal position of block from origin (0, 0)
1535  * @param y_off vertical position of block from origin (0, 0)
1536  * @param block_w width of block
1537  * @param block_h height of block
1538  * @param ref1 reference picture1 buffer at origin (0, 0)
1539  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1540  * @param current_mv current motion vector structure
1541  */
1542  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1543  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1544  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1545 {
1546  HEVCLocalContext *lc = s->HEVClc;
1547  ptrdiff_t src0stride = ref0->linesize[0];
1548  ptrdiff_t src1stride = ref1->linesize[0];
1549  int pic_width = s->ps.sps->width;
1550  int pic_height = s->ps.sps->height;
1551  int mx0 = mv0->x & 3;
1552  int my0 = mv0->y & 3;
1553  int mx1 = mv1->x & 3;
1554  int my1 = mv1->y & 3;
1555  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1556  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1557  int x_off0 = x_off + (mv0->x >> 2);
1558  int y_off0 = y_off + (mv0->y >> 2);
1559  int x_off1 = x_off + (mv1->x >> 2);
1560  int y_off1 = y_off + (mv1->y >> 2);
1561  int idx = ff_hevc_pel_weight[block_w];
1562 
1563  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1564  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1565 
1566  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1567  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1568  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1569  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1570  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1571  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1572 
1573  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1574  edge_emu_stride, src0stride,
1575  block_w + QPEL_EXTRA,
1576  block_h + QPEL_EXTRA,
1577  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1578  pic_width, pic_height);
1579  src0 = lc->edge_emu_buffer + buf_offset;
1580  src0stride = edge_emu_stride;
1581  }
1582 
1583  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1584  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1585  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1586  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1587  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1588  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1589 
1590  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1591  edge_emu_stride, src1stride,
1592  block_w + QPEL_EXTRA,
1593  block_h + QPEL_EXTRA,
1594  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1595  pic_width, pic_height);
1596  src1 = lc->edge_emu_buffer2 + buf_offset;
1597  src1stride = edge_emu_stride;
1598  }
1599 
1600  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1601  block_h, mx0, my0, block_w);
1602  if (!weight_flag)
1603  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1604  block_h, mx1, my1, block_w);
1605  else
1606  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1607  block_h, s->sh.luma_log2_weight_denom,
1608  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1609  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1610  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1611  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1612  mx1, my1, block_w);
1613 
1614 }
1615 
1616 /**
1617  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1618  *
1619  * @param s HEVC decoding context
1620  * @param dst1 target buffer for block data at block position (U plane)
1621  * @param dst2 target buffer for block data at block position (V plane)
1622  * @param dststride stride of the dst1 and dst2 buffers
1623  * @param ref reference picture buffer at origin (0, 0)
1624  * @param mv motion vector (relative to block position) to get pixel data from
1625  * @param x_off horizontal position of block from origin (0, 0)
1626  * @param y_off vertical position of block from origin (0, 0)
1627  * @param block_w width of block
1628  * @param block_h height of block
1629  * @param chroma_weight weighting factor applied to the chroma prediction
1630  * @param chroma_offset additive offset applied to the chroma prediction value
1631  */
1632 
1633 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1634  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1635  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1636 {
1637  HEVCLocalContext *lc = s->HEVClc;
1638  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1639  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1640  const Mv *mv = &current_mv->mv[reflist];
1641  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1642  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1643  int idx = ff_hevc_pel_weight[block_w];
1644  int hshift = s->ps.sps->hshift[1];
1645  int vshift = s->ps.sps->vshift[1];
1646  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1647  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1648  intptr_t _mx = mx << (1 - hshift);
1649  intptr_t _my = my << (1 - vshift);
1650 
1651  x_off += mv->x >> (2 + hshift);
1652  y_off += mv->y >> (2 + vshift);
1653  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1654 
1655  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1656  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1657  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1658  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1659  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1660  int buf_offset0 = EPEL_EXTRA_BEFORE *
1661  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1662  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1663  edge_emu_stride, srcstride,
1664  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1665  x_off - EPEL_EXTRA_BEFORE,
1666  y_off - EPEL_EXTRA_BEFORE,
1667  pic_width, pic_height);
1668 
1669  src0 = lc->edge_emu_buffer + buf_offset0;
1670  srcstride = edge_emu_stride;
1671  }
1672  if (!weight_flag)
1673  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1674  block_h, _mx, _my, block_w);
1675  else
1676  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1677  block_h, s->sh.chroma_log2_weight_denom,
1678  chroma_weight, chroma_offset, _mx, _my, block_w);
1679 }
1680 
1681 /**
1682  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1683  *
1684  * @param s HEVC decoding context
1685  * @param dst target buffer for block data at block position
1686  * @param dststride stride of the dst buffer
1687  * @param ref0 reference picture0 buffer at origin (0, 0)
1688  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1689  * @param x_off horizontal position of block from origin (0, 0)
1690  * @param y_off vertical position of block from origin (0, 0)
1691  * @param block_w width of block
1692  * @param block_h height of block
1693  * @param ref1 reference picture1 buffer at origin (0, 0)
1694  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1695  * @param current_mv current motion vector structure
1696  * @param cidx chroma component(cb, cr)
1697  */
1698 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1699  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1700 {
1701  HEVCLocalContext *lc = s->HEVClc;
1702  uint8_t *src1 = ref0->data[cidx+1];
1703  uint8_t *src2 = ref1->data[cidx+1];
1704  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1705  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1706  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1707  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1708  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1709  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1710  Mv *mv0 = &current_mv->mv[0];
1711  Mv *mv1 = &current_mv->mv[1];
1712  int hshift = s->ps.sps->hshift[1];
1713  int vshift = s->ps.sps->vshift[1];
1714 
1715  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1716  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1717  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1718  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1719  intptr_t _mx0 = mx0 << (1 - hshift);
1720  intptr_t _my0 = my0 << (1 - vshift);
1721  intptr_t _mx1 = mx1 << (1 - hshift);
1722  intptr_t _my1 = my1 << (1 - vshift);
1723 
1724  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1725  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1726  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1727  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1728  int idx = ff_hevc_pel_weight[block_w];
1729  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1730  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1731 
1732  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1733  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1734  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1735  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1736  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1737  int buf_offset1 = EPEL_EXTRA_BEFORE *
1738  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1739 
1740  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1741  edge_emu_stride, src1stride,
1742  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1743  x_off0 - EPEL_EXTRA_BEFORE,
1744  y_off0 - EPEL_EXTRA_BEFORE,
1745  pic_width, pic_height);
1746 
1747  src1 = lc->edge_emu_buffer + buf_offset1;
1748  src1stride = edge_emu_stride;
1749  }
1750 
1751  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1752  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1753  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1754  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1755  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1756  int buf_offset1 = EPEL_EXTRA_BEFORE *
1757  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1758 
1759  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1760  edge_emu_stride, src2stride,
1761  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1762  x_off1 - EPEL_EXTRA_BEFORE,
1763  y_off1 - EPEL_EXTRA_BEFORE,
1764  pic_width, pic_height);
1765 
1766  src2 = lc->edge_emu_buffer2 + buf_offset1;
1767  src2stride = edge_emu_stride;
1768  }
1769 
1770  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1771  block_h, _mx0, _my0, block_w);
1772  if (!weight_flag)
1773  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1774  src2, src2stride, lc->tmp,
1775  block_h, _mx1, _my1, block_w);
1776  else
1777  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1778  src2, src2stride, lc->tmp,
1779  block_h,
1780  s->sh.chroma_log2_weight_denom,
1781  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1782  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1783  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1784  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1785  _mx1, _my1, block_w);
1786 }
1787 
1789  const Mv *mv, int y0, int height)
1790 {
1791  if (s->threads_type == FF_THREAD_FRAME ) {
1792  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1793 
1794  ff_thread_await_progress(&ref->tf, y, 0);
1795  }
1796 }
1797 
1798 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1799  int nPbH, int log2_cb_size, int part_idx,
1800  int merge_idx, MvField *mv)
1801 {
1802  HEVCLocalContext *lc = s->HEVClc;
1803  enum InterPredIdc inter_pred_idc = PRED_L0;
1804  int mvp_flag;
1805 
1806  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1807  mv->pred_flag = 0;
1808  if (s->sh.slice_type == HEVC_SLICE_B)
1809  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1810 
1811  if (inter_pred_idc != PRED_L1) {
1812  if (s->sh.nb_refs[L0])
1813  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1814 
1815  mv->pred_flag = PF_L0;
1816  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1817  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1818  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1819  part_idx, merge_idx, mv, mvp_flag, 0);
1820  mv->mv[0].x += lc->pu.mvd.x;
1821  mv->mv[0].y += lc->pu.mvd.y;
1822  }
1823 
1824  if (inter_pred_idc != PRED_L0) {
1825  if (s->sh.nb_refs[L1])
1826  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1827 
1828  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1829  AV_ZERO32(&lc->pu.mvd);
1830  } else {
1831  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1832  }
1833 
1834  mv->pred_flag += PF_L1;
1835  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1836  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1837  part_idx, merge_idx, mv, mvp_flag, 1);
1838  mv->mv[1].x += lc->pu.mvd.x;
1839  mv->mv[1].y += lc->pu.mvd.y;
1840  }
1841 }
1842 
1843 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1844  int nPbW, int nPbH,
1845  int log2_cb_size, int partIdx, int idx)
1846 {
1847 #define POS(c_idx, x, y) \
1848  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1849  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1850  HEVCLocalContext *lc = s->HEVClc;
1851  int merge_idx = 0;
1852  struct MvField current_mv = {{{ 0 }}};
1853 
1854  int min_pu_width = s->ps.sps->min_pu_width;
1855 
1856  MvField *tab_mvf = s->ref->tab_mvf;
1857  RefPicList *refPicList = s->ref->refPicList;
1858  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1859  uint8_t *dst0 = POS(0, x0, y0);
1860  uint8_t *dst1 = POS(1, x0, y0);
1861  uint8_t *dst2 = POS(2, x0, y0);
1862  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1863  int min_cb_width = s->ps.sps->min_cb_width;
1864  int x_cb = x0 >> log2_min_cb_size;
1865  int y_cb = y0 >> log2_min_cb_size;
1866  int x_pu, y_pu;
1867  int i, j;
1868 
1869  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1870 
1871  if (!skip_flag)
1873 
1874  if (skip_flag || lc->pu.merge_flag) {
1875  if (s->sh.max_num_merge_cand > 1)
1876  merge_idx = ff_hevc_merge_idx_decode(s);
1877  else
1878  merge_idx = 0;
1879 
1880  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1881  partIdx, merge_idx, &current_mv);
1882  } else {
1883  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1884  partIdx, merge_idx, &current_mv);
1885  }
1886 
1887  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1888  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1889 
1890  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1891  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1892  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1893 
1894  if (current_mv.pred_flag & PF_L0) {
1895  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1896  if (!ref0)
1897  return;
1898  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1899  }
1900  if (current_mv.pred_flag & PF_L1) {
1901  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1902  if (!ref1)
1903  return;
1904  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1905  }
1906 
1907  if (current_mv.pred_flag == PF_L0) {
1908  int x0_c = x0 >> s->ps.sps->hshift[1];
1909  int y0_c = y0 >> s->ps.sps->vshift[1];
1910  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1911  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1912 
1913  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1914  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1915  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1916  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1917 
1918  if (s->ps.sps->chroma_format_idc) {
1919  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1920  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1921  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1922  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1923  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1924  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1925  }
1926  } else if (current_mv.pred_flag == PF_L1) {
1927  int x0_c = x0 >> s->ps.sps->hshift[1];
1928  int y0_c = y0 >> s->ps.sps->vshift[1];
1929  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1930  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1931 
1932  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1933  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1934  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1935  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1936 
1937  if (s->ps.sps->chroma_format_idc) {
1938  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1939  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1940  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1941 
1942  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1943  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1944  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1945  }
1946  } else if (current_mv.pred_flag == PF_BI) {
1947  int x0_c = x0 >> s->ps.sps->hshift[1];
1948  int y0_c = y0 >> s->ps.sps->vshift[1];
1949  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1950  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1951 
1952  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1953  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1954  ref1->frame, &current_mv.mv[1], &current_mv);
1955 
1956  if (s->ps.sps->chroma_format_idc) {
1957  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1958  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1959 
1960  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1961  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1962  }
1963  }
1964 }
1965 
1966 /**
1967  * 8.4.1
1968  */
1969 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1970  int prev_intra_luma_pred_flag)
1971 {
1972  HEVCLocalContext *lc = s->HEVClc;
1973  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1974  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1975  int min_pu_width = s->ps.sps->min_pu_width;
1976  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1977  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1978  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1979 
1980  int cand_up = (lc->ctb_up_flag || y0b) ?
1981  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1982  int cand_left = (lc->ctb_left_flag || x0b) ?
1983  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1984 
1985  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1986 
1987  MvField *tab_mvf = s->ref->tab_mvf;
1988  int intra_pred_mode;
1989  int candidate[3];
1990  int i, j;
1991 
1992  // intra_pred_mode prediction does not cross vertical CTB boundaries
1993  if ((y0 - 1) < y_ctb)
1994  cand_up = INTRA_DC;
1995 
1996  if (cand_left == cand_up) {
1997  if (cand_left < 2) {
1998  candidate[0] = INTRA_PLANAR;
1999  candidate[1] = INTRA_DC;
2000  candidate[2] = INTRA_ANGULAR_26;
2001  } else {
2002  candidate[0] = cand_left;
2003  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2004  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2005  }
2006  } else {
2007  candidate[0] = cand_left;
2008  candidate[1] = cand_up;
2009  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2010  candidate[2] = INTRA_PLANAR;
2011  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2012  candidate[2] = INTRA_DC;
2013  } else {
2014  candidate[2] = INTRA_ANGULAR_26;
2015  }
2016  }
2017 
2018  if (prev_intra_luma_pred_flag) {
2019  intra_pred_mode = candidate[lc->pu.mpm_idx];
2020  } else {
2021  if (candidate[0] > candidate[1])
2022  FFSWAP(uint8_t, candidate[0], candidate[1]);
2023  if (candidate[0] > candidate[2])
2024  FFSWAP(uint8_t, candidate[0], candidate[2]);
2025  if (candidate[1] > candidate[2])
2026  FFSWAP(uint8_t, candidate[1], candidate[2]);
2027 
2028  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2029  for (i = 0; i < 3; i++)
2030  if (intra_pred_mode >= candidate[i])
2031  intra_pred_mode++;
2032  }
2033 
2034  /* write the intra prediction units into the mv array */
2035  if (!size_in_pus)
2036  size_in_pus = 1;
2037  for (i = 0; i < size_in_pus; i++) {
2038  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2039  intra_pred_mode, size_in_pus);
2040 
2041  for (j = 0; j < size_in_pus; j++) {
2042  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2043  }
2044  }
2045 
2046  return intra_pred_mode;
2047 }
2048 
2049 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2050  int log2_cb_size, int ct_depth)
2051 {
2052  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2053  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2054  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2055  int y;
2056 
2057  for (y = 0; y < length; y++)
2058  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2059  ct_depth, length);
2060 }
2061 
2062 static const uint8_t tab_mode_idx[] = {
2063  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2064  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2065 
2066 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2067  int log2_cb_size)
2068 {
2069  HEVCLocalContext *lc = s->HEVClc;
2070  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2071  uint8_t prev_intra_luma_pred_flag[4];
2072  int split = lc->cu.part_mode == PART_NxN;
2073  int pb_size = (1 << log2_cb_size) >> split;
2074  int side = split + 1;
2075  int chroma_mode;
2076  int i, j;
2077 
2078  for (i = 0; i < side; i++)
2079  for (j = 0; j < side; j++)
2080  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2081 
2082  for (i = 0; i < side; i++) {
2083  for (j = 0; j < side; j++) {
2084  if (prev_intra_luma_pred_flag[2 * i + j])
2086  else
2088 
2089  lc->pu.intra_pred_mode[2 * i + j] =
2090  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2091  prev_intra_luma_pred_flag[2 * i + j]);
2092  }
2093  }
2094 
2095  if (s->ps.sps->chroma_format_idc == 3) {
2096  for (i = 0; i < side; i++) {
2097  for (j = 0; j < side; j++) {
2098  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2099  if (chroma_mode != 4) {
2100  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2101  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2102  else
2103  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2104  } else {
2105  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2106  }
2107  }
2108  }
2109  } else if (s->ps.sps->chroma_format_idc == 2) {
2110  int mode_idx;
2111  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2112  if (chroma_mode != 4) {
2113  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2114  mode_idx = 34;
2115  else
2116  mode_idx = intra_chroma_table[chroma_mode];
2117  } else {
2118  mode_idx = lc->pu.intra_pred_mode[0];
2119  }
2120  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2121  } else if (s->ps.sps->chroma_format_idc != 0) {
2123  if (chroma_mode != 4) {
2124  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2125  lc->pu.intra_pred_mode_c[0] = 34;
2126  else
2127  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2128  } else {
2129  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2130  }
2131  }
2132 }
2133 
2135  int x0, int y0,
2136  int log2_cb_size)
2137 {
2138  HEVCLocalContext *lc = s->HEVClc;
2139  int pb_size = 1 << log2_cb_size;
2140  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2141  int min_pu_width = s->ps.sps->min_pu_width;
2142  MvField *tab_mvf = s->ref->tab_mvf;
2143  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2144  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2145  int j, k;
2146 
2147  if (size_in_pus == 0)
2148  size_in_pus = 1;
2149  for (j = 0; j < size_in_pus; j++)
2150  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2151  if (lc->cu.pred_mode == MODE_INTRA)
2152  for (j = 0; j < size_in_pus; j++)
2153  for (k = 0; k < size_in_pus; k++)
2154  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2155 }
2156 
2157 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2158 {
2159  int cb_size = 1 << log2_cb_size;
2160  HEVCLocalContext *lc = s->HEVClc;
2161  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2162  int length = cb_size >> log2_min_cb_size;
2163  int min_cb_width = s->ps.sps->min_cb_width;
2164  int x_cb = x0 >> log2_min_cb_size;
2165  int y_cb = y0 >> log2_min_cb_size;
2166  int idx = log2_cb_size - 2;
2167  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2168  int x, y, ret;
2169 
2170  lc->cu.x = x0;
2171  lc->cu.y = y0;
2172  lc->cu.pred_mode = MODE_INTRA;
2173  lc->cu.part_mode = PART_2Nx2N;
2174  lc->cu.intra_split_flag = 0;
2175 
2176  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2177  for (x = 0; x < 4; x++)
2178  lc->pu.intra_pred_mode[x] = 1;
2179  if (s->ps.pps->transquant_bypass_enable_flag) {
2181  if (lc->cu.cu_transquant_bypass_flag)
2182  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2183  } else
2184  lc->cu.cu_transquant_bypass_flag = 0;
2185 
2186  if (s->sh.slice_type != HEVC_SLICE_I) {
2187  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2188 
2189  x = y_cb * min_cb_width + x_cb;
2190  for (y = 0; y < length; y++) {
2191  memset(&s->skip_flag[x], skip_flag, length);
2192  x += min_cb_width;
2193  }
2194  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2195  } else {
2196  x = y_cb * min_cb_width + x_cb;
2197  for (y = 0; y < length; y++) {
2198  memset(&s->skip_flag[x], 0, length);
2199  x += min_cb_width;
2200  }
2201  }
2202 
2203  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2204  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2205  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2206 
2207  if (!s->sh.disable_deblocking_filter_flag)
2208  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2209  } else {
2210  int pcm_flag = 0;
2211 
2212  if (s->sh.slice_type != HEVC_SLICE_I)
2214  if (lc->cu.pred_mode != MODE_INTRA ||
2215  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2216  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2217  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2218  lc->cu.pred_mode == MODE_INTRA;
2219  }
2220 
2221  if (lc->cu.pred_mode == MODE_INTRA) {
2222  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2223  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2224  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2225  pcm_flag = ff_hevc_pcm_flag_decode(s);
2226  }
2227  if (pcm_flag) {
2228  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2229  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2230  if (s->ps.sps->pcm.loop_filter_disable_flag)
2231  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2232 
2233  if (ret < 0)
2234  return ret;
2235  } else {
2236  intra_prediction_unit(s, x0, y0, log2_cb_size);
2237  }
2238  } else {
2239  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2240  switch (lc->cu.part_mode) {
2241  case PART_2Nx2N:
2242  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2243  break;
2244  case PART_2NxN:
2245  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2246  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2247  break;
2248  case PART_Nx2N:
2249  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2250  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2251  break;
2252  case PART_2NxnU:
2253  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2254  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2255  break;
2256  case PART_2NxnD:
2257  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2258  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2259  break;
2260  case PART_nLx2N:
2261  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2262  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2263  break;
2264  case PART_nRx2N:
2265  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2266  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2267  break;
2268  case PART_NxN:
2269  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2270  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2271  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2272  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2273  break;
2274  }
2275  }
2276 
2277  if (!pcm_flag) {
2278  int rqt_root_cbf = 1;
2279 
2280  if (lc->cu.pred_mode != MODE_INTRA &&
2281  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2282  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2283  }
2284  if (rqt_root_cbf) {
2285  const static int cbf[2] = { 0 };
2286  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2287  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2288  s->ps.sps->max_transform_hierarchy_depth_inter;
2289  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2290  log2_cb_size,
2291  log2_cb_size, 0, 0, cbf, cbf);
2292  if (ret < 0)
2293  return ret;
2294  } else {
2295  if (!s->sh.disable_deblocking_filter_flag)
2296  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2297  }
2298  }
2299  }
2300 
2301  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2302  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2303 
2304  x = y_cb * min_cb_width + x_cb;
2305  for (y = 0; y < length; y++) {
2306  memset(&s->qp_y_tab[x], lc->qp_y, length);
2307  x += min_cb_width;
2308  }
2309 
2310  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2311  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2312  lc->qPy_pred = lc->qp_y;
2313  }
2314 
2315  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2316 
2317  return 0;
2318 }
2319 
2320 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2321  int log2_cb_size, int cb_depth)
2322 {
2323  HEVCLocalContext *lc = s->HEVClc;
2324  const int cb_size = 1 << log2_cb_size;
2325  int ret;
2326  int split_cu;
2327 
2328  lc->ct_depth = cb_depth;
2329  if (x0 + cb_size <= s->ps.sps->width &&
2330  y0 + cb_size <= s->ps.sps->height &&
2331  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2332  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2333  } else {
2334  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2335  }
2336  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2337  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2338  lc->tu.is_cu_qp_delta_coded = 0;
2339  lc->tu.cu_qp_delta = 0;
2340  }
2341 
2342  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2343  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2345  }
2346 
2347  if (split_cu) {
2348  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2349  const int cb_size_split = cb_size >> 1;
2350  const int x1 = x0 + cb_size_split;
2351  const int y1 = y0 + cb_size_split;
2352 
2353  int more_data = 0;
2354 
2355  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2356  if (more_data < 0)
2357  return more_data;
2358 
2359  if (more_data && x1 < s->ps.sps->width) {
2360  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2361  if (more_data < 0)
2362  return more_data;
2363  }
2364  if (more_data && y1 < s->ps.sps->height) {
2365  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2366  if (more_data < 0)
2367  return more_data;
2368  }
2369  if (more_data && x1 < s->ps.sps->width &&
2370  y1 < s->ps.sps->height) {
2371  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2372  if (more_data < 0)
2373  return more_data;
2374  }
2375 
2376  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2377  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2378  lc->qPy_pred = lc->qp_y;
2379 
2380  if (more_data)
2381  return ((x1 + cb_size_split) < s->ps.sps->width ||
2382  (y1 + cb_size_split) < s->ps.sps->height);
2383  else
2384  return 0;
2385  } else {
2386  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2387  if (ret < 0)
2388  return ret;
2389  if ((!((x0 + cb_size) %
2390  (1 << (s->ps.sps->log2_ctb_size))) ||
2391  (x0 + cb_size >= s->ps.sps->width)) &&
2392  (!((y0 + cb_size) %
2393  (1 << (s->ps.sps->log2_ctb_size))) ||
2394  (y0 + cb_size >= s->ps.sps->height))) {
2395  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2396  return !end_of_slice_flag;
2397  } else {
2398  return 1;
2399  }
2400  }
2401 
2402  return 0;
2403 }
2404 
2405 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2406  int ctb_addr_ts)
2407 {
2408  HEVCLocalContext *lc = s->HEVClc;
2409  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2410  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2411  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2412 
2413  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2414 
2415  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2416  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2417  lc->first_qp_group = 1;
2418  lc->end_of_tiles_x = s->ps.sps->width;
2419  } else if (s->ps.pps->tiles_enabled_flag) {
2420  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2421  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2422  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2423  lc->first_qp_group = 1;
2424  }
2425  } else {
2426  lc->end_of_tiles_x = s->ps.sps->width;
2427  }
2428 
2429  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2430 
2431  lc->boundary_flags = 0;
2432  if (s->ps.pps->tiles_enabled_flag) {
2433  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2435  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2437  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2439  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2441  } else {
2442  if (ctb_addr_in_slice <= 0)
2444  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2446  }
2447 
2448  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2449  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2450  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2451  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2452 }
2453 
2454 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2455 {
2456  HEVCContext *s = avctxt->priv_data;
2457  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2458  int more_data = 1;
2459  int x_ctb = 0;
2460  int y_ctb = 0;
2461  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2462  int ret;
2463 
2464  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2465  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2466  return AVERROR_INVALIDDATA;
2467  }
2468 
2469  if (s->sh.dependent_slice_segment_flag) {
2470  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2471  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2472  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2473  return AVERROR_INVALIDDATA;
2474  }
2475  }
2476 
2477  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2478  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2479 
2480  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2481  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2482  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2483 
2484  ret = ff_hevc_cabac_init(s, ctb_addr_ts, 0);
2485  if (ret < 0) {
2486  s->tab_slice_address[ctb_addr_rs] = -1;
2487  return ret;
2488  }
2489 
2490  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2491 
2492  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2493  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2494  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2495 
2496  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2497  if (more_data < 0) {
2498  s->tab_slice_address[ctb_addr_rs] = -1;
2499  return more_data;
2500  }
2501 
2502 
2503  ctb_addr_ts++;
2504  ff_hevc_save_states(s, ctb_addr_ts);
2505  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2506  }
2507 
2508  if (x_ctb + ctb_size >= s->ps.sps->width &&
2509  y_ctb + ctb_size >= s->ps.sps->height)
2510  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2511 
2512  return ctb_addr_ts;
2513 }
2514 
2516 {
2517  int arg[2];
2518  int ret[2];
2519 
2520  arg[0] = 0;
2521  arg[1] = 1;
2522 
2523  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2524  return ret[0];
2525 }
2526 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2527 {
2528  HEVCContext *s1 = avctxt->priv_data, *s;
2529  HEVCLocalContext *lc;
2530  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2531  int more_data = 1;
2532  int *ctb_row_p = input_ctb_row;
2533  int ctb_row = ctb_row_p[job];
2534  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2535  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2536  int thread = ctb_row % s1->threads_number;
2537  int ret;
2538 
2539  s = s1->sList[self_id];
2540  lc = s->HEVClc;
2541 
2542  if(ctb_row) {
2543  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2544  if (ret < 0)
2545  goto error;
2546  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2547  }
2548 
2549  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2550  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2551  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2552 
2553  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2554 
2555  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2556 
2557  if (atomic_load(&s1->wpp_err)) {
2558  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2559  return 0;
2560  }
2561 
2562  ret = ff_hevc_cabac_init(s, ctb_addr_ts, thread);
2563  if (ret < 0)
2564  goto error;
2565  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2566  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2567 
2568  if (more_data < 0) {
2569  ret = more_data;
2570  goto error;
2571  }
2572 
2573  ctb_addr_ts++;
2574 
2575  ff_hevc_save_states(s, ctb_addr_ts);
2576  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2577  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2578 
2579  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2580  atomic_store(&s1->wpp_err, 1);
2581  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2582  return 0;
2583  }
2584 
2585  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2586  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2587  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2588  return ctb_addr_ts;
2589  }
2590  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2591  x_ctb+=ctb_size;
2592 
2593  if(x_ctb >= s->ps.sps->width) {
2594  break;
2595  }
2596  }
2597  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2598 
2599  return 0;
2600 error:
2601  s->tab_slice_address[ctb_addr_rs] = -1;
2602  atomic_store(&s1->wpp_err, 1);
2603  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2604  return ret;
2605 }
2606 
2607 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2608 {
2609  const uint8_t *data = nal->data;
2610  int length = nal->size;
2611  HEVCLocalContext *lc = s->HEVClc;
2612  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2613  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2614  int64_t offset;
2615  int64_t startheader, cmpt = 0;
2616  int i, j, res = 0;
2617 
2618  if (!ret || !arg) {
2619  av_free(ret);
2620  av_free(arg);
2621  return AVERROR(ENOMEM);
2622  }
2623 
2624  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2625  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2626  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2627  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2628  );
2629  res = AVERROR_INVALIDDATA;
2630  goto error;
2631  }
2632 
2633  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2634 
2635  for (i = 1; i < s->threads_number; i++) {
2636  if (s->sList[i] && s->HEVClcList[i])
2637  continue;
2638  av_freep(&s->sList[i]);
2639  av_freep(&s->HEVClcList[i]);
2640  s->sList[i] = av_malloc(sizeof(HEVCContext));
2641  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2642  if (!s->sList[i] || !s->HEVClcList[i]) {
2643  res = AVERROR(ENOMEM);
2644  goto error;
2645  }
2646  memcpy(s->sList[i], s, sizeof(HEVCContext));
2647  s->sList[i]->HEVClc = s->HEVClcList[i];
2648  }
2649 
2650  offset = (lc->gb.index >> 3);
2651 
2652  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2653  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2654  startheader--;
2655  cmpt++;
2656  }
2657  }
2658 
2659  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2660  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2661  for (j = 0, cmpt = 0, startheader = offset
2662  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2663  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2664  startheader--;
2665  cmpt++;
2666  }
2667  }
2668  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2669  s->sh.offset[i - 1] = offset;
2670 
2671  }
2672  if (s->sh.num_entry_point_offsets != 0) {
2673  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2674  if (length < offset) {
2675  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2676  res = AVERROR_INVALIDDATA;
2677  goto error;
2678  }
2679  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2680  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2681 
2682  }
2683  s->data = data;
2684 
2685  for (i = 1; i < s->threads_number; i++) {
2686  s->sList[i]->HEVClc->first_qp_group = 1;
2687  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2688  memcpy(s->sList[i], s, sizeof(HEVCContext));
2689  s->sList[i]->HEVClc = s->HEVClcList[i];
2690  }
2691 
2692  atomic_store(&s->wpp_err, 0);
2693  ff_reset_entries(s->avctx);
2694 
2695  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2696  arg[i] = i;
2697  ret[i] = 0;
2698  }
2699 
2700  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2701  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2702 
2703  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2704  res += ret[i];
2705 error:
2706  av_free(ret);
2707  av_free(arg);
2708  return res;
2709 }
2710 
2712 {
2713  AVFrame *out = s->ref->frame;
2714 
2715  if (s->sei.frame_packing.present &&
2716  s->sei.frame_packing.arrangement_type >= 3 &&
2717  s->sei.frame_packing.arrangement_type <= 5 &&
2718  s->sei.frame_packing.content_interpretation_type > 0 &&
2719  s->sei.frame_packing.content_interpretation_type < 3) {
2721  if (!stereo)
2722  return AVERROR(ENOMEM);
2723 
2724  switch (s->sei.frame_packing.arrangement_type) {
2725  case 3:
2726  if (s->sei.frame_packing.quincunx_subsampling)
2728  else
2729  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2730  break;
2731  case 4:
2732  stereo->type = AV_STEREO3D_TOPBOTTOM;
2733  break;
2734  case 5:
2735  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2736  break;
2737  }
2738 
2739  if (s->sei.frame_packing.content_interpretation_type == 2)
2740  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2741 
2742  if (s->sei.frame_packing.arrangement_type == 5) {
2743  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2744  stereo->view = AV_STEREO3D_VIEW_LEFT;
2745  else
2746  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2747  }
2748  }
2749 
2750  if (s->sei.display_orientation.present &&
2751  (s->sei.display_orientation.anticlockwise_rotation ||
2752  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2753  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2756  sizeof(int32_t) * 9);
2757  if (!rotation)
2758  return AVERROR(ENOMEM);
2759 
2760  av_display_rotation_set((int32_t *)rotation->data, angle);
2761  av_display_matrix_flip((int32_t *)rotation->data,
2762  s->sei.display_orientation.hflip,
2763  s->sei.display_orientation.vflip);
2764  }
2765 
2766  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2767  // so the side data persists for the entire coded video sequence.
2768  if (s->sei.mastering_display.present > 0 &&
2769  IS_IRAP(s) && s->no_rasl_output_flag) {
2770  s->sei.mastering_display.present--;
2771  }
2772  if (s->sei.mastering_display.present) {
2773  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2774  const int mapping[3] = {2, 0, 1};
2775  const int chroma_den = 50000;
2776  const int luma_den = 10000;
2777  int i;
2778  AVMasteringDisplayMetadata *metadata =
2780  if (!metadata)
2781  return AVERROR(ENOMEM);
2782 
2783  for (i = 0; i < 3; i++) {
2784  const int j = mapping[i];
2785  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2786  metadata->display_primaries[i][0].den = chroma_den;
2787  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2788  metadata->display_primaries[i][1].den = chroma_den;
2789  }
2790  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2791  metadata->white_point[0].den = chroma_den;
2792  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2793  metadata->white_point[1].den = chroma_den;
2794 
2795  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2796  metadata->max_luminance.den = luma_den;
2797  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2798  metadata->min_luminance.den = luma_den;
2799  metadata->has_luminance = 1;
2800  metadata->has_primaries = 1;
2801 
2802  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2803  av_log(s->avctx, AV_LOG_DEBUG,
2804  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2805  av_q2d(metadata->display_primaries[0][0]),
2806  av_q2d(metadata->display_primaries[0][1]),
2807  av_q2d(metadata->display_primaries[1][0]),
2808  av_q2d(metadata->display_primaries[1][1]),
2809  av_q2d(metadata->display_primaries[2][0]),
2810  av_q2d(metadata->display_primaries[2][1]),
2811  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2812  av_log(s->avctx, AV_LOG_DEBUG,
2813  "min_luminance=%f, max_luminance=%f\n",
2814  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2815  }
2816  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2817  // so the side data persists for the entire coded video sequence.
2818  if (s->sei.content_light.present > 0 &&
2819  IS_IRAP(s) && s->no_rasl_output_flag) {
2820  s->sei.content_light.present--;
2821  }
2822  if (s->sei.content_light.present) {
2823  AVContentLightMetadata *metadata =
2825  if (!metadata)
2826  return AVERROR(ENOMEM);
2827  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2828  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2829 
2830  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2831  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2832  metadata->MaxCLL, metadata->MaxFALL);
2833  }
2834 
2835  if (s->sei.a53_caption.buf_ref) {
2836  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2837 
2839  if (!sd)
2840  av_buffer_unref(&a53->buf_ref);
2841  a53->buf_ref = NULL;
2842  }
2843 
2844  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2845  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2846 
2847  if (unreg->buf_ref[i]) {
2850  unreg->buf_ref[i]);
2851  if (!sd)
2852  av_buffer_unref(&unreg->buf_ref[i]);
2853  unreg->buf_ref[i] = NULL;
2854  }
2855  }
2856  s->sei.unregistered.nb_buf_ref = 0;
2857 
2858  if (s->sei.timecode.present) {
2859  uint32_t *tc_sd;
2860  char tcbuf[AV_TIMECODE_STR_SIZE];
2862  sizeof(uint32_t) * 4);
2863  if (!tcside)
2864  return AVERROR(ENOMEM);
2865 
2866  tc_sd = (uint32_t*)tcside->data;
2867  tc_sd[0] = s->sei.timecode.num_clock_ts;
2868 
2869  for (int i = 0; i < tc_sd[0]; i++) {
2870  int drop = s->sei.timecode.cnt_dropped_flag[i];
2871  int hh = s->sei.timecode.hours_value[i];
2872  int mm = s->sei.timecode.minutes_value[i];
2873  int ss = s->sei.timecode.seconds_value[i];
2874  int ff = s->sei.timecode.n_frames[i];
2875 
2876  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2877  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2878  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2879  }
2880 
2881  s->sei.timecode.num_clock_ts = 0;
2882  }
2883 
2884  if (s->sei.film_grain_characteristics.present &&
2885  (s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN)) {
2886  HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
2888  if (!fgp)
2889  return AVERROR(ENOMEM);
2890 
2892 
2893  fgp->codec.h274.model_id = fgc->model_id;
2897  fgp->codec.h274.color_range = fgc->full_range + 1;
2900  fgp->codec.h274.color_space = fgc->matrix_coeffs;
2901  } else {
2902  const HEVCSPS *sps = s->ps.sps;
2903  const VUI *vui = &sps->vui;
2904  fgp->codec.h274.bit_depth_luma = sps->bit_depth;
2905  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
2907  fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
2908  else
2913  fgp->codec.h274.color_space = vui->matrix_coeffs;
2914  } else {
2918  }
2919  }
2922 
2924  sizeof(fgp->codec.h274.component_model_present));
2926  sizeof(fgp->codec.h274.num_intensity_intervals));
2927  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
2928  sizeof(fgp->codec.h274.num_model_values));
2933  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
2934  sizeof(fgp->codec.h274.comp_model_value));
2935 
2936  fgc->present = fgc->persistence_flag;
2937  }
2938 
2939  if (s->sei.dynamic_hdr_plus.info) {
2940  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2941  if (!info_ref)
2942  return AVERROR(ENOMEM);
2943 
2945  av_buffer_unref(&info_ref);
2946  return AVERROR(ENOMEM);
2947  }
2948  }
2949 
2950  return 0;
2951 }
2952 
2954 {
2955  HEVCLocalContext *lc = s->HEVClc;
2956  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2957  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2958  int ret;
2959 
2960  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2961  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2962  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2963  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2964  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2965 
2966  s->is_decoded = 0;
2967  s->first_nal_type = s->nal_unit_type;
2968 
2969  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2970 
2971  if (s->ps.pps->tiles_enabled_flag)
2972  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2973 
2974  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2975  if (ret < 0)
2976  goto fail;
2977 
2978  ret = ff_hevc_frame_rps(s);
2979  if (ret < 0) {
2980  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2981  goto fail;
2982  }
2983 
2984  s->ref->frame->key_frame = IS_IRAP(s);
2985 
2986  ret = set_side_data(s);
2987  if (ret < 0)
2988  goto fail;
2989 
2990  s->frame->pict_type = 3 - s->sh.slice_type;
2991 
2992  if (!IS_IRAP(s))
2994 
2995  av_frame_unref(s->output_frame);
2996  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2997  if (ret < 0)
2998  goto fail;
2999 
3000  if (!s->avctx->hwaccel)
3001  ff_thread_finish_setup(s->avctx);
3002 
3003  return 0;
3004 
3005 fail:
3006  if (s->ref)
3007  ff_hevc_unref_frame(s, s->ref, ~0);
3008  s->ref = NULL;
3009  return ret;
3010 }
3011 
3012 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
3013 {
3014  HEVCLocalContext *lc = s->HEVClc;
3015  GetBitContext *gb = &lc->gb;
3016  int ctb_addr_ts, ret;
3017 
3018  *gb = nal->gb;
3019  s->nal_unit_type = nal->type;
3020  s->temporal_id = nal->temporal_id;
3021 
3022  switch (s->nal_unit_type) {
3023  case HEVC_NAL_VPS:
3024  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3025  ret = s->avctx->hwaccel->decode_params(s->avctx,
3026  nal->type,
3027  nal->raw_data,
3028  nal->raw_size);
3029  if (ret < 0)
3030  goto fail;
3031  }
3032  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
3033  if (ret < 0)
3034  goto fail;
3035  break;
3036  case HEVC_NAL_SPS:
3037  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3038  ret = s->avctx->hwaccel->decode_params(s->avctx,
3039  nal->type,
3040  nal->raw_data,
3041  nal->raw_size);
3042  if (ret < 0)
3043  goto fail;
3044  }
3045  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3046  s->apply_defdispwin);
3047  if (ret < 0)
3048  goto fail;
3049  break;
3050  case HEVC_NAL_PPS:
3051  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3052  ret = s->avctx->hwaccel->decode_params(s->avctx,
3053  nal->type,
3054  nal->raw_data,
3055  nal->raw_size);
3056  if (ret < 0)
3057  goto fail;
3058  }
3059  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3060  if (ret < 0)
3061  goto fail;
3062  break;
3063  case HEVC_NAL_SEI_PREFIX:
3064  case HEVC_NAL_SEI_SUFFIX:
3065  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3066  ret = s->avctx->hwaccel->decode_params(s->avctx,
3067  nal->type,
3068  nal->raw_data,
3069  nal->raw_size);
3070  if (ret < 0)
3071  goto fail;
3072  }
3073  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3074  if (ret < 0)
3075  goto fail;
3076  break;
3077  case HEVC_NAL_TRAIL_R:
3078  case HEVC_NAL_TRAIL_N:
3079  case HEVC_NAL_TSA_N:
3080  case HEVC_NAL_TSA_R:
3081  case HEVC_NAL_STSA_N:
3082  case HEVC_NAL_STSA_R:
3083  case HEVC_NAL_BLA_W_LP:
3084  case HEVC_NAL_BLA_W_RADL:
3085  case HEVC_NAL_BLA_N_LP:
3086  case HEVC_NAL_IDR_W_RADL:
3087  case HEVC_NAL_IDR_N_LP:
3088  case HEVC_NAL_CRA_NUT:
3089  case HEVC_NAL_RADL_N:
3090  case HEVC_NAL_RADL_R:
3091  case HEVC_NAL_RASL_N:
3092  case HEVC_NAL_RASL_R:
3093  ret = hls_slice_header(s);
3094  if (ret < 0)
3095  return ret;
3096  if (ret == 1) {
3098  goto fail;
3099  }
3100 
3101 
3102  if (
3103  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3104  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3105  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3106  break;
3107  }
3108 
3109  if (s->sh.first_slice_in_pic_flag) {
3110  if (s->max_ra == INT_MAX) {
3111  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3112  s->max_ra = s->poc;
3113  } else {
3114  if (IS_IDR(s))
3115  s->max_ra = INT_MIN;
3116  }
3117  }
3118 
3119  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3120  s->poc <= s->max_ra) {
3121  s->is_decoded = 0;
3122  break;
3123  } else {
3124  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3125  s->max_ra = INT_MIN;
3126  }
3127 
3128  s->overlap ++;
3129  ret = hevc_frame_start(s);
3130  if (ret < 0)
3131  return ret;
3132  } else if (!s->ref) {
3133  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3134  goto fail;
3135  }
3136 
3137  if (s->nal_unit_type != s->first_nal_type) {
3138  av_log(s->avctx, AV_LOG_ERROR,
3139  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3140  s->first_nal_type, s->nal_unit_type);
3141  return AVERROR_INVALIDDATA;
3142  }
3143 
3144  if (!s->sh.dependent_slice_segment_flag &&
3145  s->sh.slice_type != HEVC_SLICE_I) {
3146  ret = ff_hevc_slice_rpl(s);
3147  if (ret < 0) {
3148  av_log(s->avctx, AV_LOG_WARNING,
3149  "Error constructing the reference lists for the current slice.\n");
3150  goto fail;
3151  }
3152  }
3153 
3154  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3155  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3156  if (ret < 0)
3157  goto fail;
3158  }
3159 
3160  if (s->avctx->hwaccel) {
3161  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3162  if (ret < 0)
3163  goto fail;
3164  } else {
3165  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3166  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3167  else
3168  ctb_addr_ts = hls_slice_data(s);
3169  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3170  s->is_decoded = 1;
3171  }
3172 
3173  if (ctb_addr_ts < 0) {
3174  ret = ctb_addr_ts;
3175  goto fail;
3176  }
3177  }
3178  break;
3179  case HEVC_NAL_EOS_NUT:
3180  case HEVC_NAL_EOB_NUT:
3181  s->seq_decode = (s->seq_decode + 1) & 0xff;
3182  s->max_ra = INT_MAX;
3183  break;
3184  case HEVC_NAL_AUD:
3185  case HEVC_NAL_FD_NUT:
3186  break;
3187  default:
3188  av_log(s->avctx, AV_LOG_INFO,
3189  "Skipping NAL unit %d\n", s->nal_unit_type);
3190  }
3191 
3192  return 0;
3193 fail:
3194  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3195  return ret;
3196  return 0;
3197 }
3198 
3199 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3200 {
3201  int i, ret = 0;
3202  int eos_at_start = 1;
3203 
3204  s->ref = NULL;
3205  s->last_eos = s->eos;
3206  s->eos = 0;
3207  s->overlap = 0;
3208 
3209  /* split the input packet into NAL units, so we know the upper bound on the
3210  * number of slices in the frame */
3211  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3212  s->nal_length_size, s->avctx->codec_id, 1, 0);
3213  if (ret < 0) {
3214  av_log(s->avctx, AV_LOG_ERROR,
3215  "Error splitting the input into NAL units.\n");
3216  return ret;
3217  }
3218 
3219  for (i = 0; i < s->pkt.nb_nals; i++) {
3220  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3221  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3222  if (eos_at_start) {
3223  s->last_eos = 1;
3224  } else {
3225  s->eos = 1;
3226  }
3227  } else {
3228  eos_at_start = 0;
3229  }
3230  }
3231 
3232  /* decode the NAL units */
3233  for (i = 0; i < s->pkt.nb_nals; i++) {
3234  H2645NAL *nal = &s->pkt.nals[i];
3235 
3236  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3237  (s->avctx->skip_frame >= AVDISCARD_NONREF
3238  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3239  continue;
3240 
3241  ret = decode_nal_unit(s, nal);
3242  if (ret >= 0 && s->overlap > 2)
3244  if (ret < 0) {
3245  av_log(s->avctx, AV_LOG_WARNING,
3246  "Error parsing NAL unit #%d.\n", i);
3247  goto fail;
3248  }
3249  }
3250 
3251 fail:
3252  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3253  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3254 
3255  return ret;
3256 }
3257 
3258 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3259 {
3260  int i;
3261  for (i = 0; i < 16; i++)
3262  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3263 }
3264 
3266 {
3268  int pixel_shift;
3269  int i, j;
3270 
3271  if (!desc)
3272  return AVERROR(EINVAL);
3273 
3274  pixel_shift = desc->comp[0].depth > 8;
3275 
3276  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3277  s->poc);
3278 
3279  /* the checksums are LE, so we have to byteswap for >8bpp formats
3280  * on BE arches */
3281 #if HAVE_BIGENDIAN
3282  if (pixel_shift && !s->checksum_buf) {
3283  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3284  FFMAX3(frame->linesize[0], frame->linesize[1],
3285  frame->linesize[2]));
3286  if (!s->checksum_buf)
3287  return AVERROR(ENOMEM);
3288  }
3289 #endif
3290 
3291  for (i = 0; frame->data[i]; i++) {
3292  int width = s->avctx->coded_width;
3293  int height = s->avctx->coded_height;
3294  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3295  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3296  uint8_t md5[16];
3297 
3298  av_md5_init(s->md5_ctx);
3299  for (j = 0; j < h; j++) {
3300  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3301 #if HAVE_BIGENDIAN
3302  if (pixel_shift) {
3303  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3304  (const uint16_t *) src, w);
3305  src = s->checksum_buf;
3306  }
3307 #endif
3308  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3309  }
3310  av_md5_final(s->md5_ctx, md5);
3311 
3312  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3313  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3314  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3315  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3316  } else {
3317  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3318  print_md5(s->avctx, AV_LOG_ERROR, md5);
3319  av_log (s->avctx, AV_LOG_ERROR, " != ");
3320  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3321  av_log (s->avctx, AV_LOG_ERROR, "\n");
3322  return AVERROR_INVALIDDATA;
3323  }
3324  }
3325 
3326  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3327 
3328  return 0;
3329 }
3330 
3331 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3332 {
3333  int ret, i;
3334 
3335  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3336  &s->nal_length_size, s->avctx->err_recognition,
3337  s->apply_defdispwin, s->avctx);
3338  if (ret < 0)
3339  return ret;
3340 
3341  /* export stream parameters from the first SPS */
3342  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3343  if (first && s->ps.sps_list[i]) {
3344  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3346  break;
3347  }
3348  }
3349 
3350  /* export stream parameters from SEI */
3352  if (ret < 0)
3353  return ret;
3354 
3355  return 0;
3356 }
3357 
3358 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3359  AVPacket *avpkt)
3360 {
3361  int ret;
3362  size_t new_extradata_size;
3363  uint8_t *new_extradata;
3364  HEVCContext *s = avctx->priv_data;
3365 
3366  if (!avpkt->size) {
3367  ret = ff_hevc_output_frame(s, data, 1);
3368  if (ret < 0)
3369  return ret;
3370 
3371  *got_output = ret;
3372  return 0;
3373  }
3374 
3375  new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3376  &new_extradata_size);
3377  if (new_extradata && new_extradata_size > 0) {
3378  ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
3379  if (ret < 0)
3380  return ret;
3381  }
3382 
3383  s->ref = NULL;
3384  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3385  if (ret < 0)
3386  return ret;
3387 
3388  if (avctx->hwaccel) {
3389  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3390  av_log(avctx, AV_LOG_ERROR,
3391  "hardware accelerator failed to decode picture\n");
3392  ff_hevc_unref_frame(s, s->ref, ~0);
3393  return ret;
3394  }
3395  } else {
3396  /* verify the SEI checksum */
3397  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3398  s->sei.picture_hash.is_md5) {
3399  ret = verify_md5(s, s->ref->frame);
3400  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3401  ff_hevc_unref_frame(s, s->ref, ~0);
3402  return ret;
3403  }
3404  }
3405  }
3406  s->sei.picture_hash.is_md5 = 0;
3407 
3408  if (s->is_decoded) {
3409  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3410  s->is_decoded = 0;
3411  }
3412 
3413  if (s->output_frame->buf[0]) {
3414  av_frame_move_ref(data, s->output_frame);
3415  *got_output = 1;
3416  }
3417 
3418  return avpkt->size;
3419 }
3420 
3422 {
3423  int ret;
3424 
3425  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3426  if (ret < 0)
3427  return ret;
3428 
3429  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3430  if (!dst->tab_mvf_buf)
3431  goto fail;
3432  dst->tab_mvf = src->tab_mvf;
3433 
3434  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3435  if (!dst->rpl_tab_buf)
3436  goto fail;
3437  dst->rpl_tab = src->rpl_tab;
3438 
3439  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3440  if (!dst->rpl_buf)
3441  goto fail;
3442 
3443  dst->poc = src->poc;
3444  dst->ctb_count = src->ctb_count;
3445  dst->flags = src->flags;
3446  dst->sequence = src->sequence;
3447 
3448  if (src->hwaccel_picture_private) {
3449  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3450  if (!dst->hwaccel_priv_buf)
3451  goto fail;
3453  }
3454 
3455  return 0;
3456 fail:
3457  ff_hevc_unref_frame(s, dst, ~0);
3458  return AVERROR(ENOMEM);
3459 }
3460 
3462 {
3463  HEVCContext *s = avctx->priv_data;
3464  int i;
3465 
3466  pic_arrays_free(s);
3467 
3468  av_freep(&s->md5_ctx);
3469 
3470  av_freep(&s->cabac_state);
3471 
3472  for (i = 0; i < 3; i++) {
3473  av_freep(&s->sao_pixel_buffer_h[i]);
3474  av_freep(&s->sao_pixel_buffer_v[i]);
3475  }
3476  av_frame_free(&s->output_frame);
3477 
3478  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3479  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3480  av_frame_free(&s->DPB[i].frame);
3481  }
3482 
3483  ff_hevc_ps_uninit(&s->ps);
3484 
3485  av_freep(&s->sh.entry_point_offset);
3486  av_freep(&s->sh.offset);
3487  av_freep(&s->sh.size);
3488 
3489  if (s->HEVClcList && s->sList) {
3490  for (i = 1; i < s->threads_number; i++) {
3491  av_freep(&s->HEVClcList[i]);
3492  av_freep(&s->sList[i]);
3493  }
3494  }
3495  av_freep(&s->HEVClc);
3496  av_freep(&s->HEVClcList);
3497  av_freep(&s->sList);
3498 
3499  ff_h2645_packet_uninit(&s->pkt);
3500 
3501  ff_hevc_reset_sei(&s->sei);
3502 
3503  return 0;
3504 }
3505 
3507 {
3508  HEVCContext *s = avctx->priv_data;
3509  int i;
3510 
3511  s->avctx = avctx;
3512 
3513  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3514  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3515  s->sList = av_mallocz(sizeof(HEVCContext*) * s->threads_number);
3516  if (!s->HEVClc || !s->HEVClcList || !s->sList)
3517  goto fail;
3518  s->HEVClcList[0] = s->HEVClc;
3519  s->sList[0] = s;
3520 
3521  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3522  if (!s->cabac_state)
3523  goto fail;
3524 
3525  s->output_frame = av_frame_alloc();
3526  if (!s->output_frame)
3527  goto fail;
3528 
3529  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3530  s->DPB[i].frame = av_frame_alloc();
3531  if (!s->DPB[i].frame)
3532  goto fail;
3533  s->DPB[i].tf.f = s->DPB[i].frame;
3534  }
3535 
3536  s->max_ra = INT_MAX;
3537 
3538  s->md5_ctx = av_md5_alloc();
3539  if (!s->md5_ctx)
3540  goto fail;
3541 
3542  ff_bswapdsp_init(&s->bdsp);
3543 
3544  s->context_initialized = 1;
3545  s->eos = 0;
3546 
3547  ff_hevc_reset_sei(&s->sei);
3548 
3549  return 0;
3550 
3551 fail:
3552  hevc_decode_free(avctx);
3553  return AVERROR(ENOMEM);
3554 }
3555 
3556 #if HAVE_THREADS
3557 static int hevc_update_thread_context(AVCodecContext *dst,
3558  const AVCodecContext *src)
3559 {
3560  HEVCContext *s = dst->priv_data;
3561  HEVCContext *s0 = src->priv_data;
3562  int i, ret;
3563 
3564  if (!s->context_initialized) {
3565  ret = hevc_init_context(dst);
3566  if (ret < 0)
3567  return ret;
3568  }
3569 
3570  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3571  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3572  if (s0->DPB[i].frame->buf[0]) {
3573  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3574  if (ret < 0)
3575  return ret;
3576  }
3577  }
3578 
3579  if (s->ps.sps != s0->ps.sps)
3580  s->ps.sps = NULL;
3581  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3582  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3583  if (ret < 0)
3584  return ret;
3585  }
3586 
3587  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3588  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3589  if (ret < 0)
3590  return ret;
3591  }
3592 
3593  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3594  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3595  if (ret < 0)
3596  return ret;
3597  }
3598 
3599  if (s->ps.sps != s0->ps.sps)
3600  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3601  return ret;
3602 
3603  s->seq_decode = s0->seq_decode;
3604  s->seq_output = s0->seq_output;
3605  s->pocTid0 = s0->pocTid0;
3606  s->max_ra = s0->max_ra;
3607  s->eos = s0->eos;
3608  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3609 
3610  s->is_nalff = s0->is_nalff;
3611  s->nal_length_size = s0->nal_length_size;
3612 
3613  s->threads_number = s0->threads_number;
3614  s->threads_type = s0->threads_type;
3615 
3616  if (s0->eos) {
3617  s->seq_decode = (s->seq_decode + 1) & 0xff;
3618  s->max_ra = INT_MAX;
3619  }
3620 
3621  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3622  if (ret < 0)
3623  return ret;
3624 
3625  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3626  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3627  s->sei.unregistered.nb_buf_ref = 0;
3628 
3629  if (s0->sei.unregistered.nb_buf_ref) {
3630  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3631  s0->sei.unregistered.nb_buf_ref,
3632  sizeof(*s->sei.unregistered.buf_ref));
3633  if (ret < 0)
3634  return ret;
3635 
3636  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3637  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3638  if (!s->sei.unregistered.buf_ref[i])
3639  return AVERROR(ENOMEM);
3640  s->sei.unregistered.nb_buf_ref++;
3641  }
3642  }
3643 
3644  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3645  if (ret < 0)
3646  return ret;
3647 
3648  s->sei.frame_packing = s0->sei.frame_packing;
3649  s->sei.display_orientation = s0->sei.display_orientation;
3650  s->sei.mastering_display = s0->sei.mastering_display;
3651  s->sei.content_light = s0->sei.content_light;
3652  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3653 
3655  if (ret < 0)
3656  return ret;
3657 
3658  return 0;
3659 }
3660 #endif
3661 
3663 {
3664  HEVCContext *s = avctx->priv_data;
3665  int ret;
3666 
3667  if(avctx->active_thread_type & FF_THREAD_SLICE)
3668  s->threads_number = avctx->thread_count;
3669  else
3670  s->threads_number = 1;
3671 
3672  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3673  s->threads_type = FF_THREAD_FRAME;
3674  else
3675  s->threads_type = FF_THREAD_SLICE;
3676 
3677  ret = hevc_init_context(avctx);
3678  if (ret < 0)
3679  return ret;
3680 
3681  s->enable_parallel_tiles = 0;
3682  s->sei.picture_timing.picture_struct = 0;
3683  s->eos = 1;
3684 
3685  atomic_init(&s->wpp_err, 0);
3686 
3687  if (!avctx->internal->is_copy) {
3688  if (avctx->extradata_size > 0 && avctx->extradata) {
3689  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3690  if (ret < 0) {
3691  return ret;
3692  }
3693  }
3694  }
3695 
3696  return 0;
3697 }
3698 
3700 {
3701  HEVCContext *s = avctx->priv_data;
3703  ff_hevc_reset_sei(&s->sei);
3704  s->max_ra = INT_MAX;
3705  s->eos = 1;
3706 }
3707 
3708 #define OFFSET(x) offsetof(HEVCContext, x)
3709 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3710 
3711 static const AVOption options[] = {
3712  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3713  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3714  { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
3715  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3716  { NULL },
3717 };
3718 
3719 static const AVClass hevc_decoder_class = {
3720  .class_name = "HEVC decoder",
3721  .item_name = av_default_item_name,
3722  .option = options,
3723  .version = LIBAVUTIL_VERSION_INT,
3724 };
3725 
3727  .name = "hevc",
3728  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3729  .type = AVMEDIA_TYPE_VIDEO,
3730  .id = AV_CODEC_ID_HEVC,
3731  .priv_data_size = sizeof(HEVCContext),
3732  .priv_class = &hevc_decoder_class,
3734  .close = hevc_decode_free,
3737  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3738  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3743  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3744 #if CONFIG_HEVC_DXVA2_HWACCEL
3745  HWACCEL_DXVA2(hevc),
3746 #endif
3747 #if CONFIG_HEVC_D3D11VA_HWACCEL
3748  HWACCEL_D3D11VA(hevc),
3749 #endif
3750 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3751  HWACCEL_D3D11VA2(hevc),
3752 #endif
3753 #if CONFIG_HEVC_NVDEC_HWACCEL
3754  HWACCEL_NVDEC(hevc),
3755 #endif
3756 #if CONFIG_HEVC_VAAPI_HWACCEL
3757  HWACCEL_VAAPI(hevc),
3758 #endif
3759 #if CONFIG_HEVC_VDPAU_HWACCEL
3760  HWACCEL_VDPAU(hevc),
3761 #endif
3762 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3763  HWACCEL_VIDEOTOOLBOX(hevc),
3764 #endif
3765  NULL
3766  },
3767 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
HEVCSEIFilmGrainCharacteristics::comp_model_present_flag
int comp_model_present_flag[3]
Definition: hevc_sei.h:124
verify_md5
static int verify_md5(HEVCContext *s, AVFrame *frame)
Definition: hevcdec.c:3265
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
HEVC_NAL_RADL_N
@ HEVC_NAL_RADL_N
Definition: hevc.h:35
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
SliceHeader::beta_offset
int beta_offset
beta_offset_div2 * 2
Definition: hevcdec.h:298
AVCodec
AVCodec.
Definition: codec.h:202
bswapdsp.h
L1
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
Definition: snow.txt:554
stride
int stride
Definition: mace.c:144
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
HEVCLocalContext
Definition: hevcdec.h:425
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
HEVCFrame::flags
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
Definition: hevcdec.h:422
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
HWACCEL_MAX
#define HWACCEL_MAX
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ff_hevc_sao_type_idx_decode
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:573
HEVCFrame::tf
ThreadFrame tf
Definition: hevcdec.h:398
HEVCFrame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: hevcdec.h:410
level
uint8_t level
Definition: svq3.c:204
ff_hevc_no_residual_syntax_flag_decode
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:835
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
av_clip
#define av_clip
Definition: common.h:96
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
hls_decode_neighbour
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts)
Definition: hevcdec.c:2405
ff_hevc_sao_eo_class_decode
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
Definition: hevc_cabac.c:608
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
ff_hevc_pred_init
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
Definition: hevcpred.c:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
HEVCSEIUnregistered
Definition: hevc_sei.h:64
hevc_decode_flush
static void hevc_decode_flush(AVCodecContext *avctx)
Definition: hevcdec.c:3699
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
ff_hevc_set_qPy
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
Definition: hevc_filter.c:121
chroma_mc_bi
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
Definition: hevcdec.c:1698
PART_NxN
@ PART_NxN
Definition: hevcdec.h:148
luma_mc_bi
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
Definition: hevcdec.c:1542
ff_hevc_res_scale_sign_flag
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
Definition: hevc_cabac.c:912
decode_nal_unit
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:3012
ff_hevc_split_transform_flag_decode
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
Definition: hevc_cabac.c:873
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:878
out
FILE * out
Definition: movenc.c:54
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:605
SAO_BAND
@ SAO_BAND
Definition: hevcdec.h:214
ff_hevc_profiles
const AVProfile ff_hevc_profiles[]
Definition: profiles.c:83
HEVCSEIFilmGrainCharacteristics::matrix_coeffs
int matrix_coeffs
Definition: hevc_sei.h:121
AVFilmGrainH274Params::color_space
enum AVColorSpace color_space
Definition: film_grain_params.h:152
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2540
ff_hevc_hls_filter
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
Definition: hevc_filter.c:842
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1324
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:547
HEVCLocalContext::ctb_up_flag
uint8_t ctb_up_flag
Definition: hevcdec.h:443
mv
static const int8_t mv[256][2]
Definition: 4xm.c:79
SliceHeader::num_entry_point_offsets
int num_entry_point_offsets
Definition: hevcdec.h:306
HEVC_NAL_STSA_N
@ HEVC_NAL_STSA_N
Definition: hevc.h:33
PART_2NxnU
@ PART_2NxnU
Definition: hevcdec.h:149
AVFilmGrainH274Params::blending_mode_id
int blending_mode_id
Specifies the blending mode used to blend the simulated film grain with the decoded images.
Definition: film_grain_params.h:160
ff_hevc_cu_qp_delta_abs
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
Definition: hevc_cabac.c:640
AV_FRAME_DATA_S12M_TIMECODE
@ AV_FRAME_DATA_S12M_TIMECODE
Timecode which conforms to SMPTE ST 12-1.
Definition: frame.h:151
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:123
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
H2645NAL::nuh_layer_id
int nuh_layer_id
Definition: h2645_parse.h:64
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:303
set_deblocking_bypass
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1286
pixdesc.h
HEVCFrame::tab_mvf
MvField * tab_mvf
Definition: hevcdec.h:399
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:953
TransformUnit::cu_qp_delta
int cu_qp_delta
Definition: hevcdec.h:371
HEVC_NAL_TSA_N
@ HEVC_NAL_TSA_N
Definition: hevc.h:31
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:576
HEVCFrame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: hevcdec.h:411
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
AVPacket::data
uint8_t * data
Definition: packet.h:373
PAR
#define PAR
Definition: hevcdec.c:3709
INTRA_DC
@ INTRA_DC
Definition: hevcdec.h:176
AVOption
AVOption.
Definition: opt.h:247
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:523
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:476
hevc_decode_free
static av_cold int hevc_decode_free(AVCodecContext *avctx)
Definition: hevcdec.c:3461
ff_hevc_hls_filters
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
Definition: hevc_filter.c:878
data
const char data[16]
Definition: mxf.c:143
Mv::y
int16_t y
vertical component of motion vector
Definition: hevcdec.h:342
ff_hevc_mpm_idx_decode
int ff_hevc_mpm_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:759
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:389
SAO_EDGE
@ SAO_EDGE
Definition: hevcdec.h:215
ff_hevc_hls_residual_coding
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
Definition: hevc_cabac.c:1031
SliceHeader::slice_temporal_mvp_enabled_flag
uint8_t slice_temporal_mvp_enabled_flag
Definition: hevcdec.h:278
MvField::mv
Mv mv[2]
Definition: hevcdec.h:346
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
TransformUnit::is_cu_qp_delta_coded
uint8_t is_cu_qp_delta_coded
Definition: hevcdec.h:379
HEVC_NAL_RASL_N
@ HEVC_NAL_RASL_N
Definition: hevc.h:37
HEVC_NAL_STSA_R
@ HEVC_NAL_STSA_R
Definition: hevc.h:34
MODE_INTRA
@ MODE_INTRA
Definition: hevcdec.h:157
AVFilmGrainH274Params::color_range
enum AVColorRange color_range
Definition: film_grain_params.h:149
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
HEVC_NAL_BLA_W_RADL
@ HEVC_NAL_BLA_W_RADL
Definition: hevc.h:46
SliceHeader::slice_loop_filter_across_slices_enabled_flag
uint8_t slice_loop_filter_across_slices_enabled_flag
Definition: hevcdec.h:287
SAOParams::offset_sign
int offset_sign[3][4]
sao_offset_sign
Definition: hevcdsp.h:36
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
export_stream_params
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:318
HEVCLocalContext::ctb_up_left_flag
uint8_t ctb_up_left_flag
Definition: hevcdec.h:445
H2645NAL::temporal_id
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
Definition: h2645_parse.h:59
av_timecode_get_smpte
uint32_t av_timecode_get_smpte(AVRational rate, int drop, int hh, int mm, int ss, int ff)
Convert sei info to SMPTE 12M binary representation.
Definition: timecode.c:68
RefPicList
Definition: hevcdec.h:239
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
OFFSET
#define OFFSET(x)
Definition: hevcdec.c:3708
PF_INTRA
@ PF_INTRA
Definition: hevcdec.h:168
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:317
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
MODE_SKIP
@ MODE_SKIP
Definition: hevcdec.h:158
HEVCLocalContext::end_of_tiles_x
int end_of_tiles_x
Definition: hevcdec.h:446
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
CodingUnit::x
int x
Definition: hevcdec.h:328
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
BOUNDARY_LEFT_TILE
#define BOUNDARY_LEFT_TILE
Definition: hevcdec.h:460
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1710
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
Definition: internal.h:130
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
PART_2Nx2N
@ PART_2Nx2N
Definition: hevcdec.h:145
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
SET_SAO
#define SET_SAO(elem, value)
Definition: hevcdec.c:978
HEVCLocalContext::ctb_up_right_flag
uint8_t ctb_up_right_flag
Definition: hevcdec.h:444
ff_hevc_clear_refs
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
Definition: hevc_refs.c:66
PRED_BI
@ PRED_BI
Definition: hevcdec.h:164
U
#define U(x)
Definition: vp56_arith.h:37
av_ceil_log2
#define av_ceil_log2
Definition: common.h:93
ff_hevc_split_coding_unit_flag_decode
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
Definition: hevc_cabac.c:693
fail
#define fail()
Definition: checkasm.h:127
PredictionUnit::intra_pred_mode_c
uint8_t intra_pred_mode_c[4]
Definition: hevcdec.h:366
ff_hevc_sao_merge_flag_decode
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:568
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1440
md5
struct AVMD5 * md5
Definition: movenc.c:56
InterPredIdc
InterPredIdc
Definition: hevcdec.h:161
MODE_INTER
@ MODE_INTER
Definition: hevcdec.h:156
HEVCSEIA53Caption
Definition: hevc_sei.h:60
timecode.h
GetBitContext
Definition: get_bits.h:62
HEVCLocalContext::pu
PredictionUnit pu
Definition: hevcdec.h:456
decode_lt_rps
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
Definition: hevcdec.c:261
TransformUnit::res_scale_val
int res_scale_val
Definition: hevcdec.h:373
HEVCSEIFilmGrainCharacteristics::present
int present
Definition: hevc_sei.h:113
SliceHeader::short_term_ref_pic_set_size
int short_term_ref_pic_set_size
Definition: hevcdec.h:269
hevc_decoder_class
static const AVClass hevc_decoder_class
Definition: hevcdec.c:3719
val
static double val(void *priv, double ch)
Definition: aeval.c:76
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
ff_hevc_output_frame
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
Definition: hevc_refs.c:174
HEVCSEIFilmGrainCharacteristics::bit_depth_chroma
int bit_depth_chroma
Definition: hevc_sei.h:117
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:571
SliceHeader::long_term_ref_pic_set_size
int long_term_ref_pic_set_size
Definition: hevcdec.h:272
HEVCSEIFilmGrainCharacteristics::log2_scale_factor
int log2_scale_factor
Definition: hevc_sei.h:123
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
ff_hevc_luma_mv_mvp_mode
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
Definition: hevc_mvs.c:582
CTB
#define CTB(tab, x, y)
Definition: hevcdec.c:976
ff_reset_entries
void ff_reset_entries(AVCodecContext *avctx)
Definition: pthread_slice.c:238
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_hevc_skip_flag_decode
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
Definition: hevc_cabac.c:625
ff_hevc_merge_flag_decode
int ff_hevc_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:799
AVRational::num
int num
Numerator.
Definition: rational.h:59
HWACCEL_VIDEOTOOLBOX
@ HWACCEL_VIDEOTOOLBOX
Definition: ffmpeg.h:63
SliceHeader::slice_segment_addr
unsigned int slice_segment_addr
address (in raster order) of the first block in the current slice
Definition: hevcdec.h:254
AVFilmGrainH274Params::intensity_interval_upper_bound
uint8_t intensity_interval_upper_bound[3][256]
Specifies the upper bound of each intensity interval for which the set of model values applies for th...
Definition: film_grain_params.h:194
hevc_parse.h
MvField::ref_idx
int8_t ref_idx[2]
Definition: hevcdec.h:347
ff_hevc_save_states
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
Definition: hevc_cabac.c:450
AVFilmGrainH274Params::bit_depth_luma
int bit_depth_luma
Specifies the bit depth used for the luma component.
Definition: film_grain_params.h:142
ff_hevc_deblocking_boundary_strengths
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
Definition: hevc_filter.c:714
SAOParams::eo_class
int eo_class[3]
sao_eo_class
Definition: hevcdsp.h:40
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:392
ff_hevc_prev_intra_luma_pred_flag_decode
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:754
ff_hevc_decode_nal_sei
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int type)
Definition: hevc_sei.c:543
ff_thread_report_progress2
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
Definition: pthread_slice.c:174
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
hls_decode_entry_wpp
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
Definition: hevcdec.c:2526
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:946
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
QPEL_EXTRA_AFTER
#define QPEL_EXTRA_AFTER
Definition: hevcdec.h:67
HEVC_NAL_BLA_N_LP
@ HEVC_NAL_BLA_N_LP
Definition: hevc.h:47
SAOParams::type_idx
uint8_t type_idx[3]
sao_type_idx
Definition: hevcdsp.h:44
film_grain_params.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
TransformUnit::intra_pred_mode
int intra_pred_mode
Definition: hevcdec.h:376
ff_hevc_hls_mvd_coding
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevc_cabac.c:1541
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
HEVCSEIFilmGrainCharacteristics::full_range
int full_range
Definition: hevc_sei.h:118
HEVC_NAL_RADL_R
@ HEVC_NAL_RADL_R
Definition: hevc.h:36
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:588
SliceHeader::cabac_init_flag
uint8_t cabac_init_flag
Definition: hevcdec.h:285
HEVC_MAX_REFS
@ HEVC_MAX_REFS
Definition: hevc.h:119
H2645NAL::size
int size
Definition: h2645_parse.h:37
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
hls_pcm_sample
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1424
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:679
VUI::matrix_coeffs
uint8_t matrix_coeffs
Definition: hevc_ps.h:61
width
#define width
QPEL_EXTRA_BEFORE
#define QPEL_EXTRA_BEFORE
Definition: hevcdec.h:66
stereo3d.h
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:257
ff_thread_await_progress2
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
Definition: pthread_slice.c:185
SAO_NOT_APPLIED
@ SAO_NOT_APPLIED
Definition: hevcdec.h:213
hls_sao_param
static void hls_sao_param(HEVCContext *s, int rx, int ry)
Definition: hevcdec.c:990
set_sps
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
Definition: hevcdec.c:477
HEVCSEIFilmGrainCharacteristics::num_intensity_intervals
uint16_t num_intensity_intervals[3]
Definition: hevc_sei.h:125
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
ff_hevc_ref_idx_lx_decode
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
Definition: hevc_cabac.c:814
s1
#define s1
Definition: regdef.h:38
ff_hevc_nal_is_nonref
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
Definition: hevcdec.h:638
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_film_grain_params_create_side_data
AVFilmGrainParams * av_film_grain_params_create_side_data(AVFrame *frame)
Allocate a complete AVFilmGrainParams and add it to the frame.
Definition: film_grain_params.c:31
luma_intra_pred_mode
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
Definition: hevcdec.c:1969
AVFilmGrainParams::codec
union AVFilmGrainParams::@291 codec
Additional fields may be added both here and in any structure included.
ff_hevc_set_new_ref
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
Definition: hevc_refs.c:135
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:290
SliceHeader::slice_rps
ShortTermRPS slice_rps
Definition: hevcdec.h:270
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
HEVCSEIFilmGrainCharacteristics::model_id
int model_id
Definition: hevc_sei.h:114
ff_hevc_cu_transquant_bypass_flag_decode
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:620
IS_IDR
#define IS_IDR(s)
Definition: hevcdec.h:78
ff_hevc_intra_chroma_pred_mode_decode
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:777
set_ct_depth
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
Definition: hevcdec.c:2049
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:38
ff_hevc_slice_rpl
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
Definition: hevc_refs.c:291
RefPicList::ref
struct HEVCFrame * ref[HEVC_MAX_REFS]
Definition: hevcdec.h:240
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
ff_hevc_sao_offset_abs_decode
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
Definition: hevc_cabac.c:593
H2645NAL::skipped_bytes_pos
int * skipped_bytes_pos
Definition: h2645_parse.h:68
VUI::colour_primaries
uint8_t colour_primaries
Definition: hevc_ps.h:59
HEVC_SLICE_I
@ HEVC_SLICE_I
Definition: hevc.h:98
hls_coding_unit
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2157
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SliceHeader::size
int * size
Definition: hevcdec.h:305
HEVCSEIUnregistered::buf_ref
AVBufferRef ** buf_ref
Definition: hevc_sei.h:65
AVFilmGrainH274Params::comp_model_value
int16_t comp_model_value[3][256][6]
Specifies the model values for the component for each intensity interval.
Definition: film_grain_params.h:205
SliceHeader::collocated_list
uint8_t collocated_list
Definition: hevcdec.h:288
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
ff_hevc_luma_mv_merge_mode
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevc_mvs.c:479
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:451
HEVC_MAX_PPS_COUNT
@ HEVC_MAX_PPS_COUNT
Definition: hevc.h:114
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: defs.h:51
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:241
INTRA_ANGULAR_26
@ INTRA_ANGULAR_26
Definition: hevcdec.h:201
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:54
CodingUnit::max_trafo_depth
uint8_t max_trafo_depth
MaxTrafoDepth.
Definition: hevcdec.h:336
SliceHeader::slice_ctb_addr_rs
int slice_ctb_addr_rs
Definition: hevcdec.h:324
arg
const char * arg
Definition: jacosubdec.c:67
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
HEVC_NAL_IDR_N_LP
@ HEVC_NAL_IDR_N_LP
Definition: hevc.h:49
AVFilmGrainH274Params::model_id
int model_id
Specifies the film grain simulation mode.
Definition: film_grain_params.h:137
SliceHeader::pic_output_flag
uint8_t pic_output_flag
Definition: hevcdec.h:264
hls_slice_data_wpp
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2607
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
ff_hevc_sao_offset_sign_decode
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
Definition: hevc_cabac.c:603
PredictionUnit::rem_intra_luma_pred_mode
int rem_intra_luma_pred_mode
Definition: hevcdec.h:362
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:46
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
IS_BLA
#define IS_BLA(s)
Definition: hevcdec.h:79
ff_hevc_merge_idx_decode
int ff_hevc_merge_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:788
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
HEVC_SLICE_B
@ HEVC_SLICE_B
Definition: hevc.h:96
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
hevc_ref_frame
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
Definition: hevcdec.c:3421
HEVCSEIFilmGrainCharacteristics::separate_colour_description_present_flag
int separate_colour_description_present_flag
Definition: hevc_sei.h:115
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:967
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
HEVCLocalContext::tmp
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
Definition: hevcdec.h:452
ff_hevc_ps_uninit
void ff_hevc_ps_uninit(HEVCParamSets *ps)
Definition: hevc_ps.c:1747
HEVC_NAL_PPS
@ HEVC_NAL_PPS
Definition: hevc.h:63
LongTermRPS::poc
int poc[32]
Definition: hevcdec.h:233
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:597
CodingUnit::cu_transquant_bypass_flag
uint8_t cu_transquant_bypass_flag
Definition: hevcdec.h:337
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:418
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
HEVCSEIFilmGrainCharacteristics::color_primaries
int color_primaries
Definition: hevc_sei.h:119
HEVCLocalContext::first_qp_group
uint8_t first_qp_group
Definition: hevcdec.h:430
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
hls_transform_unit
static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
Definition: hevcdec.c:1080
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2140
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
profiles.h
src
#define src
Definition: vp8dsp.c:255
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
L0
#define L0
Definition: hevcdec.h:60
HEVCFrame::rpl_tab
RefPicListTab ** rpl_tab
Definition: hevcdec.h:401
LongTermRPS::poc_msb_present
uint8_t poc_msb_present[32]
Definition: hevcdec.h:234
ff_hevc_pel_weight
const uint8_t ff_hevc_pel_weight[65]
Definition: hevcdec.c:49
HEVC_NAL_SEI_SUFFIX
@ HEVC_NAL_SEI_SUFFIX
Definition: hevc.h:69
HEVCSEIFilmGrainCharacteristics::intensity_interval_lower_bound
uint8_t intensity_interval_lower_bound[3][256]
Definition: hevc_sei.h:127
HEVC_NAL_CRA_NUT
@ HEVC_NAL_CRA_NUT
Definition: hevc.h:50
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:573
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
PART_Nx2N
@ PART_Nx2N
Definition: hevcdec.h:147
RefPicListTab
Definition: hevcdec.h:246
BOUNDARY_UPPER_TILE
#define BOUNDARY_UPPER_TILE
Definition: hevcdec.h:462
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:423
ff_hevc_decode_extradata
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
Definition: hevc_parse.c:80
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1335
SliceHeader::nb_refs
unsigned int nb_refs[2]
Definition: hevcdec.h:280
Mv::x
int16_t x
horizontal component of motion vector
Definition: hevcdec.h:341
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:68
AVCodecContext::level
int level
level
Definition: avcodec.h:1651
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:542
HEVC_NAL_RASL_R
@ HEVC_NAL_RASL_R
Definition: hevc.h:38
PF_BI
@ PF_BI
Definition: hevcdec.h:171
AV_FRAME_DATA_SEI_UNREGISTERED
@ AV_FRAME_DATA_SEI_UNREGISTERED
User data unregistered metadata associated with a video frame.
Definition: frame.h:177
SAMPLE_CTB
#define SAMPLE_CTB(tab, x, y)
Definition: hevcdec.h:76
HEVCWindow
Definition: hevc_ps.h:42
SCAN_HORIZ
@ SCAN_HORIZ
Definition: hevcdec.h:228
hevc_data.h
hevc_decode_frame
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
Definition: hevcdec.c:3358
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_hevc_frame_rps
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Definition: hevc_refs.c:443
HEVCLocalContext::edge_emu_buffer
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:449
IS_IRAP
#define IS_IRAP(s)
Definition: hevcdec.h:81
HEVCSEIFilmGrainCharacteristics::blending_mode_id
int blending_mode_id
Definition: hevc_sei.h:122
LongTermRPS::used
uint8_t used[32]
Definition: hevcdec.h:235
SliceHeader::colour_plane_id
uint8_t colour_plane_id
RPS coded in the slice header itself is stored here.
Definition: hevcdec.h:265
ff_hevc_mvp_lx_flag_decode
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
Definition: