1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "config_components.h"
27 
28 #include "libavutil/attributes.h"
29 #include "libavutil/avstring.h"
30 #include "libavutil/common.h"
31 #include "libavutil/display.h"
33 #include "libavutil/internal.h"
35 #include "libavutil/md5.h"
36 #include "libavutil/opt.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/stereo3d.h"
39 #include "libavutil/timecode.h"
40 
41 #include "bswapdsp.h"
42 #include "bytestream.h"
43 #include "cabac_functions.h"
44 #include "codec_internal.h"
45 #include "decode.h"
46 #include "golomb.h"
47 #include "hevc.h"
48 #include "hevc_data.h"
49 #include "hevc_parse.h"
50 #include "hevcdec.h"
51 #include "hwconfig.h"
52 #include "internal.h"
53 #include "profiles.h"
54 #include "thread.h"
55 #include "threadframe.h"
56 
57 static const uint8_t hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
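/* Maps a prediction block width (2..64 samples) to the index used to select
 * the matching put_hevc_qpel/epel DSP function (see luma_mc_uni() and the
 * other motion-compensation helpers below). */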
58 
59 /**
60  * NOTE: Each function hls_foo corresponds to the function foo in the
61  * specification (HLS stands for High Level Syntax).
62  */
63 
64 /**
65  * Section 5.7
66  */
67 
68 /* free everything allocated by pic_arrays_init() */
69 static void pic_arrays_free(HEVCContext *s)
70 {
71  av_freep(&s->sao);
72  av_freep(&s->deblock);
73 
74  av_freep(&s->skip_flag);
75  av_freep(&s->tab_ct_depth);
76 
77  av_freep(&s->tab_ipm);
78  av_freep(&s->cbf_luma);
79  av_freep(&s->is_pcm);
80 
81  av_freep(&s->qp_y_tab);
82  av_freep(&s->tab_slice_address);
83  av_freep(&s->filter_slice_edges);
84 
85  av_freep(&s->horizontal_bs);
86  av_freep(&s->vertical_bs);
87 
88  av_freep(&s->sh.entry_point_offset);
89  av_freep(&s->sh.size);
90  av_freep(&s->sh.offset);
91 
92  av_buffer_pool_uninit(&s->tab_mvf_pool);
93  av_buffer_pool_uninit(&s->rpl_tab_pool);
94 }
95 
96 /* allocate arrays that depend on frame dimensions */
97 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
98 {
99  int log2_min_cb_size = sps->log2_min_cb_size;
100  int width = sps->width;
101  int height = sps->height;
102  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
103  ((height >> log2_min_cb_size) + 1);
104  int ctb_count = sps->ctb_width * sps->ctb_height;
105  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
106 
107  s->bs_width = (width >> 2) + 1;
108  s->bs_height = (height >> 2) + 1;
109 
110  s->sao = av_calloc(ctb_count, sizeof(*s->sao));
111  s->deblock = av_calloc(ctb_count, sizeof(*s->deblock));
112  if (!s->sao || !s->deblock)
113  goto fail;
114 
115  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
116  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
117  if (!s->skip_flag || !s->tab_ct_depth)
118  goto fail;
119 
120  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
121  s->tab_ipm = av_mallocz(min_pu_size);
122  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
123  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
124  goto fail;
125 
126  s->filter_slice_edges = av_mallocz(ctb_count);
127  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
128  sizeof(*s->tab_slice_address));
129  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
130  sizeof(*s->qp_y_tab));
131  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
132  goto fail;
133 
134  s->horizontal_bs = av_calloc(s->bs_width, s->bs_height);
135  s->vertical_bs = av_calloc(s->bs_width, s->bs_height);
136  if (!s->horizontal_bs || !s->vertical_bs)
137  goto fail;
138 
139  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
140  av_buffer_allocz);
141  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
142  av_buffer_allocz);
143  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
144  goto fail;
145 
146  return 0;
147 
148 fail:
149  pic_arrays_free(s);
150  return AVERROR(ENOMEM);
151 }
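/* Parse the weighted-prediction table from the slice header: per-reference
 * luma/chroma weights and offsets for lists L0 and L1 (HEVC spec 7.3.6.3). */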
152 
153 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
154 {
155  int i = 0;
156  int j = 0;
157  uint8_t luma_weight_l0_flag[16];
158  uint8_t chroma_weight_l0_flag[16];
159  uint8_t luma_weight_l1_flag[16];
160  uint8_t chroma_weight_l1_flag[16];
161  int luma_log2_weight_denom;
162 
163  luma_log2_weight_denom = get_ue_golomb_long(gb);
164  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
165  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
166  return AVERROR_INVALIDDATA;
167  }
168  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
169  if (s->ps.sps->chroma_format_idc != 0) {
170  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
171  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
172  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
173  return AVERROR_INVALIDDATA;
174  }
175  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
176  }
177 
178  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
179  luma_weight_l0_flag[i] = get_bits1(gb);
180  if (!luma_weight_l0_flag[i]) {
181  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
182  s->sh.luma_offset_l0[i] = 0;
183  }
184  }
185  if (s->ps.sps->chroma_format_idc != 0) {
186  for (i = 0; i < s->sh.nb_refs[L0]; i++)
187  chroma_weight_l0_flag[i] = get_bits1(gb);
188  } else {
189  for (i = 0; i < s->sh.nb_refs[L0]; i++)
190  chroma_weight_l0_flag[i] = 0;
191  }
192  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
193  if (luma_weight_l0_flag[i]) {
194  int delta_luma_weight_l0 = get_se_golomb(gb);
195  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
196  return AVERROR_INVALIDDATA;
197  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
198  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
199  }
200  if (chroma_weight_l0_flag[i]) {
201  for (j = 0; j < 2; j++) {
202  int delta_chroma_weight_l0 = get_se_golomb(gb);
203  int delta_chroma_offset_l0 = get_se_golomb(gb);
204 
205  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
206  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
207  return AVERROR_INVALIDDATA;
208  }
209 
210  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
211  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
212  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
213  }
214  } else {
215  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
216  s->sh.chroma_offset_l0[i][0] = 0;
217  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
218  s->sh.chroma_offset_l0[i][1] = 0;
219  }
220  }
221  if (s->sh.slice_type == HEVC_SLICE_B) {
222  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
223  luma_weight_l1_flag[i] = get_bits1(gb);
224  if (!luma_weight_l1_flag[i]) {
225  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
226  s->sh.luma_offset_l1[i] = 0;
227  }
228  }
229  if (s->ps.sps->chroma_format_idc != 0) {
230  for (i = 0; i < s->sh.nb_refs[L1]; i++)
231  chroma_weight_l1_flag[i] = get_bits1(gb);
232  } else {
233  for (i = 0; i < s->sh.nb_refs[L1]; i++)
234  chroma_weight_l1_flag[i] = 0;
235  }
236  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
237  if (luma_weight_l1_flag[i]) {
238  int delta_luma_weight_l1 = get_se_golomb(gb);
239  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
240  return AVERROR_INVALIDDATA;
241  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
242  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
243  }
244  if (chroma_weight_l1_flag[i]) {
245  for (j = 0; j < 2; j++) {
246  int delta_chroma_weight_l1 = get_se_golomb(gb);
247  int delta_chroma_offset_l1 = get_se_golomb(gb);
248 
249  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
250  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
251  return AVERROR_INVALIDDATA;
252  }
253 
254  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
255  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
256  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
257  }
258  } else {
259  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
260  s->sh.chroma_offset_l1[i][0] = 0;
261  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
262  s->sh.chroma_offset_l1[i][1] = 0;
263  }
264  }
265  }
266  return 0;
267 }
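/* Parse the long-term reference picture set of the current slice, combining
 * candidates signalled in the SPS with entries coded directly in the slice
 * header (HEVC spec 7.3.6.1). */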
268 
269 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
270 {
271  const HEVCSPS *sps = s->ps.sps;
272  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
273  int prev_delta_msb = 0;
274  unsigned int nb_sps = 0, nb_sh;
275  int i;
276 
277  rps->nb_refs = 0;
278  if (!sps->long_term_ref_pics_present_flag)
279  return 0;
280 
281  if (sps->num_long_term_ref_pics_sps > 0)
282  nb_sps = get_ue_golomb_long(gb);
283  nb_sh = get_ue_golomb_long(gb);
284 
285  if (nb_sps > sps->num_long_term_ref_pics_sps)
286  return AVERROR_INVALIDDATA;
287  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
288  return AVERROR_INVALIDDATA;
289 
290  rps->nb_refs = nb_sh + nb_sps;
291 
292  for (i = 0; i < rps->nb_refs; i++) {
293 
294  if (i < nb_sps) {
295  uint8_t lt_idx_sps = 0;
296 
297  if (sps->num_long_term_ref_pics_sps > 1)
298  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
299 
300  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
301  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
302  } else {
303  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
304  rps->used[i] = get_bits1(gb);
305  }
306 
307  rps->poc_msb_present[i] = get_bits1(gb);
308  if (rps->poc_msb_present[i]) {
309  int64_t delta = get_ue_golomb_long(gb);
310  int64_t poc;
311 
312  if (i && i != nb_sps)
313  delta += prev_delta_msb;
314 
315  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
316  if (poc != (int32_t)poc)
317  return AVERROR_INVALIDDATA;
318  rps->poc[i] = poc;
319  prev_delta_msb = delta;
320  }
321  }
322 
323  return 0;
324 }
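/* Propagate SPS/VPS parameters (dimensions, profile/level, color properties,
 * chroma sample location, frame rate) to the AVCodecContext. */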
325 
326 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
327 {
328  AVCodecContext *avctx = s->avctx;
329  const HEVCParamSets *ps = &s->ps;
330  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
331  const HEVCWindow *ow = &sps->output_window;
332  unsigned int num = 0, den = 0;
333 
334  avctx->pix_fmt = sps->pix_fmt;
335  avctx->coded_width = sps->width;
336  avctx->coded_height = sps->height;
337  avctx->width = sps->width - ow->left_offset - ow->right_offset;
338  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
339  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
340  avctx->profile = sps->ptl.general_ptl.profile_idc;
341  avctx->level = sps->ptl.general_ptl.level_idc;
342 
343  ff_set_sar(avctx, sps->vui.sar);
344 
345  if (sps->vui.video_signal_type_present_flag)
346  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
347  : AVCOL_RANGE_MPEG;
348  else
349  avctx->color_range = AVCOL_RANGE_MPEG;
350 
351  if (sps->vui.colour_description_present_flag) {
352  avctx->color_primaries = sps->vui.colour_primaries;
353  avctx->color_trc = sps->vui.transfer_characteristic;
354  avctx->colorspace = sps->vui.matrix_coeffs;
355  } else {
356  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
357  avctx->color_trc       = AVCOL_TRC_UNSPECIFIED;
358  avctx->colorspace      = AVCOL_SPC_UNSPECIFIED;
359  }
360 
361  avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED;
362  if (sps->chroma_format_idc == 1) {
363  if (sps->vui.chroma_loc_info_present_flag) {
364  if (sps->vui.chroma_sample_loc_type_top_field <= 5)
365  avctx->chroma_sample_location = sps->vui.chroma_sample_loc_type_top_field + 1;
366  } else
367  avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
368  }
369 
370  if (vps->vps_timing_info_present_flag) {
371  num = vps->vps_num_units_in_tick;
372  den = vps->vps_time_scale;
373  } else if (sps->vui.vui_timing_info_present_flag) {
374  num = sps->vui.vui_num_units_in_tick;
375  den = sps->vui.vui_time_scale;
376  }
377 
378  if (num != 0 && den != 0)
379  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
380  num, den, 1 << 30);
381 }
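/* Apply stream properties carried in SEI messages (closed captions,
 * alternative transfer characteristics, film grain) to the AVCodecContext. */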
382 
383 static int export_stream_params_from_sei(HEVCContext *s)
384 {
385  AVCodecContext *avctx = s->avctx;
386 
387  if (s->sei.a53_caption.buf_ref)
388  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
389 
390  if (s->sei.alternative_transfer.present &&
391  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
392  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
393  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
394  }
395 
396  if (s->sei.film_grain_characteristics.present)
397  avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
398 
399  return 0;
400 }
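/* Build the list of candidate pixel formats for the current SPS, hardware
 * accelerations first and the software format last, and let
 * ff_thread_get_format() pick the one to use. */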
401 
402 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
403 {
404 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
405  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
406  CONFIG_HEVC_NVDEC_HWACCEL + \
407  CONFIG_HEVC_VAAPI_HWACCEL + \
408  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
409  CONFIG_HEVC_VDPAU_HWACCEL)
410  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
411 
412  switch (sps->pix_fmt) {
413  case AV_PIX_FMT_YUV420P:
414  case AV_PIX_FMT_YUVJ420P:
415 #if CONFIG_HEVC_DXVA2_HWACCEL
416  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
417 #endif
418 #if CONFIG_HEVC_D3D11VA_HWACCEL
419  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
420  *fmt++ = AV_PIX_FMT_D3D11;
421 #endif
422 #if CONFIG_HEVC_VAAPI_HWACCEL
423  *fmt++ = AV_PIX_FMT_VAAPI;
424 #endif
425 #if CONFIG_HEVC_VDPAU_HWACCEL
426  *fmt++ = AV_PIX_FMT_VDPAU;
427 #endif
428 #if CONFIG_HEVC_NVDEC_HWACCEL
429  *fmt++ = AV_PIX_FMT_CUDA;
430 #endif
431 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
432  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
433 #endif
434  break;
435  case AV_PIX_FMT_YUV420P10:
436 #if CONFIG_HEVC_DXVA2_HWACCEL
437  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
438 #endif
439 #if CONFIG_HEVC_D3D11VA_HWACCEL
440  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
441  *fmt++ = AV_PIX_FMT_D3D11;
442 #endif
443 #if CONFIG_HEVC_VAAPI_HWACCEL
444  *fmt++ = AV_PIX_FMT_VAAPI;
445 #endif
446 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
447  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
448 #endif
449 #if CONFIG_HEVC_VDPAU_HWACCEL
450  *fmt++ = AV_PIX_FMT_VDPAU;
451 #endif
452 #if CONFIG_HEVC_NVDEC_HWACCEL
453  *fmt++ = AV_PIX_FMT_CUDA;
454 #endif
455  break;
456  case AV_PIX_FMT_YUV444P:
457 #if CONFIG_HEVC_VAAPI_HWACCEL
458  *fmt++ = AV_PIX_FMT_VAAPI;
459 #endif
460 #if CONFIG_HEVC_VDPAU_HWACCEL
461  *fmt++ = AV_PIX_FMT_VDPAU;
462 #endif
463 #if CONFIG_HEVC_NVDEC_HWACCEL
464  *fmt++ = AV_PIX_FMT_CUDA;
465 #endif
466 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
467  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
468 #endif
469  break;
470  case AV_PIX_FMT_YUV422P:
471  case AV_PIX_FMT_YUV422P10LE:
472 #if CONFIG_HEVC_VAAPI_HWACCEL
473  *fmt++ = AV_PIX_FMT_VAAPI;
474 #endif
475 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
476  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
477 #endif
478  break;
479  case AV_PIX_FMT_YUV444P10:
480 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
481  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
482 #endif
483  case AV_PIX_FMT_YUV420P12:
484  case AV_PIX_FMT_YUV444P12:
485 #if CONFIG_HEVC_VAAPI_HWACCEL
486  *fmt++ = AV_PIX_FMT_VAAPI;
487 #endif
488 #if CONFIG_HEVC_VDPAU_HWACCEL
489  *fmt++ = AV_PIX_FMT_VDPAU;
490 #endif
491 #if CONFIG_HEVC_NVDEC_HWACCEL
492  *fmt++ = AV_PIX_FMT_CUDA;
493 #endif
494  break;
495  case AV_PIX_FMT_YUV422P12:
496 #if CONFIG_HEVC_VAAPI_HWACCEL
497  *fmt++ = AV_PIX_FMT_VAAPI;
498 #endif
499  break;
500  }
501 
502  *fmt++ = sps->pix_fmt;
503  *fmt = AV_PIX_FMT_NONE;
504 
505  return ff_thread_get_format(s->avctx, pix_fmts);
506 }
507 
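/* (Re)initialize all SPS-dependent state: per-frame arrays, DSP and
 * prediction contexts, and the SAO line buffers. */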
508 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
509  enum AVPixelFormat pix_fmt)
510 {
511  int ret, i;
512 
513  pic_arrays_free(s);
514  s->ps.sps = NULL;
515  s->ps.vps = NULL;
516 
517  if (!sps)
518  return 0;
519 
520  ret = pic_arrays_init(s, sps);
521  if (ret < 0)
522  goto fail;
523 
524  export_stream_params(s, sps);
525 
526  s->avctx->pix_fmt = pix_fmt;
527 
528  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
529  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
530  ff_videodsp_init (&s->vdsp, sps->bit_depth);
531 
532  for (i = 0; i < 3; i++) {
533  av_freep(&s->sao_pixel_buffer_h[i]);
534  av_freep(&s->sao_pixel_buffer_v[i]);
535  }
536 
537  if (sps->sao_enabled && !s->avctx->hwaccel) {
538  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
539  int c_idx;
540 
541  for(c_idx = 0; c_idx < c_count; c_idx++) {
542  int w = sps->width >> sps->hshift[c_idx];
543  int h = sps->height >> sps->vshift[c_idx];
544  s->sao_pixel_buffer_h[c_idx] =
545  av_malloc((w * 2 * sps->ctb_height) <<
546  sps->pixel_shift);
547  s->sao_pixel_buffer_v[c_idx] =
548  av_malloc((h * 2 * sps->ctb_width) <<
549  sps->pixel_shift);
550  if (!s->sao_pixel_buffer_h[c_idx] ||
551  !s->sao_pixel_buffer_v[c_idx])
552  goto fail;
553  }
554  }
555 
556  s->ps.sps = sps;
557  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
558 
559  return 0;
560 
561 fail:
562  pic_arrays_free(s);
563  for (i = 0; i < 3; i++) {
564  av_freep(&s->sao_pixel_buffer_h[i]);
565  av_freep(&s->sao_pixel_buffer_v[i]);
566  }
567  s->ps.sps = NULL;
568  return ret;
569 }
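/* Parse a slice segment header (HEVC spec 7.3.6) and set up the SliceHeader
 * and decoder state for the slice data that follows. */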
570 
571 static int hls_slice_header(HEVCContext *s)
572 {
573  GetBitContext *gb = &s->HEVClc->gb;
574  SliceHeader *sh = &s->sh;
575  int i, ret;
576 
577  // Coded parameters
578  sh->first_slice_in_pic_flag = get_bits1(gb);
579  if (s->ref && sh->first_slice_in_pic_flag) {
580  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
581  return 1; // This slice will be skipped later, do not corrupt state
582  }
583 
584  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
585  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
586  s->max_ra = INT_MAX;
587  if (IS_IDR(s))
588  ff_hevc_clear_refs(s);
589  }
590  sh->no_output_of_prior_pics_flag = 0;
591  if (IS_IRAP(s))
592  sh->no_output_of_prior_pics_flag = get_bits1(gb);
593 
594  sh->pps_id = get_ue_golomb_long(gb);
595  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
596  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
597  return AVERROR_INVALIDDATA;
598  }
599  if (!sh->first_slice_in_pic_flag &&
600  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
601  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
602  return AVERROR_INVALIDDATA;
603  }
604  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
605  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
606  sh->no_output_of_prior_pics_flag = 1;
607 
608  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
609  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
610  enum AVPixelFormat pix_fmt;
611 
612  ff_hevc_clear_refs(s);
613 
614  ret = set_sps(s, sps, sps->pix_fmt);
615  if (ret < 0)
616  return ret;
617 
618  pix_fmt = get_format(s, sps);
619  if (pix_fmt < 0)
620  return pix_fmt;
621  s->avctx->pix_fmt = pix_fmt;
622 
623  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
624  s->max_ra = INT_MAX;
625  }
626 
627  ret = export_stream_params_from_sei(s);
628  if (ret < 0)
629  return ret;
630 
631  sh->dependent_slice_segment_flag = 0;
632  if (!sh->first_slice_in_pic_flag) {
633  int slice_address_length;
634 
635  if (s->ps.pps->dependent_slice_segments_enabled_flag)
636  sh->dependent_slice_segment_flag = get_bits1(gb);
637 
638  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
639  s->ps.sps->ctb_height);
640  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
641  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
642  av_log(s->avctx, AV_LOG_ERROR,
643  "Invalid slice segment address: %u.\n",
644  sh->slice_segment_addr);
645  return AVERROR_INVALIDDATA;
646  }
647 
648  if (!sh->dependent_slice_segment_flag) {
649  sh->slice_addr = sh->slice_segment_addr;
650  s->slice_idx++;
651  }
652  } else {
653  sh->slice_segment_addr = sh->slice_addr = 0;
654  s->slice_idx = 0;
655  s->slice_initialized = 0;
656  }
657 
658  if (!sh->dependent_slice_segment_flag) {
659  s->slice_initialized = 0;
660 
661  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
662  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
663 
664  sh->slice_type = get_ue_golomb_long(gb);
665  if (!(sh->slice_type == HEVC_SLICE_I ||
666  sh->slice_type == HEVC_SLICE_P ||
667  sh->slice_type == HEVC_SLICE_B)) {
668  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
669  sh->slice_type);
670  return AVERROR_INVALIDDATA;
671  }
672  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
673  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
674  return AVERROR_INVALIDDATA;
675  }
676 
677  // when flag is not present, picture is inferred to be output
678  sh->pic_output_flag = 1;
679  if (s->ps.pps->output_flag_present_flag)
680  sh->pic_output_flag = get_bits1(gb);
681 
682  if (s->ps.sps->separate_colour_plane_flag)
683  sh->colour_plane_id = get_bits(gb, 2);
684 
685  if (!IS_IDR(s)) {
686  int poc, pos;
687 
688  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
689  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
690  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
691  av_log(s->avctx, AV_LOG_WARNING,
692  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
693  if (s->avctx->err_recognition & AV_EF_EXPLODE)
694  return AVERROR_INVALIDDATA;
695  poc = s->poc;
696  }
697  s->poc = poc;
698 
699  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
700  pos = get_bits_left(gb);
701  if (!sh->short_term_ref_pic_set_sps_flag) {
702  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
703  if (ret < 0)
704  return ret;
705 
706  sh->short_term_rps = &sh->slice_rps;
707  } else {
708  int numbits, rps_idx;
709 
710  if (!s->ps.sps->nb_st_rps) {
711  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
712  return AVERROR_INVALIDDATA;
713  }
714 
715  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
716  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
717  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
718  }
719  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
720 
721  pos = get_bits_left(gb);
722  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
723  if (ret < 0) {
724  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
725  if (s->avctx->err_recognition & AV_EF_EXPLODE)
726  return AVERROR_INVALIDDATA;
727  }
728  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
729 
730  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
731  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
732  else
733  sh->slice_temporal_mvp_enabled_flag = 0;
734  } else {
735  s->sh.short_term_rps = NULL;
736  s->poc = 0;
737  }
738 
739  /* 8.3.1 */
740  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
741  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
742  s->nal_unit_type != HEVC_NAL_TSA_N &&
743  s->nal_unit_type != HEVC_NAL_STSA_N &&
744  s->nal_unit_type != HEVC_NAL_RADL_N &&
745  s->nal_unit_type != HEVC_NAL_RADL_R &&
746  s->nal_unit_type != HEVC_NAL_RASL_N &&
747  s->nal_unit_type != HEVC_NAL_RASL_R)
748  s->pocTid0 = s->poc;
749 
750  if (s->ps.sps->sao_enabled) {
751  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
752  if (s->ps.sps->chroma_format_idc) {
753  sh->slice_sample_adaptive_offset_flag[1] =
754  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
755  }
756  } else {
757  sh->slice_sample_adaptive_offset_flag[0] = 0;
758  sh->slice_sample_adaptive_offset_flag[1] = 0;
759  sh->slice_sample_adaptive_offset_flag[2] = 0;
760  }
761 
762  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
763  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
764  int nb_refs;
765 
766  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
767  if (sh->slice_type == HEVC_SLICE_B)
768  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
769 
770  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
771  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
772  if (sh->slice_type == HEVC_SLICE_B)
773  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
774  }
775  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
776  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
777  sh->nb_refs[L0], sh->nb_refs[L1]);
778  return AVERROR_INVALIDDATA;
779  }
780 
781  sh->rpl_modification_flag[0] = 0;
782  sh->rpl_modification_flag[1] = 0;
783  nb_refs = ff_hevc_frame_nb_refs(s);
784  if (!nb_refs) {
785  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
786  return AVERROR_INVALIDDATA;
787  }
788 
789  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
790  sh->rpl_modification_flag[0] = get_bits1(gb);
791  if (sh->rpl_modification_flag[0]) {
792  for (i = 0; i < sh->nb_refs[L0]; i++)
793  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
794  }
795 
796  if (sh->slice_type == HEVC_SLICE_B) {
797  sh->rpl_modification_flag[1] = get_bits1(gb);
798  if (sh->rpl_modification_flag[1] == 1)
799  for (i = 0; i < sh->nb_refs[L1]; i++)
800  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
801  }
802  }
803 
804  if (sh->slice_type == HEVC_SLICE_B)
805  sh->mvd_l1_zero_flag = get_bits1(gb);
806 
807  if (s->ps.pps->cabac_init_present_flag)
808  sh->cabac_init_flag = get_bits1(gb);
809  else
810  sh->cabac_init_flag = 0;
811 
812  sh->collocated_ref_idx = 0;
813  if (sh->slice_temporal_mvp_enabled_flag) {
814  sh->collocated_list = L0;
815  if (sh->slice_type == HEVC_SLICE_B)
816  sh->collocated_list = !get_bits1(gb);
817 
818  if (sh->nb_refs[sh->collocated_list] > 1) {
819  sh->collocated_ref_idx = get_ue_golomb_long(gb);
820  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
821  av_log(s->avctx, AV_LOG_ERROR,
822  "Invalid collocated_ref_idx: %d.\n",
823  sh->collocated_ref_idx);
824  return AVERROR_INVALIDDATA;
825  }
826  }
827  }
828 
829  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
830  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
831  int ret = pred_weight_table(s, gb);
832  if (ret < 0)
833  return ret;
834  }
835 
836  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
837  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
838  av_log(s->avctx, AV_LOG_ERROR,
839  "Invalid number of merging MVP candidates: %d.\n",
840  sh->max_num_merge_cand);
841  return AVERROR_INVALIDDATA;
842  }
843  }
844 
845  sh->slice_qp_delta = get_se_golomb(gb);
846 
847  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
848  sh->slice_cb_qp_offset = get_se_golomb(gb);
849  sh->slice_cr_qp_offset = get_se_golomb(gb);
850  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
851  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
852  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
853  return AVERROR_INVALIDDATA;
854  }
855  } else {
856  sh->slice_cb_qp_offset = 0;
857  sh->slice_cr_qp_offset = 0;
858  }
859 
860  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
861  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
862  else
863  sh->cu_chroma_qp_offset_enabled_flag = 0;
864 
865  if (s->ps.pps->deblocking_filter_control_present_flag) {
866  int deblocking_filter_override_flag = 0;
867 
868  if (s->ps.pps->deblocking_filter_override_enabled_flag)
869  deblocking_filter_override_flag = get_bits1(gb);
870 
871  if (deblocking_filter_override_flag) {
872  sh->disable_deblocking_filter_flag = get_bits1(gb);
873  if (!sh->disable_deblocking_filter_flag) {
874  int beta_offset_div2 = get_se_golomb(gb);
875  int tc_offset_div2 = get_se_golomb(gb) ;
876  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
877  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
878  av_log(s->avctx, AV_LOG_ERROR,
879  "Invalid deblock filter offsets: %d, %d\n",
880  beta_offset_div2, tc_offset_div2);
881  return AVERROR_INVALIDDATA;
882  }
883  sh->beta_offset = beta_offset_div2 * 2;
884  sh->tc_offset = tc_offset_div2 * 2;
885  }
886  } else {
887  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
888  sh->beta_offset = s->ps.pps->beta_offset;
889  sh->tc_offset = s->ps.pps->tc_offset;
890  }
891  } else {
892  sh->disable_deblocking_filter_flag = 0;
893  sh->beta_offset = 0;
894  sh->tc_offset = 0;
895  }
896 
897  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
898  (sh->slice_sample_adaptive_offset_flag[0] ||
899  sh->slice_sample_adaptive_offset_flag[1] ||
900  !sh->disable_deblocking_filter_flag)) {
901  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
902  } else {
903  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
904  }
905  } else if (!s->slice_initialized) {
906  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
907  return AVERROR_INVALIDDATA;
908  }
909 
910  sh->num_entry_point_offsets = 0;
911  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
912  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
913  // It would be possible to bound this tighter but this here is simpler
914  if (num_entry_point_offsets > get_bits_left(gb)) {
915  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
916  return AVERROR_INVALIDDATA;
917  }
918 
919  sh->num_entry_point_offsets = num_entry_point_offsets;
920  if (sh->num_entry_point_offsets > 0) {
921  int offset_len = get_ue_golomb_long(gb) + 1;
922 
923  if (offset_len < 1 || offset_len > 32) {
924  sh->num_entry_point_offsets = 0;
925  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
926  return AVERROR_INVALIDDATA;
927  }
928 
929  av_freep(&sh->entry_point_offset);
930  av_freep(&sh->offset);
931  av_freep(&sh->size);
932  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
933  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
934  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
935  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
936  sh->num_entry_point_offsets = 0;
937  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
938  return AVERROR(ENOMEM);
939  }
940  for (i = 0; i < sh->num_entry_point_offsets; i++) {
941  unsigned val = get_bits_long(gb, offset_len);
942  sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
943  }
944  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
945  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
946  s->threads_number = 1;
947  } else
948  s->enable_parallel_tiles = 0;
949  } else
950  s->enable_parallel_tiles = 0;
951  }
952 
953  if (s->ps.pps->slice_header_extension_present_flag) {
954  unsigned int length = get_ue_golomb_long(gb);
955  if (length*8LL > get_bits_left(gb)) {
956  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
957  return AVERROR_INVALIDDATA;
958  }
959  for (i = 0; i < length; i++)
960  skip_bits(gb, 8); // slice_header_extension_data_byte
961  }
962 
963  // Inferred parameters
964  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
965  if (sh->slice_qp > 51 ||
966  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
967  av_log(s->avctx, AV_LOG_ERROR,
968  "The slice_qp %d is outside the valid range "
969  "[%d, 51].\n",
970  sh->slice_qp,
971  -s->ps.sps->qp_bd_offset);
972  return AVERROR_INVALIDDATA;
973  }
974 
975  s->sh.slice_ctb_addr_rs = sh->slice_segment_addr;
976 
977  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
978  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
979  return AVERROR_INVALIDDATA;
980  }
981 
982  if (get_bits_left(gb) < 0) {
983  av_log(s->avctx, AV_LOG_ERROR,
984  "Overread slice header by %d bits\n", -get_bits_left(gb));
985  return AVERROR_INVALIDDATA;
986  }
987 
988  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
989 
990  if (!s->ps.pps->cu_qp_delta_enabled_flag)
991  s->HEVClc->qp_y = s->sh.slice_qp;
992 
993  s->slice_initialized = 1;
994  s->HEVClc->tu.cu_qp_offset_cb = 0;
995  s->HEVClc->tu.cu_qp_offset_cr = 0;
996 
997  return 0;
998 }
999 
1000 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
1001 
1002 #define SET_SAO(elem, value) \
1003 do { \
1004  if (!sao_merge_up_flag && !sao_merge_left_flag) \
1005  sao->elem = value; \
1006  else if (sao_merge_left_flag) \
1007  sao->elem = CTB(s->sao, rx-1, ry).elem; \
1008  else if (sao_merge_up_flag) \
1009  sao->elem = CTB(s->sao, rx, ry-1).elem; \
1010  else \
1011  sao->elem = 0; \
1012 } while (0)
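/* SET_SAO() implements the SAO merge semantics: when merge-left or merge-up
 * was signalled, the parameter is copied from the neighbouring CTB instead of
 * being read from the bitstream. */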
1013 
1014 static void hls_sao_param(HEVCLocalContext *lc, int rx, int ry)
1015 {
1016  const HEVCContext *const s = lc->parent;
1017  int sao_merge_left_flag = 0;
1018  int sao_merge_up_flag = 0;
1019  SAOParams *sao = &CTB(s->sao, rx, ry);
1020  int c_idx, i;
1021 
1022  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
1023  s->sh.slice_sample_adaptive_offset_flag[1]) {
1024  if (rx > 0) {
1025  if (lc->ctb_left_flag)
1026  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(lc);
1027  }
1028  if (ry > 0 && !sao_merge_left_flag) {
1029  if (lc->ctb_up_flag)
1030  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(lc);
1031  }
1032  }
1033 
1034  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
1035  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
1036  s->ps.pps->log2_sao_offset_scale_chroma;
1037 
1038  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
1039  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
1040  continue;
1041  }
1042 
1043  if (c_idx == 2) {
1044  sao->type_idx[2] = sao->type_idx[1];
1045  sao->eo_class[2] = sao->eo_class[1];
1046  } else {
1047  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(lc));
1048  }
1049 
1050  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
1051  continue;
1052 
1053  for (i = 0; i < 4; i++)
1054  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(lc));
1055 
1056  if (sao->type_idx[c_idx] == SAO_BAND) {
1057  for (i = 0; i < 4; i++) {
1058  if (sao->offset_abs[c_idx][i]) {
1059  SET_SAO(offset_sign[c_idx][i],
1060  ff_hevc_sao_offset_sign_decode(lc));
1061  } else {
1062  sao->offset_sign[c_idx][i] = 0;
1063  }
1064  }
1065  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(lc));
1066  } else if (c_idx != 2) {
1067  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(lc));
1068  }
1069 
1070  // Inferred parameters
1071  sao->offset_val[c_idx][0] = 0;
1072  for (i = 0; i < 4; i++) {
1073  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1074  if (sao->type_idx[c_idx] == SAO_EDGE) {
1075  if (i > 1)
1076  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1077  } else if (sao->offset_sign[c_idx][i]) {
1078  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1079  }
1080  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1081  }
1082  }
1083 }
1084 
1085 #undef SET_SAO
1086 #undef CTB
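/* Parse the cross-component prediction scaling factor (res_scale_val) for one
 * chroma component; only used with the range-extension tools. */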
1087 
1088 static int hls_cross_component_pred(HEVCLocalContext *lc, int idx)
1089 {
1090  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(lc, idx);
1091 
1092  if (log2_res_scale_abs_plus1 != 0) {
1093  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(lc, idx);
1094  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1095  (1 - 2 * res_scale_sign_flag);
1096  } else {
1097  lc->tu.res_scale_val = 0;
1098  }
1099 
1100 
1101  return 0;
1102 }
1103 
1104 static int hls_transform_unit(HEVCLocalContext *lc, int x0, int y0,
1105  int xBase, int yBase, int cb_xBase, int cb_yBase,
1106  int log2_cb_size, int log2_trafo_size,
1107  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1108 {
1109  const HEVCContext *const s = lc->parent;
1110  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1111  int i;
1112 
1113  if (lc->cu.pred_mode == MODE_INTRA) {
1114  int trafo_size = 1 << log2_trafo_size;
1115  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size, trafo_size);
1116 
1117  s->hpc.intra_pred[log2_trafo_size - 2](lc, x0, y0, 0);
1118  }
1119 
1120  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1121  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1122  int scan_idx = SCAN_DIAG;
1123  int scan_idx_c = SCAN_DIAG;
1124  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1125  (s->ps.sps->chroma_format_idc == 2 &&
1126  (cbf_cb[1] || cbf_cr[1]));
1127 
1128  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1129  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(lc);
1130  if (lc->tu.cu_qp_delta != 0)
1131  if (ff_hevc_cu_qp_delta_sign_flag(lc) == 1)
1132  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1133  lc->tu.is_cu_qp_delta_coded = 1;
1134 
1135  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1136  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1137  av_log(s->avctx, AV_LOG_ERROR,
1138  "The cu_qp_delta %d is outside the valid range "
1139  "[%d, %d].\n",
1140  lc->tu.cu_qp_delta,
1141  -(26 + s->ps.sps->qp_bd_offset / 2),
1142  (25 + s->ps.sps->qp_bd_offset / 2));
1143  return AVERROR_INVALIDDATA;
1144  }
1145 
1146  ff_hevc_set_qPy(lc, cb_xBase, cb_yBase, log2_cb_size);
1147  }
1148 
1149  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1150  !lc->tu.is_cu_chroma_qp_offset_coded) {
1151  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(lc);
1152  if (cu_chroma_qp_offset_flag) {
1153  int cu_chroma_qp_offset_idx = 0;
1154  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1155  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(lc);
1156  av_log(s->avctx, AV_LOG_ERROR,
1157  "cu_chroma_qp_offset_idx not yet tested.\n");
1158  }
1159  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1160  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1161  } else {
1162  lc->tu.cu_qp_offset_cb = 0;
1163  lc->tu.cu_qp_offset_cr = 0;
1164  }
1165  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1166  }
1167 
1168  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1169  if (lc->tu.intra_pred_mode >= 6 &&
1170  lc->tu.intra_pred_mode <= 14) {
1171  scan_idx = SCAN_VERT;
1172  } else if (lc->tu.intra_pred_mode >= 22 &&
1173  lc->tu.intra_pred_mode <= 30) {
1174  scan_idx = SCAN_HORIZ;
1175  }
1176 
1177  if (lc->tu.intra_pred_mode_c >= 6 &&
1178  lc->tu.intra_pred_mode_c <= 14) {
1179  scan_idx_c = SCAN_VERT;
1180  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1181  lc->tu.intra_pred_mode_c <= 30) {
1182  scan_idx_c = SCAN_HORIZ;
1183  }
1184  }
1185 
1186  lc->tu.cross_pf = 0;
1187 
1188  if (cbf_luma)
1189  ff_hevc_hls_residual_coding(lc, x0, y0, log2_trafo_size, scan_idx, 0);
1190  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1191  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1192  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1193  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1194  (lc->cu.pred_mode == MODE_INTER ||
1195  (lc->tu.chroma_mode_c == 4)));
1196 
1197  if (lc->tu.cross_pf) {
1198  hls_cross_component_pred(lc, 0);
1199  }
1200  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1201  if (lc->cu.pred_mode == MODE_INTRA) {
1202  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1203  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 1);
1204  }
1205  if (cbf_cb[i])
1206  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1207  log2_trafo_size_c, scan_idx_c, 1);
1208  else
1209  if (lc->tu.cross_pf) {
1210  ptrdiff_t stride = s->frame->linesize[1];
1211  int hshift = s->ps.sps->hshift[1];
1212  int vshift = s->ps.sps->vshift[1];
1213  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1214  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1215  int size = 1 << log2_trafo_size_c;
1216 
1217  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1218  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1219  for (i = 0; i < (size * size); i++) {
1220  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1221  }
1222  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1223  }
1224  }
1225 
1226  if (lc->tu.cross_pf) {
1227  hls_cross_component_pred(lc, 1);
1228  }
1229  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1230  if (lc->cu.pred_mode == MODE_INTRA) {
1231  ff_hevc_set_neighbour_available(lc, x0, y0 + (i << log2_trafo_size_c),
1232  trafo_size_h, trafo_size_v);
1233  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (i << log2_trafo_size_c), 2);
1234  }
1235  if (cbf_cr[i])
1236  ff_hevc_hls_residual_coding(lc, x0, y0 + (i << log2_trafo_size_c),
1237  log2_trafo_size_c, scan_idx_c, 2);
1238  else
1239  if (lc->tu.cross_pf) {
1240  ptrdiff_t stride = s->frame->linesize[2];
1241  int hshift = s->ps.sps->hshift[2];
1242  int vshift = s->ps.sps->vshift[2];
1243  const int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1244  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1245  int size = 1 << log2_trafo_size_c;
1246 
1247  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1248  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1249  for (i = 0; i < (size * size); i++) {
1250  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1251  }
1252  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1253  }
1254  }
1255  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1256  int trafo_size_h = 1 << (log2_trafo_size + 1);
1257  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1258  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1259  if (lc->cu.pred_mode == MODE_INTRA) {
1260  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1261  trafo_size_h, trafo_size_v);
1262  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 1);
1263  }
1264  if (cbf_cb[i])
1265  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1266  log2_trafo_size, scan_idx_c, 1);
1267  }
1268  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1269  if (lc->cu.pred_mode == MODE_INTRA) {
1270  ff_hevc_set_neighbour_available(lc, xBase, yBase + (i << log2_trafo_size),
1271  trafo_size_h, trafo_size_v);
1272  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (i << log2_trafo_size), 2);
1273  }
1274  if (cbf_cr[i])
1275  ff_hevc_hls_residual_coding(lc, xBase, yBase + (i << log2_trafo_size),
1276  log2_trafo_size, scan_idx_c, 2);
1277  }
1278  }
1279  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1280  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1281  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1282  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1283  ff_hevc_set_neighbour_available(lc, x0, y0, trafo_size_h, trafo_size_v);
1284  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 1);
1285  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0, 2);
1286  if (s->ps.sps->chroma_format_idc == 2) {
1287  ff_hevc_set_neighbour_available(lc, x0, y0 + (1 << log2_trafo_size_c),
1288  trafo_size_h, trafo_size_v);
1289  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 1);
1290  s->hpc.intra_pred[log2_trafo_size_c - 2](lc, x0, y0 + (1 << log2_trafo_size_c), 2);
1291  }
1292  } else if (blk_idx == 3) {
1293  int trafo_size_h = 1 << (log2_trafo_size + 1);
1294  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1295  ff_hevc_set_neighbour_available(lc, xBase, yBase,
1296  trafo_size_h, trafo_size_v);
1297  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 1);
1298  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase, 2);
1299  if (s->ps.sps->chroma_format_idc == 2) {
1300  ff_hevc_set_neighbour_available(lc, xBase, yBase + (1 << log2_trafo_size),
1301  trafo_size_h, trafo_size_v);
1302  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 1);
1303  s->hpc.intra_pred[log2_trafo_size - 2](lc, xBase, yBase + (1 << log2_trafo_size), 2);
1304  }
1305  }
1306  }
1307 
1308  return 0;
1309 }
1310 
1311 static void set_deblocking_bypass(const HEVCContext *s, int x0, int y0, int log2_cb_size)
1312 {
1313  int cb_size = 1 << log2_cb_size;
1314  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1315 
1316  int min_pu_width = s->ps.sps->min_pu_width;
1317  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1318  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1319  int i, j;
1320 
1321  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1322  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1323  s->is_pcm[i + j * min_pu_width] = 2;
1324 }
1325 
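/* Recursively parse the transform tree: split transform units according to
 * split_transform_flag, track the chroma CBFs, and hand each leaf to
 * hls_transform_unit(). */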
1326 static int hls_transform_tree(HEVCLocalContext *lc, int x0, int y0,
1327  int xBase, int yBase, int cb_xBase, int cb_yBase,
1328  int log2_cb_size, int log2_trafo_size,
1329  int trafo_depth, int blk_idx,
1330  const int *base_cbf_cb, const int *base_cbf_cr)
1331 {
1332  const HEVCContext *const s = lc->parent;
1333  uint8_t split_transform_flag;
1334  int cbf_cb[2];
1335  int cbf_cr[2];
1336  int ret;
1337 
1338  cbf_cb[0] = base_cbf_cb[0];
1339  cbf_cb[1] = base_cbf_cb[1];
1340  cbf_cr[0] = base_cbf_cr[0];
1341  cbf_cr[1] = base_cbf_cr[1];
1342 
1343  if (lc->cu.intra_split_flag) {
1344  if (trafo_depth == 1) {
1345  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1346  if (s->ps.sps->chroma_format_idc == 3) {
1347  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1348  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1349  } else {
1350  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1351  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1352  }
1353  }
1354  } else {
1355  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1356  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1357  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1358  }
1359 
1360  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1361  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1362  trafo_depth < lc->cu.max_trafo_depth &&
1363  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1364  split_transform_flag = ff_hevc_split_transform_flag_decode(lc, log2_trafo_size);
1365  } else {
1366  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1367  lc->cu.pred_mode == MODE_INTER &&
1368  lc->cu.part_mode != PART_2Nx2N &&
1369  trafo_depth == 0;
1370 
1371  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1372  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1373  inter_split;
1374  }
1375 
1376  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1377  if (trafo_depth == 0 || cbf_cb[0]) {
1378  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1379  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1380  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1381  }
1382  }
1383 
1384  if (trafo_depth == 0 || cbf_cr[0]) {
1385  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1386  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1387  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(lc, trafo_depth);
1388  }
1389  }
1390  }
1391 
1392  if (split_transform_flag) {
1393  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1394  const int x1 = x0 + trafo_size_split;
1395  const int y1 = y0 + trafo_size_split;
1396 
1397 #define SUBDIVIDE(x, y, idx) \
1398 do { \
1399  ret = hls_transform_tree(lc, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size,\
1400  log2_trafo_size - 1, trafo_depth + 1, idx, \
1401  cbf_cb, cbf_cr); \
1402  if (ret < 0) \
1403  return ret; \
1404 } while (0)
1405 
1406  SUBDIVIDE(x0, y0, 0);
1407  SUBDIVIDE(x1, y0, 1);
1408  SUBDIVIDE(x0, y1, 2);
1409  SUBDIVIDE(x1, y1, 3);
1410 
1411 #undef SUBDIVIDE
1412  } else {
1413  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1414  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1415  int min_tu_width = s->ps.sps->min_tb_width;
1416  int cbf_luma = 1;
1417 
1418  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1419  cbf_cb[0] || cbf_cr[0] ||
1420  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1421  cbf_luma = ff_hevc_cbf_luma_decode(lc, trafo_depth);
1422  }
1423 
1424  ret = hls_transform_unit(lc, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1425  log2_cb_size, log2_trafo_size,
1426  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1427  if (ret < 0)
1428  return ret;
1429  // TODO: store cbf_luma somewhere else
1430  if (cbf_luma) {
1431  int i, j;
1432  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1433  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1434  int x_tu = (x0 + j) >> log2_min_tu_size;
1435  int y_tu = (y0 + i) >> log2_min_tu_size;
1436  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1437  }
1438  }
1439  if (!s->sh.disable_deblocking_filter_flag) {
1440  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_trafo_size);
1441  if (s->ps.pps->transquant_bypass_enable_flag &&
1442  lc->cu.cu_transquant_bypass_flag)
1443  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1444  }
1445  }
1446  return 0;
1447 }
1448 
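/* Decode a PCM-coded coding block: the raw luma/chroma samples are read
 * directly from the bitstream and copied into the frame. */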
1449 static int hls_pcm_sample(HEVCLocalContext *lc, int x0, int y0, int log2_cb_size)
1450 {
1451  const HEVCContext *const s = lc->parent;
1452  GetBitContext gb;
1453  int cb_size = 1 << log2_cb_size;
1454  ptrdiff_t stride0 = s->frame->linesize[0];
1455  ptrdiff_t stride1 = s->frame->linesize[1];
1456  ptrdiff_t stride2 = s->frame->linesize[2];
1457  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1458  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1459  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1460 
1461  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1462  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1463  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1464  s->ps.sps->pcm.bit_depth_chroma;
1465  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1466  int ret;
1467 
1468  if (!s->sh.disable_deblocking_filter_flag)
1469  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
1470 
1471  ret = init_get_bits(&gb, pcm, length);
1472  if (ret < 0)
1473  return ret;
1474 
1475  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1476  if (s->ps.sps->chroma_format_idc) {
1477  s->hevcdsp.put_pcm(dst1, stride1,
1478  cb_size >> s->ps.sps->hshift[1],
1479  cb_size >> s->ps.sps->vshift[1],
1480  &gb, s->ps.sps->pcm.bit_depth_chroma);
1481  s->hevcdsp.put_pcm(dst2, stride2,
1482  cb_size >> s->ps.sps->hshift[2],
1483  cb_size >> s->ps.sps->vshift[2],
1484  &gb, s->ps.sps->pcm.bit_depth_chroma);
1485  }
1486 
1487  return 0;
1488 }
1489 
1490 /**
1491  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1492  *
1493  * @param lc HEVC local (per-thread) decoder context
1494  * @param dst target buffer for block data at block position
1495  * @param dststride stride of the dst buffer
1496  * @param ref reference picture buffer at origin (0, 0)
1497  * @param mv motion vector (relative to block position) to get pixel data from
1498  * @param x_off horizontal position of block from origin (0, 0)
1499  * @param y_off vertical position of block from origin (0, 0)
1500  * @param block_w width of block
1501  * @param block_h height of block
1502  * @param luma_weight weighting factor applied to the luma prediction
1503  * @param luma_offset additive offset applied to the luma prediction value
1504  */
1505 
1506 static void luma_mc_uni(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1507  const AVFrame *ref, const Mv *mv, int x_off, int y_off,
1508  int block_w, int block_h, int luma_weight, int luma_offset)
1509 {
1510  const HEVCContext *const s = lc->parent;
1511  const uint8_t *src = ref->data[0];
1512  ptrdiff_t srcstride = ref->linesize[0];
1513  int pic_width = s->ps.sps->width;
1514  int pic_height = s->ps.sps->height;
1515  int mx = mv->x & 3;
1516  int my = mv->y & 3;
1517  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1518  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1519  int idx = hevc_pel_weight[block_w];
1520 
1521  x_off += mv->x >> 2;
1522  y_off += mv->y >> 2;
1523  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1524 
1525  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1526  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1527  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1528  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1529  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1530  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1531 
1532  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1533  edge_emu_stride, srcstride,
1534  block_w + QPEL_EXTRA,
1535  block_h + QPEL_EXTRA,
1536  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1537  pic_width, pic_height);
1538  src = lc->edge_emu_buffer + buf_offset;
1539  srcstride = edge_emu_stride;
1540  }
1541 
1542  if (!weight_flag)
1543  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1544  block_h, mx, my, block_w);
1545  else
1546  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1547  block_h, s->sh.luma_log2_weight_denom,
1548  luma_weight, luma_offset, mx, my, block_w);
1549 }
1550 
1551 /**
1552  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1553  *
1554  * @param lc HEVC local (per-thread) decoder context
1555  * @param dst target buffer for block data at block position
1556  * @param dststride stride of the dst buffer
1557  * @param ref0 reference picture0 buffer at origin (0, 0)
1558  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1559  * @param x_off horizontal position of block from origin (0, 0)
1560  * @param y_off vertical position of block from origin (0, 0)
1561  * @param block_w width of block
1562  * @param block_h height of block
1563  * @param ref1 reference picture1 buffer at origin (0, 0)
1564  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1565  * @param current_mv current motion vector structure
1566  */
1567  static void luma_mc_bi(HEVCLocalContext *lc, uint8_t *dst, ptrdiff_t dststride,
1568  const AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1569  int block_w, int block_h, const AVFrame *ref1,
1570  const Mv *mv1, struct MvField *current_mv)
1571 {
1572  const HEVCContext *const s = lc->parent;
1573  ptrdiff_t src0stride = ref0->linesize[0];
1574  ptrdiff_t src1stride = ref1->linesize[0];
1575  int pic_width = s->ps.sps->width;
1576  int pic_height = s->ps.sps->height;
1577  int mx0 = mv0->x & 3;
1578  int my0 = mv0->y & 3;
1579  int mx1 = mv1->x & 3;
1580  int my1 = mv1->y & 3;
1581  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1582  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1583  int x_off0 = x_off + (mv0->x >> 2);
1584  int y_off0 = y_off + (mv0->y >> 2);
1585  int x_off1 = x_off + (mv1->x >> 2);
1586  int y_off1 = y_off + (mv1->y >> 2);
1587  int idx = hevc_pel_weight[block_w];
1588 
1589  const uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1590  const uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1591 
1592  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1593  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1594  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1595  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1596  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1597  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1598 
1599  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1600  edge_emu_stride, src0stride,
1601  block_w + QPEL_EXTRA,
1602  block_h + QPEL_EXTRA,
1603  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1604  pic_width, pic_height);
1605  src0 = lc->edge_emu_buffer + buf_offset;
1606  src0stride = edge_emu_stride;
1607  }
1608 
1609  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1610  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1611  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1612  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1613  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1614  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1615 
1616  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1617  edge_emu_stride, src1stride,
1618  block_w + QPEL_EXTRA,
1619  block_h + QPEL_EXTRA,
1620  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1621  pic_width, pic_height);
1622  src1 = lc->edge_emu_buffer2 + buf_offset;
1623  src1stride = edge_emu_stride;
1624  }
1625 
1626  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1627  block_h, mx0, my0, block_w);
1628  if (!weight_flag)
1629  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1630  block_h, mx1, my1, block_w);
1631  else
1632  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1633  block_h, s->sh.luma_log2_weight_denom,
1634  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1635  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1636  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1637  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1638  mx1, my1, block_w);
1639 
1640 }
1641 
1642 /**
1643  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1644  *
1645  * @param lc HEVC local (per-thread) decoder context
1646  * @param dst1 target buffer for block data at block position (U plane)
1647  * @param dst2 target buffer for block data at block position (V plane)
1648  * @param dststride stride of the dst1 and dst2 buffers
1649  * @param ref reference picture buffer at origin (0, 0)
1650  * @param mv motion vector (relative to block position) to get pixel data from
1651  * @param x_off horizontal position of block from origin (0, 0)
1652  * @param y_off vertical position of block from origin (0, 0)
1653  * @param block_w width of block
1654  * @param block_h height of block
1655  * @param chroma_weight weighting factor applied to the chroma prediction
1656  * @param chroma_offset additive offset applied to the chroma prediction value
1657  */
1658 
1659 static void chroma_mc_uni(HEVCLocalContext *lc, uint8_t *dst0,
1660  ptrdiff_t dststride, const uint8_t *src0, ptrdiff_t srcstride, int reflist,
1661  int x_off, int y_off, int block_w, int block_h,
1662  const struct MvField *current_mv, int chroma_weight, int chroma_offset)
1663 {
1664  const HEVCContext *const s = lc->parent;
1665  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1666  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1667  const Mv *mv = &current_mv->mv[reflist];
1668  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1669  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1670  int idx = hevc_pel_weight[block_w];
1671  int hshift = s->ps.sps->hshift[1];
1672  int vshift = s->ps.sps->vshift[1];
1673  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1674  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1675  intptr_t _mx = mx << (1 - hshift);
1676  intptr_t _my = my << (1 - vshift);
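 /* The epel filters take a 1/8-sample phase (0..7): in a subsampled
  * direction the chroma MV already carries three fractional bits, in a
  * non-subsampled direction its quarter-sample phase is doubled (<< 1). */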
1677 
1678  x_off += mv->x >> (2 + hshift);
1679  y_off += mv->y >> (2 + vshift);
1680  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1681 
1682  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1683  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1684  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1685  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1686  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1687  int buf_offset0 = EPEL_EXTRA_BEFORE *
1688  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1689  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1690  edge_emu_stride, srcstride,
1691  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1692  x_off - EPEL_EXTRA_BEFORE,
1693  y_off - EPEL_EXTRA_BEFORE,
1694  pic_width, pic_height);
1695 
1696  src0 = lc->edge_emu_buffer + buf_offset0;
1697  srcstride = edge_emu_stride;
1698  }
1699  if (!weight_flag)
1700  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1701  block_h, _mx, _my, block_w);
1702  else
1703  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1704  block_h, s->sh.chroma_log2_weight_denom,
1705  chroma_weight, chroma_offset, _mx, _my, block_w);
1706 }
1707 
1708 /**
1709  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1710  *
1711  * @param lc local decoder context
1712  * @param dst0 target buffer for block data at block position
1713  *        (the Cb or Cr plane selected by cidx)
1714  * @param dststride stride of the dst0 buffer
1715  * @param ref0 reference picture0 buffer at origin (0, 0)
1716  * @param ref1 reference picture1 buffer at origin (0, 0)
1717  * @param x_off horizontal position of block from origin (0, 0)
1718  * @param y_off vertical position of block from origin (0, 0)
1719  * @param block_w width of block
1720  * @param block_h height of block
1721  * @param current_mv current motion vector structure; the L0 and L1 motion
1722  *        vectors and reference indices of the block are read from it
1723  * @param cidx chroma component index (0 for Cb, 1 for Cr)
1724  */
1725 static void chroma_mc_bi(HEVCLocalContext *lc, uint8_t *dst0, ptrdiff_t dststride,
1726  const AVFrame *ref0, const AVFrame *ref1,
1727  int x_off, int y_off, int block_w, int block_h, const MvField *current_mv, int cidx)
1728 {
1729  const HEVCContext *const s = lc->parent;
1730  const uint8_t *src1 = ref0->data[cidx+1];
1731  const uint8_t *src2 = ref1->data[cidx+1];
1732  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1733  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1734  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1735  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1736  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1737  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1738  const Mv *const mv0 = &current_mv->mv[0];
1739  const Mv *const mv1 = &current_mv->mv[1];
1740  int hshift = s->ps.sps->hshift[1];
1741  int vshift = s->ps.sps->vshift[1];
1742 
1743  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1744  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1745  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1746  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1747  intptr_t _mx0 = mx0 << (1 - hshift);
1748  intptr_t _my0 = my0 << (1 - vshift);
1749  intptr_t _mx1 = mx1 << (1 - hshift);
1750  intptr_t _my1 = my1 << (1 - vshift);
1751 
1752  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1753  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1754  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1755  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1756  int idx = hevc_pel_weight[block_w];
1757  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1758  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1759 
1760  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1761  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1762  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1763  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1764  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1765  int buf_offset1 = EPEL_EXTRA_BEFORE *
1766  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1767 
1768  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1769  edge_emu_stride, src1stride,
1770  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1771  x_off0 - EPEL_EXTRA_BEFORE,
1772  y_off0 - EPEL_EXTRA_BEFORE,
1773  pic_width, pic_height);
1774 
1775  src1 = lc->edge_emu_buffer + buf_offset1;
1776  src1stride = edge_emu_stride;
1777  }
1778 
1779  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1780  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1781  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1782  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1783  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1784  int buf_offset1 = EPEL_EXTRA_BEFORE *
1785  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1786 
1787  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1788  edge_emu_stride, src2stride,
1789  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1790  x_off1 - EPEL_EXTRA_BEFORE,
1791  y_off1 - EPEL_EXTRA_BEFORE,
1792  pic_width, pic_height);
1793 
1794  src2 = lc->edge_emu_buffer2 + buf_offset1;
1795  src2stride = edge_emu_stride;
1796  }
1797 
1798  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1799  block_h, _mx0, _my0, block_w);
1800  if (!weight_flag)
1801  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1802  src2, src2stride, lc->tmp,
1803  block_h, _mx1, _my1, block_w);
1804  else
1805  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1806  src2, src2stride, lc->tmp,
1807  block_h,
1808  s->sh.chroma_log2_weight_denom,
1809  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1810  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1811  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1812  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1813  _mx1, _my1, block_w);
1814 }
1815 
1816 static void hevc_await_progress(const HEVCContext *s, const HEVCFrame *ref,
1817  const Mv *mv, int y0, int height)
1818 {
1819  if (s->threads_type == FF_THREAD_FRAME) {
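 /* Wait until the reference frame is reconstructed at least down to the
  * lowest row this block reads from it; the extra rows in the "+ 9" below
  * appear to be headroom for the interpolation filter taps and the in-loop
  * filtering of the reference. */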
1820  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1821 
1822  ff_thread_await_progress(&ref->tf, y, 0);
1823  }
1824 }
1825 
1826 static void hevc_luma_mv_mvp_mode(HEVCLocalContext *lc, int x0, int y0, int nPbW,
1827  int nPbH, int log2_cb_size, int part_idx,
1828  int merge_idx, MvField *mv)
1829 {
1830  const HEVCContext *const s = lc->parent;
1831  enum InterPredIdc inter_pred_idc = PRED_L0;
1832  int mvp_flag;
1833 
1834  ff_hevc_set_neighbour_available(lc, x0, y0, nPbW, nPbH);
1835  mv->pred_flag = 0;
1836  if (s->sh.slice_type == HEVC_SLICE_B)
1837  inter_pred_idc = ff_hevc_inter_pred_idc_decode(lc, nPbW, nPbH);
1838 
1839  if (inter_pred_idc != PRED_L1) {
1840  if (s->sh.nb_refs[L0])
1841  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L0]);
1842 
1843  mv->pred_flag = PF_L0;
1844  ff_hevc_hls_mvd_coding(lc, x0, y0, 0);
1845  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1846  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1847  part_idx, merge_idx, mv, mvp_flag, 0);
1848  mv->mv[0].x += lc->pu.mvd.x;
1849  mv->mv[0].y += lc->pu.mvd.y;
1850  }
1851 
1852  if (inter_pred_idc != PRED_L0) {
1853  if (s->sh.nb_refs[L1])
1854  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(lc, s->sh.nb_refs[L1]);
1855 
1856  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1857  AV_ZERO32(&lc->pu.mvd);
1858  } else {
1859  ff_hevc_hls_mvd_coding(lc, x0, y0, 1);
1860  }
1861 
1862  mv->pred_flag += PF_L1;
1863  mvp_flag = ff_hevc_mvp_lx_flag_decode(lc);
1864  ff_hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1865  part_idx, merge_idx, mv, mvp_flag, 1);
1866  mv->mv[1].x += lc->pu.mvd.x;
1867  mv->mv[1].y += lc->pu.mvd.y;
1868  }
1869 }
1870 
1871 static void hls_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
1872  int nPbW, int nPbH,
1873  int log2_cb_size, int partIdx, int idx)
1874 {
1875 #define POS(c_idx, x, y) \
1876  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1877  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1878  const HEVCContext *const s = lc->parent;
1879  int merge_idx = 0;
1880  struct MvField current_mv = {{{ 0 }}};
1881 
1882  int min_pu_width = s->ps.sps->min_pu_width;
1883 
1884  MvField *tab_mvf = s->ref->tab_mvf;
1885  const RefPicList *refPicList = s->ref->refPicList;
1886  const HEVCFrame *ref0 = NULL, *ref1 = NULL;
1887  uint8_t *dst0 = POS(0, x0, y0);
1888  uint8_t *dst1 = POS(1, x0, y0);
1889  uint8_t *dst2 = POS(2, x0, y0);
1890  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1891  int min_cb_width = s->ps.sps->min_cb_width;
1892  int x_cb = x0 >> log2_min_cb_size;
1893  int y_cb = y0 >> log2_min_cb_size;
1894  int x_pu, y_pu;
1895  int i, j;
1896 
1897  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1898 
1899  if (!skip_flag)
1900  lc->pu.merge_flag = ff_hevc_merge_flag_decode(lc);
1901 
1902  if (skip_flag || lc->pu.merge_flag) {
1903  if (s->sh.max_num_merge_cand > 1)
1904  merge_idx = ff_hevc_merge_idx_decode(lc);
1905  else
1906  merge_idx = 0;
1907 
1908  ff_hevc_luma_mv_merge_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1909  partIdx, merge_idx, &current_mv);
1910  } else {
1911  hevc_luma_mv_mvp_mode(lc, x0, y0, nPbW, nPbH, log2_cb_size,
1912  partIdx, merge_idx, &current_mv);
1913  }
1914 
1915  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1916  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1917 
1918  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1919  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1920  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1921 
1922  if (current_mv.pred_flag & PF_L0) {
1923  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1924  if (!ref0)
1925  return;
1926  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1927  }
1928  if (current_mv.pred_flag & PF_L1) {
1929  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1930  if (!ref1)
1931  return;
1932  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1933  }
1934 
1935  if (current_mv.pred_flag == PF_L0) {
1936  int x0_c = x0 >> s->ps.sps->hshift[1];
1937  int y0_c = y0 >> s->ps.sps->vshift[1];
1938  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1939  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1940 
1941  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref0->frame,
1942  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1943  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1944  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1945 
1946  if (s->ps.sps->chroma_format_idc) {
1947  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1948  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1949  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1950  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1951  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1952  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1953  }
1954  } else if (current_mv.pred_flag == PF_L1) {
1955  int x0_c = x0 >> s->ps.sps->hshift[1];
1956  int y0_c = y0 >> s->ps.sps->vshift[1];
1957  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1958  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1959 
1960  luma_mc_uni(lc, dst0, s->frame->linesize[0], ref1->frame,
1961  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1962  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1963  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1964 
1965  if (s->ps.sps->chroma_format_idc) {
1966  chroma_mc_uni(lc, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1967  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1968  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1969 
1970  chroma_mc_uni(lc, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1971  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1972  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1973  }
1974  } else if (current_mv.pred_flag == PF_BI) {
1975  int x0_c = x0 >> s->ps.sps->hshift[1];
1976  int y0_c = y0 >> s->ps.sps->vshift[1];
1977  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1978  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1979 
1980  luma_mc_bi(lc, dst0, s->frame->linesize[0], ref0->frame,
1981  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1982  ref1->frame, &current_mv.mv[1], &current_mv);
1983 
1984  if (s->ps.sps->chroma_format_idc) {
1985  chroma_mc_bi(lc, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1986  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1987 
1988  chroma_mc_bi(lc, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1989  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1990  }
1991  }
1992 }
1993 
1994 /**
1995  * 8.4.1
1996  */
1997 static int luma_intra_pred_mode(HEVCLocalContext *lc, int x0, int y0, int pu_size,
1998  int prev_intra_luma_pred_flag)
1999 {
2000  const HEVCContext *const s = lc->parent;
2001  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2002  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2003  int min_pu_width = s->ps.sps->min_pu_width;
2004  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
2005  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
2006  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
2007 
2008  int cand_up = (lc->ctb_up_flag || y0b) ?
2009  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
2010  int cand_left = (lc->ctb_left_flag || x0b) ?
2011  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
2012 
2013  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
2014 
2015  MvField *tab_mvf = s->ref->tab_mvf;
2016  int intra_pred_mode;
2017  int candidate[3];
2018  int i, j;
2019 
2020  // intra_pred_mode prediction does not cross vertical CTB boundaries
2021  if ((y0 - 1) < y_ctb)
2022  cand_up = INTRA_DC;
2023 
2024  if (cand_left == cand_up) {
2025  if (cand_left < 2) {
2026  candidate[0] = INTRA_PLANAR;
2027  candidate[1] = INTRA_DC;
2028  candidate[2] = INTRA_ANGULAR_26;
2029  } else {
2030  candidate[0] = cand_left;
2031  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
2032  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
2033  }
2034  } else {
2035  candidate[0] = cand_left;
2036  candidate[1] = cand_up;
2037  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
2038  candidate[2] = INTRA_PLANAR;
2039  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
2040  candidate[2] = INTRA_DC;
2041  } else {
2042  candidate[2] = INTRA_ANGULAR_26;
2043  }
2044  }
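 /* Examples of the MPM list built above: cand_left == cand_up == angular 10
  * gives {10, 9, 11} (the two neighbouring angular modes, wrapping within
  * 2..33); cand_left == DC with cand_up == PLANAR gives {DC, PLANAR, 26}. */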
2045 
2046  if (prev_intra_luma_pred_flag) {
2047  intra_pred_mode = candidate[lc->pu.mpm_idx];
2048  } else {
2049  if (candidate[0] > candidate[1])
2050  FFSWAP(uint8_t, candidate[0], candidate[1]);
2051  if (candidate[0] > candidate[2])
2052  FFSWAP(uint8_t, candidate[0], candidate[2]);
2053  if (candidate[1] > candidate[2])
2054  FFSWAP(uint8_t, candidate[1], candidate[2]);
2055 
2056  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
2057  for (i = 0; i < 3; i++)
2058  if (intra_pred_mode >= candidate[i])
2059  intra_pred_mode++;
2060  }
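 /* e.g. with the sorted MPM list {0, 1, 26} and rem_intra_luma_pred_mode 24,
  * the three comparisons above bump the value to 27, skipping the MPMs. */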
2061 
2062  /* write the intra prediction units into the mv array */
2063  if (!size_in_pus)
2064  size_in_pus = 1;
2065  for (i = 0; i < size_in_pus; i++) {
2066  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2067  intra_pred_mode, size_in_pus);
2068 
2069  for (j = 0; j < size_in_pus; j++) {
2070  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2071  }
2072  }
2073 
2074  return intra_pred_mode;
2075 }
2076 
2077 static av_always_inline void set_ct_depth(const HEVCContext *s, int x0, int y0,
2078  int log2_cb_size, int ct_depth)
2079 {
2080  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2081  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2082  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2083  int y;
2084 
2085  for (y = 0; y < length; y++)
2086  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2087  ct_depth, length);
2088 }
2089 
2090 static const uint8_t tab_mode_idx[] = {
2091  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2092  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2093 
2094 static void intra_prediction_unit(HEVCLocalContext *lc, int x0, int y0,
2095  int log2_cb_size)
2096 {
2097  const HEVCContext *const s = lc->parent;
2098  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
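 /* intra_chroma_table maps intra_chroma_pred_mode 0..3 to planar, vertical
  * (26), horizontal (10) and DC; mode 4 means "reuse the luma mode", and a
  * collision with the luma mode is replaced by angular mode 34 below. */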
2099  uint8_t prev_intra_luma_pred_flag[4];
2100  int split = lc->cu.part_mode == PART_NxN;
2101  int pb_size = (1 << log2_cb_size) >> split;
2102  int side = split + 1;
2103  int chroma_mode;
2104  int i, j;
2105 
2106  for (i = 0; i < side; i++)
2107  for (j = 0; j < side; j++)
2108  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(lc);
2109 
2110  for (i = 0; i < side; i++) {
2111  for (j = 0; j < side; j++) {
2112  if (prev_intra_luma_pred_flag[2 * i + j])
2113  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(lc);
2114  else
2115  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(lc);
2116 
2117  lc->pu.intra_pred_mode[2 * i + j] =
2118  luma_intra_pred_mode(lc, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2119  prev_intra_luma_pred_flag[2 * i + j]);
2120  }
2121  }
2122 
2123  if (s->ps.sps->chroma_format_idc == 3) {
2124  for (i = 0; i < side; i++) {
2125  for (j = 0; j < side; j++) {
2126  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2127  if (chroma_mode != 4) {
2128  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2129  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2130  else
2131  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2132  } else {
2133  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2134  }
2135  }
2136  }
2137  } else if (s->ps.sps->chroma_format_idc == 2) {
2138  int mode_idx;
2139  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2140  if (chroma_mode != 4) {
2141  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2142  mode_idx = 34;
2143  else
2144  mode_idx = intra_chroma_table[chroma_mode];
2145  } else {
2146  mode_idx = lc->pu.intra_pred_mode[0];
2147  }
2148  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2149  } else if (s->ps.sps->chroma_format_idc != 0) {
2150  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(lc);
2151  if (chroma_mode != 4) {
2152  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2153  lc->pu.intra_pred_mode_c[0] = 34;
2154  else
2155  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2156  } else {
2157  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2158  }
2159  }
2160 }
2161 
2162 static void intra_prediction_unit_default_value(HEVCLocalContext *lc,
2163  int x0, int y0,
2164  int log2_cb_size)
2165 {
2166  const HEVCContext *const s = lc->parent;
2167  int pb_size = 1 << log2_cb_size;
2168  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2169  int min_pu_width = s->ps.sps->min_pu_width;
2170  MvField *tab_mvf = s->ref->tab_mvf;
2171  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2172  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2173  int j, k;
2174 
2175  if (size_in_pus == 0)
2176  size_in_pus = 1;
2177  for (j = 0; j < size_in_pus; j++)
2178  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2179  if (lc->cu.pred_mode == MODE_INTRA)
2180  for (j = 0; j < size_in_pus; j++)
2181  for (k = 0; k < size_in_pus; k++)
2182  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2183 }
2184 
2185 static int hls_coding_unit(HEVCLocalContext *lc, const HEVCContext *s, int x0, int y0, int log2_cb_size)
2186 {
2187  int cb_size = 1 << log2_cb_size;
2188  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2189  int length = cb_size >> log2_min_cb_size;
2190  int min_cb_width = s->ps.sps->min_cb_width;
2191  int x_cb = x0 >> log2_min_cb_size;
2192  int y_cb = y0 >> log2_min_cb_size;
2193  int idx = log2_cb_size - 2;
2194  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
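 /* qp_block_mask covers one quantization group, e.g. a 64x64 CTB with
  * diff_cu_qp_delta_depth == 2 gives (1 << 4) - 1 = 15, i.e. 16x16 groups;
  * lc->qPy_pred is refreshed whenever a CU ends on such a boundary. */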
2195  int x, y, ret;
2196 
2197  lc->cu.x = x0;
2198  lc->cu.y = y0;
2199  lc->cu.pred_mode = MODE_INTRA;
2200  lc->cu.part_mode = PART_2Nx2N;
2201  lc->cu.intra_split_flag = 0;
2202 
2203  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2204  for (x = 0; x < 4; x++)
2205  lc->pu.intra_pred_mode[x] = 1;
2206  if (s->ps.pps->transquant_bypass_enable_flag) {
2207  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(lc);
2208  if (lc->cu.cu_transquant_bypass_flag)
2209  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2210  } else
2211  lc->cu.cu_transquant_bypass_flag = 0;
2212 
2213  if (s->sh.slice_type != HEVC_SLICE_I) {
2214  uint8_t skip_flag = ff_hevc_skip_flag_decode(lc, x0, y0, x_cb, y_cb);
2215 
2216  x = y_cb * min_cb_width + x_cb;
2217  for (y = 0; y < length; y++) {
2218  memset(&s->skip_flag[x], skip_flag, length);
2219  x += min_cb_width;
2220  }
2221  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2222  } else {
2223  x = y_cb * min_cb_width + x_cb;
2224  for (y = 0; y < length; y++) {
2225  memset(&s->skip_flag[x], 0, length);
2226  x += min_cb_width;
2227  }
2228  }
2229 
2230  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2231  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2232  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2233 
2234  if (!s->sh.disable_deblocking_filter_flag)
2235  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2236  } else {
2237  int pcm_flag = 0;
2238 
2239  if (s->sh.slice_type != HEVC_SLICE_I)
2240  lc->cu.pred_mode = ff_hevc_pred_mode_decode(lc);
2241  if (lc->cu.pred_mode != MODE_INTRA ||
2242  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2243  lc->cu.part_mode = ff_hevc_part_mode_decode(lc, log2_cb_size);
2244  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2245  lc->cu.pred_mode == MODE_INTRA;
2246  }
2247 
2248  if (lc->cu.pred_mode == MODE_INTRA) {
2249  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2250  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2251  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2252  pcm_flag = ff_hevc_pcm_flag_decode(lc);
2253  }
2254  if (pcm_flag) {
2255  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2256  ret = hls_pcm_sample(lc, x0, y0, log2_cb_size);
2257  if (s->ps.sps->pcm.loop_filter_disable_flag)
2258  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2259 
2260  if (ret < 0)
2261  return ret;
2262  } else {
2263  intra_prediction_unit(lc, x0, y0, log2_cb_size);
2264  }
2265  } else {
2266  intra_prediction_unit_default_value(lc, x0, y0, log2_cb_size);
2267  switch (lc->cu.part_mode) {
2268  case PART_2Nx2N:
2269  hls_prediction_unit(lc, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2270  break;
2271  case PART_2NxN:
2272  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2273  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2274  break;
2275  case PART_Nx2N:
2276  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2277  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2278  break;
2279  case PART_2NxnU:
2280  hls_prediction_unit(lc, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2281  hls_prediction_unit(lc, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2282  break;
2283  case PART_2NxnD:
2284  hls_prediction_unit(lc, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2285  hls_prediction_unit(lc, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2286  break;
2287  case PART_nLx2N:
2288  hls_prediction_unit(lc, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2289  hls_prediction_unit(lc, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2290  break;
2291  case PART_nRx2N:
2292  hls_prediction_unit(lc, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2293  hls_prediction_unit(lc, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2294  break;
2295  case PART_NxN:
2296  hls_prediction_unit(lc, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2297  hls_prediction_unit(lc, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2298  hls_prediction_unit(lc, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2299  hls_prediction_unit(lc, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2300  break;
2301  }
2302  }
2303 
2304  if (!pcm_flag) {
2305  int rqt_root_cbf = 1;
2306 
2307  if (lc->cu.pred_mode != MODE_INTRA &&
2308  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2309  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(lc);
2310  }
2311  if (rqt_root_cbf) {
2312  const static int cbf[2] = { 0 };
2313  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2314  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2315  s->ps.sps->max_transform_hierarchy_depth_inter;
2316  ret = hls_transform_tree(lc, x0, y0, x0, y0, x0, y0,
2317  log2_cb_size,
2318  log2_cb_size, 0, 0, cbf, cbf);
2319  if (ret < 0)
2320  return ret;
2321  } else {
2322  if (!s->sh.disable_deblocking_filter_flag)
2323  ff_hevc_deblocking_boundary_strengths(lc, x0, y0, log2_cb_size);
2324  }
2325  }
2326  }
2327 
2328  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2329  ff_hevc_set_qPy(lc, x0, y0, log2_cb_size);
2330 
2331  x = y_cb * min_cb_width + x_cb;
2332  for (y = 0; y < length; y++) {
2333  memset(&s->qp_y_tab[x], lc->qp_y, length);
2334  x += min_cb_width;
2335  }
2336 
2337  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2338  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2339  lc->qPy_pred = lc->qp_y;
2340  }
2341 
2342  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2343 
2344  return 0;
2345 }
2346 
2347 static int hls_coding_quadtree(HEVCLocalContext *lc, int x0, int y0,
2348  int log2_cb_size, int cb_depth)
2349 {
2350  const HEVCContext *const s = lc->parent;
2351  const int cb_size = 1 << log2_cb_size;
2352  int ret;
2353  int split_cu;
2354 
2355  lc->ct_depth = cb_depth;
2356  if (x0 + cb_size <= s->ps.sps->width &&
2357  y0 + cb_size <= s->ps.sps->height &&
2358  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2359  split_cu = ff_hevc_split_coding_unit_flag_decode(lc, cb_depth, x0, y0);
2360  } else {
2361  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2362  }
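 /* When the coding block does not fit inside the picture no split flag is
  * coded: the split is inferred for as long as the block is larger than the
  * minimum coding block size (nor is a flag coded at the minimum size). */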
2363  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2364  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2365  lc->tu.is_cu_qp_delta_coded = 0;
2366  lc->tu.cu_qp_delta = 0;
2367  }
2368 
2369  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2370  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2371  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2372  }
2373 
2374  if (split_cu) {
2375  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2376  const int cb_size_split = cb_size >> 1;
2377  const int x1 = x0 + cb_size_split;
2378  const int y1 = y0 + cb_size_split;
2379 
2380  int more_data = 0;
2381 
2382  more_data = hls_coding_quadtree(lc, x0, y0, log2_cb_size - 1, cb_depth + 1);
2383  if (more_data < 0)
2384  return more_data;
2385 
2386  if (more_data && x1 < s->ps.sps->width) {
2387  more_data = hls_coding_quadtree(lc, x1, y0, log2_cb_size - 1, cb_depth + 1);
2388  if (more_data < 0)
2389  return more_data;
2390  }
2391  if (more_data && y1 < s->ps.sps->height) {
2392  more_data = hls_coding_quadtree(lc, x0, y1, log2_cb_size - 1, cb_depth + 1);
2393  if (more_data < 0)
2394  return more_data;
2395  }
2396  if (more_data && x1 < s->ps.sps->width &&
2397  y1 < s->ps.sps->height) {
2398  more_data = hls_coding_quadtree(lc, x1, y1, log2_cb_size - 1, cb_depth + 1);
2399  if (more_data < 0)
2400  return more_data;
2401  }
2402 
2403  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2404  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2405  lc->qPy_pred = lc->qp_y;
2406 
2407  if (more_data)
2408  return ((x1 + cb_size_split) < s->ps.sps->width ||
2409  (y1 + cb_size_split) < s->ps.sps->height);
2410  else
2411  return 0;
2412  } else {
2413  ret = hls_coding_unit(lc, s, x0, y0, log2_cb_size);
2414  if (ret < 0)
2415  return ret;
2416  if ((!((x0 + cb_size) %
2417  (1 << (s->ps.sps->log2_ctb_size))) ||
2418  (x0 + cb_size >= s->ps.sps->width)) &&
2419  (!((y0 + cb_size) %
2420  (1 << (s->ps.sps->log2_ctb_size))) ||
2421  (y0 + cb_size >= s->ps.sps->height))) {
2422  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(lc);
2423  return !end_of_slice_flag;
2424  } else {
2425  return 1;
2426  }
2427  }
2428 
2429  return 0;
2430 }
2431 
2432 static void hls_decode_neighbour(HEVCLocalContext *lc, int x_ctb, int y_ctb,
2433  int ctb_addr_ts)
2434 {
2435  const HEVCContext *const s = lc->parent;
2436  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2437  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2438  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2439 
2440  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2441 
2442  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2443  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2444  lc->first_qp_group = 1;
2445  lc->end_of_tiles_x = s->ps.sps->width;
2446  } else if (s->ps.pps->tiles_enabled_flag) {
2447  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2448  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2449  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2450  lc->first_qp_group = 1;
2451  }
2452  } else {
2453  lc->end_of_tiles_x = s->ps.sps->width;
2454  }
2455 
2456  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2457 
2458  lc->boundary_flags = 0;
2459  if (s->ps.pps->tiles_enabled_flag) {
2460  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2461  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2462  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2463  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2464  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2465  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2466  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2467  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2468  } else {
2469  if (ctb_addr_in_slice <= 0)
2470  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2471  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2472  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2473  }
2474 
2475  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2476  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2477  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2478  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2479 }
2480 
2481 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2482 {
2483  HEVCContext *s = avctxt->priv_data;
2484  HEVCLocalContext *const lc = s->HEVClc;
2485  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2486  int more_data = 1;
2487  int x_ctb = 0;
2488  int y_ctb = 0;
2489  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2490  int ret;
2491 
2492  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2493  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2494  return AVERROR_INVALIDDATA;
2495  }
2496 
2497  if (s->sh.dependent_slice_segment_flag) {
2498  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2499  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2500  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2501  return AVERROR_INVALIDDATA;
2502  }
2503  }
2504 
2505  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2506  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2507 
2508  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2509  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2510  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2511 
2512  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2513  if (ret < 0) {
2514  s->tab_slice_address[ctb_addr_rs] = -1;
2515  return ret;
2516  }
2517 
2518  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2519 
2520  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2521  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2522  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2523 
2524  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2525  if (more_data < 0) {
2526  s->tab_slice_address[ctb_addr_rs] = -1;
2527  return more_data;
2528  }
2529 
2530 
2531  ctb_addr_ts++;
2532  ff_hevc_save_states(lc, ctb_addr_ts);
2533  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2534  }
2535 
2536  if (x_ctb + ctb_size >= s->ps.sps->width &&
2537  y_ctb + ctb_size >= s->ps.sps->height)
2538  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2539 
2540  return ctb_addr_ts;
2541 }
2542 
2543 static int hls_slice_data(HEVCContext *s)
2544 {
2545  int arg[2];
2546  int ret[2];
2547 
2548  arg[0] = 0;
2549  arg[1] = 1;
2550 
2551  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2552  return ret[0];
2553 }
2554 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *hevc_lclist,
2555  int job, int self_id)
2556 {
2557  HEVCLocalContext *lc = ((HEVCLocalContext**)hevc_lclist)[self_id];
2558  const HEVCContext *const s = lc->parent;
2559  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2560  int more_data = 1;
2561  int ctb_row = job;
2562  int ctb_addr_rs = s->sh.slice_ctb_addr_rs + ctb_row * ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size);
2563  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2564  int thread = ctb_row % s->threads_number;
2565  int ret;
2566 
2567  if(ctb_row) {
2568  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2569  if (ret < 0)
2570  goto error;
2571  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2572  }
2573 
2574  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2575  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2576  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2577 
2578  hls_decode_neighbour(lc, x_ctb, y_ctb, ctb_addr_ts);
2579 
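 /* WPP dependency: the wait below blocks until the CTU row above has made
  * enough progress that the above-right neighbour of this CTU is available;
  * every row in turn reports its own progress after each CTU it finishes. */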
2580  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2581 
2582  /* atomic_load's prototype requires a pointer to non-const atomic variable
2583  * (due to implementations via mutexes, where reads involve writes).
2584  * Of course, casting const away here is nevertheless safe. */
2585  if (atomic_load((atomic_int*)&s->wpp_err)) {
2586  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2587  return 0;
2588  }
2589 
2590  ret = ff_hevc_cabac_init(lc, ctb_addr_ts);
2591  if (ret < 0)
2592  goto error;
2593  hls_sao_param(lc, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2594  more_data = hls_coding_quadtree(lc, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2595 
2596  if (more_data < 0) {
2597  ret = more_data;
2598  goto error;
2599  }
2600 
2601  ctb_addr_ts++;
2602 
2603  ff_hevc_save_states(lc, ctb_addr_ts);
2604  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2605  ff_hevc_hls_filters(lc, x_ctb, y_ctb, ctb_size);
2606 
2607  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2608  /* Casting const away here is safe, because it is an atomic operation. */
2609  atomic_store((atomic_int*)&s->wpp_err, 1);
2610  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2611  return 0;
2612  }
2613 
2614  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2615  ff_hevc_hls_filter(lc, x_ctb, y_ctb, ctb_size);
2616  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2617  return ctb_addr_ts;
2618  }
2619  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2620  x_ctb+=ctb_size;
2621 
2622  if(x_ctb >= s->ps.sps->width) {
2623  break;
2624  }
2625  }
2626  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2627 
2628  return 0;
2629 error:
2630  s->tab_slice_address[ctb_addr_rs] = -1;
2631  /* Casting const away here is safe, because it is an atomic operation. */
2632  atomic_store((atomic_int*)&s->wpp_err, 1);
2633  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2634  return ret;
2635 }
2636 
2637 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2638 {
2639  const uint8_t *data = nal->data;
2640  int length = nal->size;
2641  HEVCLocalContext *lc = s->HEVClc;
2642  int *ret;
2643  int64_t offset;
2644  int64_t startheader, cmpt = 0;
2645  int i, j, res = 0;
2646 
2647  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2648  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2649  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2650  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2651  );
2652  return AVERROR_INVALIDDATA;
2653  }
2654 
2655  for (i = 1; i < s->threads_number; i++) {
2656  if (s->HEVClcList[i])
2657  continue;
2658  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2659  if (!s->HEVClcList[i])
2660  return AVERROR(ENOMEM);
2661  s->HEVClcList[i]->logctx = s->avctx;
2662  s->HEVClcList[i]->parent = s;
2663  s->HEVClcList[i]->common_cabac_state = &s->cabac;
2664  }
2665 
2666  offset = (lc->gb.index >> 3);
2667 
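 /* The entry point offsets signalled in the slice header are measured on the
  * escaped bytestream, while parsing works on the de-escaped NAL payload, so
  * the loops below shrink each offset by the number of emulation prevention
  * bytes (cmpt) skipped inside the corresponding segment. */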
2668  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2669  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2670  startheader--;
2671  cmpt++;
2672  }
2673  }
2674 
2675  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2676  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2677  for (j = 0, cmpt = 0, startheader = offset
2678  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2679  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2680  startheader--;
2681  cmpt++;
2682  }
2683  }
2684  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2685  s->sh.offset[i - 1] = offset;
2686 
2687  }
2688  if (s->sh.num_entry_point_offsets != 0) {
2689  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2690  if (length < offset) {
2691  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2692  return AVERROR_INVALIDDATA;
2693  }
2694  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2695  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2696 
2697  }
2698  s->data = data;
2699 
2700  for (i = 1; i < s->threads_number; i++) {
2701  s->HEVClcList[i]->first_qp_group = 1;
2702  s->HEVClcList[i]->qp_y = s->HEVClc->qp_y;
2703  }
2704 
2705  atomic_store(&s->wpp_err, 0);
2706  res = ff_slice_thread_allocz_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2707  if (res < 0)
2708  return res;
2709 
2710  ret = av_calloc(s->sh.num_entry_point_offsets + 1, sizeof(*ret));
2711  if (!ret)
2712  return AVERROR(ENOMEM);
2713 
2714  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2715  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, s->HEVClcList, ret, s->sh.num_entry_point_offsets + 1);
2716 
2717  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2718  res += ret[i];
2719 
2720  av_free(ret);
2721  return res;
2722 }
2723 
2724 static int set_side_data(HEVCContext *s)
2725 {
2726  AVFrame *out = s->ref->frame;
2727  int ret;
2728 
2729  if (s->sei.frame_packing.present &&
2730  s->sei.frame_packing.arrangement_type >= 3 &&
2731  s->sei.frame_packing.arrangement_type <= 5 &&
2732  s->sei.frame_packing.content_interpretation_type > 0 &&
2733  s->sei.frame_packing.content_interpretation_type < 3) {
2734  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2735  if (!stereo)
2736  return AVERROR(ENOMEM);
2737 
2738  switch (s->sei.frame_packing.arrangement_type) {
2739  case 3:
2740  if (s->sei.frame_packing.quincunx_subsampling)
2741  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2742  else
2743  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2744  break;
2745  case 4:
2746  stereo->type = AV_STEREO3D_TOPBOTTOM;
2747  break;
2748  case 5:
2749  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2750  break;
2751  }
2752 
2753  if (s->sei.frame_packing.content_interpretation_type == 2)
2754  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2755 
2756  if (s->sei.frame_packing.arrangement_type == 5) {
2757  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2758  stereo->view = AV_STEREO3D_VIEW_LEFT;
2759  else
2760  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2761  }
2762  }
2763 
2764  if (s->sei.display_orientation.present &&
2765  (s->sei.display_orientation.anticlockwise_rotation ||
2766  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2767  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2768  AVFrameSideData *rotation = av_frame_new_side_data(out,
2769  AV_FRAME_DATA_DISPLAYMATRIX,
2770  sizeof(int32_t) * 9);
2771  if (!rotation)
2772  return AVERROR(ENOMEM);
2773 
2774  /* av_display_rotation_set() expects the angle in the clockwise
2775  * direction, hence the first minus.
2776  * The below code applies the flips after the rotation, yet
2777  * the H.2645 specs require flipping to be applied first.
2778  * Because of R O(phi) = O(-phi) R (where R is flipping around
2779  * an arbitrary axis and O(phi) is the proper rotation by phi)
2780  * we can create display matrices as desired by negating
2781  * the degree once for every flip applied. */
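 /* e.g. anticlockwise_rotation == 16384 (90 degrees) with hflip set ends up
  * as +90 here: negated once for the clockwise convention and once more for
  * the single flip. */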
2782  angle = -angle * (1 - 2 * !!s->sei.display_orientation.hflip)
2783  * (1 - 2 * !!s->sei.display_orientation.vflip);
2784  av_display_rotation_set((int32_t *)rotation->data, angle);
2785  av_display_matrix_flip((int32_t *)rotation->data,
2786  s->sei.display_orientation.hflip,
2787  s->sei.display_orientation.vflip);
2788  }
2789 
2790  // Decrement the mastering display flag when an IRAP frame has no_rasl_output_flag=1
2791  // so the side data persists for the entire coded video sequence.
2792  if (s->sei.mastering_display.present > 0 &&
2793  IS_IRAP(s) && s->no_rasl_output_flag) {
2794  s->sei.mastering_display.present--;
2795  }
2796  if (s->sei.mastering_display.present) {
2797  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2798  const int mapping[3] = {2, 0, 1};
2799  const int chroma_den = 50000;
2800  const int luma_den = 10000;
2801  int i;
2802  AVMasteringDisplayMetadata *metadata =
2803  av_mastering_display_metadata_create_side_data(out);
2804  if (!metadata)
2805  return AVERROR(ENOMEM);
2806 
2807  for (i = 0; i < 3; i++) {
2808  const int j = mapping[i];
2809  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2810  metadata->display_primaries[i][0].den = chroma_den;
2811  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2812  metadata->display_primaries[i][1].den = chroma_den;
2813  }
2814  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2815  metadata->white_point[0].den = chroma_den;
2816  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2817  metadata->white_point[1].den = chroma_den;
2818 
2819  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2820  metadata->max_luminance.den = luma_den;
2821  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2822  metadata->min_luminance.den = luma_den;
2823  metadata->has_luminance = 1;
2824  metadata->has_primaries = 1;
2825 
2826  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2827  av_log(s->avctx, AV_LOG_DEBUG,
2828  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2829  av_q2d(metadata->display_primaries[0][0]),
2830  av_q2d(metadata->display_primaries[0][1]),
2831  av_q2d(metadata->display_primaries[1][0]),
2832  av_q2d(metadata->display_primaries[1][1]),
2833  av_q2d(metadata->display_primaries[2][0]),
2834  av_q2d(metadata->display_primaries[2][1]),
2835  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2836  av_log(s->avctx, AV_LOG_DEBUG,
2837  "min_luminance=%f, max_luminance=%f\n",
2838  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2839  }
2840  // Decrement the content light level flag when an IRAP frame has no_rasl_output_flag=1
2841  // so the side data persists for the entire coded video sequence.
2842  if (s->sei.content_light.present > 0 &&
2843  IS_IRAP(s) && s->no_rasl_output_flag) {
2844  s->sei.content_light.present--;
2845  }
2846  if (s->sei.content_light.present) {
2847  AVContentLightMetadata *metadata =
2848  av_content_light_metadata_create_side_data(out);
2849  if (!metadata)
2850  return AVERROR(ENOMEM);
2851  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2852  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2853 
2854  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2855  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2856  metadata->MaxCLL, metadata->MaxFALL);
2857  }
2858 
2859  if (s->sei.a53_caption.buf_ref) {
2860  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2861 
2862  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2863  if (!sd)
2864  av_buffer_unref(&a53->buf_ref);
2865  a53->buf_ref = NULL;
2866  }
2867 
2868  for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
2869  HEVCSEIUnregistered *unreg = &s->sei.unregistered;
2870 
2871  if (unreg->buf_ref[i]) {
2872  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
2873  AV_FRAME_DATA_SEI_UNREGISTERED,
2874  unreg->buf_ref[i]);
2875  if (!sd)
2876  av_buffer_unref(&unreg->buf_ref[i]);
2877  unreg->buf_ref[i] = NULL;
2878  }
2879  }
2880  s->sei.unregistered.nb_buf_ref = 0;
2881 
2882  if (s->sei.timecode.present) {
2883  uint32_t *tc_sd;
2884  char tcbuf[AV_TIMECODE_STR_SIZE];
2885  AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
2886  sizeof(uint32_t) * 4);
2887  if (!tcside)
2888  return AVERROR(ENOMEM);
2889 
2890  tc_sd = (uint32_t*)tcside->data;
2891  tc_sd[0] = s->sei.timecode.num_clock_ts;
2892 
2893  for (int i = 0; i < tc_sd[0]; i++) {
2894  int drop = s->sei.timecode.cnt_dropped_flag[i];
2895  int hh = s->sei.timecode.hours_value[i];
2896  int mm = s->sei.timecode.minutes_value[i];
2897  int ss = s->sei.timecode.seconds_value[i];
2898  int ff = s->sei.timecode.n_frames[i];
2899 
2900  tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
2901  av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
2902  av_dict_set(&out->metadata, "timecode", tcbuf, 0);
2903  }
2904 
2905  s->sei.timecode.num_clock_ts = 0;
2906  }
2907 
2908  if (s->sei.film_grain_characteristics.present) {
2909  HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
2910  AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
2911  if (!fgp)
2912  return AVERROR(ENOMEM);
2913 
2914  fgp->type = AV_FILM_GRAIN_PARAMS_H274;
2915  fgp->seed = s->ref->poc; /* no poc_offset in HEVC */
2916 
2917  fgp->codec.h274.model_id = fgc->model_id;
2918  if (fgc->separate_colour_description_present_flag) {
2919  fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
2920  fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
2921  fgp->codec.h274.color_range = fgc->full_range + 1;
2922  fgp->codec.h274.color_primaries = fgc->color_primaries;
2923  fgp->codec.h274.color_trc = fgc->transfer_characteristics;
2924  fgp->codec.h274.color_space = fgc->matrix_coeffs;
2925  } else {
2926  const HEVCSPS *sps = s->ps.sps;
2927  const VUI *vui = &sps->vui;
2928  fgp->codec.h274.bit_depth_luma = sps->bit_depth;
2929  fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
2930  if (vui->video_signal_type_present_flag)
2931  fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
2932  else
2933  fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
2934  if (vui->colour_description_present_flag) {
2935  fgp->codec.h274.color_primaries = vui->colour_primaries;
2936  fgp->codec.h274.color_trc = vui->transfer_characteristic;
2937  fgp->codec.h274.color_space = vui->matrix_coeffs;
2938  } else {
2939  fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
2940  fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
2941  fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
2942  }
2943  }
2944  fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
2945  fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
2946 
2947  memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
2948  sizeof(fgp->codec.h274.component_model_present));
2949  memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
2950  sizeof(fgp->codec.h274.num_intensity_intervals));
2951  memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
2952  sizeof(fgp->codec.h274.num_model_values));
2953  memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
2954  sizeof(fgp->codec.h274.intensity_interval_lower_bound));
2955  memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
2956  sizeof(fgp->codec.h274.intensity_interval_upper_bound));
2957  memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
2958  sizeof(fgp->codec.h274.comp_model_value));
2959 
2960  fgc->present = fgc->persistence_flag;
2961  }
2962 
2963  if (s->sei.dynamic_hdr_plus.info) {
2964  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
2965  if (!info_ref)
2966  return AVERROR(ENOMEM);
2967 
2968  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
2969  av_buffer_unref(&info_ref);
2970  return AVERROR(ENOMEM);
2971  }
2972  }
2973 
2974  if (s->rpu_buf) {
2975  AVFrameSideData *rpu = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DOVI_RPU_BUFFER, s->rpu_buf);
2976  if (!rpu)
2977  return AVERROR(ENOMEM);
2978 
2979  s->rpu_buf = NULL;
2980  }
2981 
2982  if ((ret = ff_dovi_attach_side_data(&s->dovi_ctx, out)) < 0)
2983  return ret;
2984 
2985  if (s->sei.dynamic_hdr_vivid.info) {
2986  AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_vivid.info);
2987  if (!info_ref)
2988  return AVERROR(ENOMEM);
2989 
2990  if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
2991  av_buffer_unref(&info_ref);
2992  return AVERROR(ENOMEM);
2993  }
2994  }
2995 
2996  return 0;
2997 }
2998 
2999 static int hevc_frame_start(HEVCContext *s)
3000 {
3001  HEVCLocalContext *lc = s->HEVClc;
3002  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
3003  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
3004  int ret;
3005 
3006  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
3007  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
3008  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
3009  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
3010  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
3011 
3012  s->is_decoded = 0;
3013  s->first_nal_type = s->nal_unit_type;
3014 
3015  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
3016 
3017  if (s->ps.pps->tiles_enabled_flag)
3018  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
3019 
3020  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
3021  if (ret < 0)
3022  goto fail;
3023 
3024  ret = ff_hevc_frame_rps(s);
3025  if (ret < 0) {
3026  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
3027  goto fail;
3028  }
3029 
3030  s->ref->frame->key_frame = IS_IRAP(s);
3031 
3032  s->ref->needs_fg = s->sei.film_grain_characteristics.present &&
3033  !(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) &&
3034  !s->avctx->hwaccel;
3035 
3036  if (s->ref->needs_fg) {
3037  s->ref->frame_grain->format = s->ref->frame->format;
3038  s->ref->frame_grain->width = s->ref->frame->width;
3039  s->ref->frame_grain->height = s->ref->frame->height;
3040  if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
3041  goto fail;
3042  }
3043 
3044  ret = set_side_data(s);
3045  if (ret < 0)
3046  goto fail;
3047 
3048  s->frame->pict_type = 3 - s->sh.slice_type;
3049 
3050  if (!IS_IRAP(s))
3051  ff_hevc_bump_frame(s);
3052 
3053  av_frame_unref(s->output_frame);
3054  ret = ff_hevc_output_frame(s, s->output_frame, 0);
3055  if (ret < 0)
3056  goto fail;
3057 
3058  if (!s->avctx->hwaccel)
3059  ff_thread_finish_setup(s->avctx);
3060 
3061  return 0;
3062 
3063 fail:
3064  if (s->ref)
3065  ff_hevc_unref_frame(s, s->ref, ~0);
3066  s->ref = NULL;
3067  return ret;
3068 }
3069 
3070 static int hevc_frame_end(HEVCContext *s)
3071 {
3072  HEVCFrame *out = s->ref;
3073  const AVFrameSideData *sd;
3074  int ret;
3075 
3076  if (out->needs_fg) {
3077  sd = av_frame_get_side_data(out->frame, AV_FRAME_DATA_FILM_GRAIN_PARAMS);
3078  av_assert0(out->frame_grain->buf[0] && sd);
3079  ret = ff_h274_apply_film_grain(out->frame_grain, out->frame, &s->h274db,
3080  (AVFilmGrainParams *) sd->data);
3081 
3082  if (ret < 0) {
3083  av_log(s->avctx, AV_LOG_WARNING, "Failed synthesizing film "
3084  "grain, ignoring: %s\n", av_err2str(ret));
3085  out->needs_fg = 0;
3086  }
3087  }
3088 
3089  return 0;
3090 }
3091 
3092 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
3093 {
3094  HEVCLocalContext *lc = s->HEVClc;
3095  GetBitContext *gb = &lc->gb;
3096  int ctb_addr_ts, ret;
3097 
3098  *gb = nal->gb;
3099  s->nal_unit_type = nal->type;
3100  s->temporal_id = nal->temporal_id;
3101 
3102  switch (s->nal_unit_type) {
3103  case HEVC_NAL_VPS:
3104  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3105  ret = s->avctx->hwaccel->decode_params(s->avctx,
3106  nal->type,
3107  nal->raw_data,
3108  nal->raw_size);
3109  if (ret < 0)
3110  goto fail;
3111  }
3112  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
3113  if (ret < 0)
3114  goto fail;
3115  break;
3116  case HEVC_NAL_SPS:
3117  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3118  ret = s->avctx->hwaccel->decode_params(s->avctx,
3119  nal->type,
3120  nal->raw_data,
3121  nal->raw_size);
3122  if (ret < 0)
3123  goto fail;
3124  }
3125  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
3126  s->apply_defdispwin);
3127  if (ret < 0)
3128  goto fail;
3129  break;
3130  case HEVC_NAL_PPS:
3131  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3132  ret = s->avctx->hwaccel->decode_params(s->avctx,
3133  nal->type,
3134  nal->raw_data,
3135  nal->raw_size);
3136  if (ret < 0)
3137  goto fail;
3138  }
3139  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
3140  if (ret < 0)
3141  goto fail;
3142  break;
3143  case HEVC_NAL_SEI_PREFIX:
3144  case HEVC_NAL_SEI_SUFFIX:
3145  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
3146  ret = s->avctx->hwaccel->decode_params(s->avctx,
3147  nal->type,
3148  nal->raw_data,
3149  nal->raw_size);
3150  if (ret < 0)
3151  goto fail;
3152  }
3153  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
3154  if (ret < 0)
3155  goto fail;
3156  break;
3157  case HEVC_NAL_TRAIL_R:
3158  case HEVC_NAL_TRAIL_N:
3159  case HEVC_NAL_TSA_N:
3160  case HEVC_NAL_TSA_R:
3161  case HEVC_NAL_STSA_N:
3162  case HEVC_NAL_STSA_R:
3163  case HEVC_NAL_BLA_W_LP:
3164  case HEVC_NAL_BLA_W_RADL:
3165  case HEVC_NAL_BLA_N_LP:
3166  case HEVC_NAL_IDR_W_RADL:
3167  case HEVC_NAL_IDR_N_LP:
3168  case HEVC_NAL_CRA_NUT:
3169  case HEVC_NAL_RADL_N:
3170  case HEVC_NAL_RADL_R:
3171  case HEVC_NAL_RASL_N:
3172  case HEVC_NAL_RASL_R:
3173  ret = hls_slice_header(s);
3174  if (ret < 0)
3175  return ret;
3176  if (ret == 1) {
3177  ret = AVERROR_INVALIDDATA;
3178  goto fail;
3179  }
3180 
3181 
3182  if (
3183  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
3184  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
3185  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
3186  break;
3187  }
3188 
3189  if (s->sh.first_slice_in_pic_flag) {
3190  if (s->max_ra == INT_MAX) {
3191  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
3192  s->max_ra = s->poc;
3193  } else {
3194  if (IS_IDR(s))
3195  s->max_ra = INT_MIN;
3196  }
3197  }
3198 
3199  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
3200  s->poc <= s->max_ra) {
3201  s->is_decoded = 0;
3202  break;
3203  } else {
3204  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
3205  s->max_ra = INT_MIN;
3206  }
3207 
3208  s->overlap ++;
3209  ret = hevc_frame_start(s);
3210  if (ret < 0)
3211  return ret;
3212  } else if (!s->ref) {
3213  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
3214  goto fail;
3215  }
3216 
3217  if (s->nal_unit_type != s->first_nal_type) {
3218  av_log(s->avctx, AV_LOG_ERROR,
3219  "Non-matching NAL types of the VCL NALUs: %d %d\n",
3220  s->first_nal_type, s->nal_unit_type);
3221  return AVERROR_INVALIDDATA;
3222  }
3223 
3224  if (!s->sh.dependent_slice_segment_flag &&
3225  s->sh.slice_type != HEVC_SLICE_I) {
3226  ret = ff_hevc_slice_rpl(s);
3227  if (ret < 0) {
3228  av_log(s->avctx, AV_LOG_WARNING,
3229  "Error constructing the reference lists for the current slice.\n");
3230  goto fail;
3231  }
3232  }
3233 
3234  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3235  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3236  if (ret < 0)
3237  goto fail;
3238  }
3239 
3240  if (s->avctx->hwaccel) {
3241  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3242  if (ret < 0)
3243  goto fail;
3244  } else {
3245  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3246  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3247  else
3248  ctb_addr_ts = hls_slice_data(s);
3249  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3250  ret = hevc_frame_end(s);
3251  if (ret < 0)
3252  goto fail;
3253  s->is_decoded = 1;
3254  }
3255 
3256  if (ctb_addr_ts < 0) {
3257  ret = ctb_addr_ts;
3258  goto fail;
3259  }
3260  }
3261  break;
3262  case HEVC_NAL_EOS_NUT:
3263  case HEVC_NAL_EOB_NUT:
3264  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3265  s->max_ra = INT_MAX;
3266  break;
3267  case HEVC_NAL_AUD:
3268  case HEVC_NAL_FD_NUT:
3269  case HEVC_NAL_UNSPEC62:
3270  break;
3271  default:
3272  av_log(s->avctx, AV_LOG_INFO,
3273  "Skipping NAL unit %d\n", s->nal_unit_type);
3274  }
3275 
3276  return 0;
3277 fail:
3278  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3279  return ret;
3280  return 0;
3281 }
3282 
3283 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3284 {
3285  int i, ret = 0;
3286  int eos_at_start = 1;
3287 
3288  s->ref = NULL;
3289  s->last_eos = s->eos;
3290  s->eos = 0;
3291  s->overlap = 0;
3292 
3293  /* split the input packet into NAL units, so we know the upper bound on the
3294  * number of slices in the frame */
3295  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3296  s->nal_length_size, s->avctx->codec_id, 1, 0);
3297  if (ret < 0) {
3298  av_log(s->avctx, AV_LOG_ERROR,
3299  "Error splitting the input into NAL units.\n");
3300  return ret;
3301  }
3302 
3303  for (i = 0; i < s->pkt.nb_nals; i++) {
3304  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3305  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3306  if (eos_at_start) {
3307  s->last_eos = 1;
3308  } else {
3309  s->eos = 1;
3310  }
3311  } else {
3312  eos_at_start = 0;
3313  }
3314  }
3315 
3316  /*
3317  * Check for RPU delimiter.
3318  *
3319  * Dolby Vision RPUs masquerade as unregistered NALs of type 62.
3320  *
3321  * We have to do this check here and create the rpu buffer, since RPUs are appended
3322  * to the end of an AU; they are the last non-EOB/EOS NAL in the AU.
3323  */
3324  if (s->pkt.nb_nals > 1 && s->pkt.nals[s->pkt.nb_nals - 1].type == HEVC_NAL_UNSPEC62 &&
3325  s->pkt.nals[s->pkt.nb_nals - 1].size > 2 && !s->pkt.nals[s->pkt.nb_nals - 1].nuh_layer_id
3326  && !s->pkt.nals[s->pkt.nb_nals - 1].temporal_id) {
3327  H2645NAL *nal = &s->pkt.nals[s->pkt.nb_nals - 1];
3328  if (s->rpu_buf) {
3329  av_buffer_unref(&s->rpu_buf);
3330  av_log(s->avctx, AV_LOG_WARNING, "Multiple Dolby Vision RPUs found in one AU. Skipping previous.\n");
3331  }
3332 
3333  s->rpu_buf = av_buffer_alloc(nal->raw_size - 2);
3334  if (!s->rpu_buf)
3335  return AVERROR(ENOMEM);
3336  memcpy(s->rpu_buf->data, nal->raw_data + 2, nal->raw_size - 2);
3337 
3338  ret = ff_dovi_rpu_parse(&s->dovi_ctx, nal->data + 2, nal->size - 2);
3339  if (ret < 0) {
3340  av_buffer_unref(&s->rpu_buf);
3341  av_log(s->avctx, AV_LOG_WARNING, "Error parsing DOVI NAL unit.\n");
3342  /* ignore */
3343  }
3344  }
3345 
3346  /* decode the NAL units */
3347  for (i = 0; i < s->pkt.nb_nals; i++) {
3348  H2645NAL *nal = &s->pkt.nals[i];
3349 
3350  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3351  (s->avctx->skip_frame >= AVDISCARD_NONREF
3352  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3353  continue;
3354 
3355  ret = decode_nal_unit(s, nal);
3356  if (ret >= 0 && s->overlap > 2)
3357  ret = AVERROR_INVALIDDATA;
3358  if (ret < 0) {
3359  av_log(s->avctx, AV_LOG_WARNING,
3360  "Error parsing NAL unit #%d.\n", i);
3361  goto fail;
3362  }
3363  }
3364 
3365 fail:
3366  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3367  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3368 
3369  return ret;
3370 }
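
The RPU handling above only strips the two NAL header bytes and parses the payload; the parsed result is attached to output frames elsewhere in the decoder. As a hedged caller-side sketch (hypothetical helper, not part of this file), an application can check whether a decoded frame carries the raw RPU side data:

#include <libavcodec/avcodec.h>

/* Hypothetical helper: report the size of any Dolby Vision RPU buffer
 * attached to a decoded frame. */
static void log_dovi_rpu(void *logctx, const AVFrame *frame)
{
    const AVFrameSideData *sd =
        av_frame_get_side_data(frame, AV_FRAME_DATA_DOVI_RPU_BUFFER);
    if (sd)
        av_log(logctx, AV_LOG_INFO, "frame carries a %d-byte DOVI RPU\n",
               (int)sd->size);
}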
3371 
3372 static int verify_md5(HEVCContext *s, AVFrame *frame)
3373 {
3374  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3375  char msg_buf[4 * (50 + 2 * 2 * 16 /* MD5-size */)];
3376  int pixel_shift;
3377  int err = 0;
3378  int i, j;
3379 
3380  if (!desc)
3381  return AVERROR(EINVAL);
3382 
3383  pixel_shift = desc->comp[0].depth > 8;
3384 
3385  /* the checksums are LE, so we have to byteswap for >8bpp formats
3386  * on BE arches */
3387 #if HAVE_BIGENDIAN
3388  if (pixel_shift && !s->checksum_buf) {
3389  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3390  FFMAX3(frame->linesize[0], frame->linesize[1],
3391  frame->linesize[2]));
3392  if (!s->checksum_buf)
3393  return AVERROR(ENOMEM);
3394  }
3395 #endif
3396 
3397  msg_buf[0] = '\0';
3398  for (i = 0; frame->data[i]; i++) {
3399  int width = s->avctx->coded_width;
3400  int height = s->avctx->coded_height;
3401  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3402  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3403  uint8_t md5[16];
3404 
3405  av_md5_init(s->md5_ctx);
3406  for (j = 0; j < h; j++) {
3407  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3408 #if HAVE_BIGENDIAN
3409  if (pixel_shift) {
3410  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3411  (const uint16_t *) src, w);
3412  src = s->checksum_buf;
3413  }
3414 #endif
3415  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3416  }
3417  av_md5_final(s->md5_ctx, md5);
3418 
3419 #define MD5_PRI "%016" PRIx64 "%016" PRIx64
3420 #define MD5_PRI_ARG(buf) AV_RB64(buf), AV_RB64((const uint8_t*)(buf) + 8)
3421 
3422  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3423  av_strlcatf(msg_buf, sizeof(msg_buf),
3424  "plane %d - correct " MD5_PRI "; ",
3425  i, MD5_PRI_ARG(md5));
3426  } else {
3427  av_strlcatf(msg_buf, sizeof(msg_buf),
3428  "mismatching checksum of plane %d - " MD5_PRI " != " MD5_PRI "; ",
3429  i, MD5_PRI_ARG(md5), MD5_PRI_ARG(s->sei.picture_hash.md5[i]));
3430  err = AVERROR_INVALIDDATA;
3431  }
3432  }
3433 
3434  av_log(s->avctx, err < 0 ? AV_LOG_ERROR : AV_LOG_DEBUG,
3435  "Verifying checksum for frame with POC %d: %s\n",
3436  s->poc, msg_buf);
3437 
3438  return err;
3439 }
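
verify_md5() above hashes each plane row by row with the libavutil MD5 helpers and compares the digests against the SEI picture hash. A minimal, self-contained sketch of the same row-wise hashing pattern (hypothetical helper, not taken from FFmpeg itself):

#include <stddef.h>
#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/md5.h>
#include <libavutil/mem.h>

/* Hypothetical helper: MD5 of `h` rows of `w` bytes from a plane with the
 * given stride, mirroring the per-plane loop in verify_md5(). */
static int hash_plane(const uint8_t *data, ptrdiff_t stride,
                      int w, int h, uint8_t out[16])
{
    struct AVMD5 *md5 = av_md5_alloc();
    if (!md5)
        return AVERROR(ENOMEM);
    av_md5_init(md5);
    for (int y = 0; y < h; y++)
        av_md5_update(md5, data + y * stride, w);
    av_md5_final(md5, out);
    av_free(md5);
    return 0;
}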
3440 
3441 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3442 {
3443  int ret, i;
3444 
3445  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3446  &s->nal_length_size, s->avctx->err_recognition,
3447  s->apply_defdispwin, s->avctx);
3448  if (ret < 0)
3449  return ret;
3450 
3451  /* export stream parameters from the first SPS */
3452  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3453  if (first && s->ps.sps_list[i]) {
3454  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3455  export_stream_params(s, sps);
3456  break;
3457  }
3458  }
3459 
3460  /* export stream parameters from SEI */
3461  ret = export_stream_params_from_sei(s);
3462  if (ret < 0)
3463  return ret;
3464 
3465  return 0;
3466 }
3467 
3468 static int hevc_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
3469  int *got_output, AVPacket *avpkt)
3470 {
3471  int ret;
3472  uint8_t *sd;
3473  size_t sd_size;
3474  HEVCContext *s = avctx->priv_data;
3475 
3476  if (!avpkt->size) {
3477  ret = ff_hevc_output_frame(s, rframe, 1);
3478  if (ret < 0)
3479  return ret;
3480 
3481  *got_output = ret;
3482  return 0;
3483  }
3484 
3485  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &sd_size);
3486  if (sd && sd_size > 0) {
3487  ret = hevc_decode_extradata(s, sd, sd_size, 0);
3488  if (ret < 0)
3489  return ret;
3490  }
3491 
3492  sd = av_packet_get_side_data(avpkt, AV_PKT_DATA_DOVI_CONF, &sd_size);
3493  if (sd && sd_size > 0)
3494  ff_dovi_update_cfg(&s->dovi_ctx, (AVDOVIDecoderConfigurationRecord *) sd);
3495 
3496  s->ref = NULL;
3497  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3498  if (ret < 0)
3499  return ret;
3500 
3501  if (avctx->hwaccel) {
3502  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3503  av_log(avctx, AV_LOG_ERROR,
3504  "hardware accelerator failed to decode picture\n");
3505  ff_hevc_unref_frame(s, s->ref, ~0);
3506  return ret;
3507  }
3508  } else {
3509  /* verify the SEI checksum */
3510  if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded &&
3511  s->sei.picture_hash.is_md5) {
3512  ret = verify_md5(s, s->ref->frame);
3513  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3514  ff_hevc_unref_frame(s, s->ref, ~0);
3515  return ret;
3516  }
3517  }
3518  }
3519  s->sei.picture_hash.is_md5 = 0;
3520 
3521  if (s->is_decoded) {
3522  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3523  s->is_decoded = 0;
3524  }
3525 
3526  if (s->output_frame->buf[0]) {
3527  av_frame_move_ref(rframe, s->output_frame);
3528  *got_output = 1;
3529  }
3530 
3531  return avpkt->size;
3532 }
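
hevc_decode_frame() is wired up below as the FF_CODEC_DECODE_CB callback and is never called directly by applications; they drive it through the generic send/receive API. A hedged caller-side sketch (hypothetical helper, error handling condensed):

#include <libavcodec/avcodec.h>

/* Hypothetical helper: send one packet (or NULL to flush) and drain all
 * frames the decoder produces. */
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;
        /* ... use frame here ... */
        av_frame_unref(frame);
    }
    return 0;
}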
3533 
3534 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3535 {
3536  int ret;
3537 
3538  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3539  if (ret < 0)
3540  return ret;
3541 
3542  if (src->needs_fg) {
3543  ret = av_frame_ref(dst->frame_grain, src->frame_grain);
3544  if (ret < 0)
3545  return ret;
3546  dst->needs_fg = 1;
3547  }
3548 
3549  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3550  if (!dst->tab_mvf_buf)
3551  goto fail;
3552  dst->tab_mvf = src->tab_mvf;
3553 
3554  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3555  if (!dst->rpl_tab_buf)
3556  goto fail;
3557  dst->rpl_tab = src->rpl_tab;
3558 
3559  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3560  if (!dst->rpl_buf)
3561  goto fail;
3562 
3563  dst->poc = src->poc;
3564  dst->ctb_count = src->ctb_count;
3565  dst->flags = src->flags;
3566  dst->sequence = src->sequence;
3567 
3568  if (src->hwaccel_picture_private) {
3569  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3570  if (!dst->hwaccel_priv_buf)
3571  goto fail;
3572  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3573  }
3574 
3575  return 0;
3576 fail:
3577  ff_hevc_unref_frame(s, dst, ~0);
3578  return AVERROR(ENOMEM);
3579 }
3580 
3581 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3582 {
3583  HEVCContext *s = avctx->priv_data;
3584  int i;
3585 
3586  pic_arrays_free(s);
3587 
3588  ff_dovi_ctx_unref(&s->dovi_ctx);
3589  av_buffer_unref(&s->rpu_buf);
3590 
3591  av_freep(&s->md5_ctx);
3592 
3593  for (i = 0; i < 3; i++) {
3594  av_freep(&s->sao_pixel_buffer_h[i]);
3595  av_freep(&s->sao_pixel_buffer_v[i]);
3596  }
3597  av_frame_free(&s->output_frame);
3598 
3599  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3600  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3601  av_frame_free(&s->DPB[i].frame);
3602  av_frame_free(&s->DPB[i].frame_grain);
3603  }
3604 
3605  ff_hevc_ps_uninit(&s->ps);
3606 
3607  av_freep(&s->sh.entry_point_offset);
3608  av_freep(&s->sh.offset);
3609  av_freep(&s->sh.size);
3610 
3611  if (s->HEVClcList) {
3612  for (i = 1; i < s->threads_number; i++) {
3613  av_freep(&s->HEVClcList[i]);
3614  }
3615  }
3616  av_freep(&s->HEVClc);
3617  av_freep(&s->HEVClcList);
3618 
3619  ff_h2645_packet_uninit(&s->pkt);
3620 
3621  ff_hevc_reset_sei(&s->sei);
3622 
3623  return 0;
3624 }
3625 
3626 static av_cold int hevc_init_context(AVCodecContext *avctx)
3627 {
3628  HEVCContext *s = avctx->priv_data;
3629  int i;
3630 
3631  s->avctx = avctx;
3632 
3633  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3634  s->HEVClcList = av_mallocz(sizeof(HEVCLocalContext*) * s->threads_number);
3635  if (!s->HEVClc || !s->HEVClcList)
3636  return AVERROR(ENOMEM);
3637  s->HEVClc->parent = s;
3638  s->HEVClc->logctx = avctx;
3639  s->HEVClc->common_cabac_state = &s->cabac;
3640  s->HEVClcList[0] = s->HEVClc;
3641 
3642  s->output_frame = av_frame_alloc();
3643  if (!s->output_frame)
3644  return AVERROR(ENOMEM);
3645 
3646  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3647  s->DPB[i].frame = av_frame_alloc();
3648  if (!s->DPB[i].frame)
3649  return AVERROR(ENOMEM);
3650  s->DPB[i].tf.f = s->DPB[i].frame;
3651 
3652  s->DPB[i].frame_grain = av_frame_alloc();
3653  if (!s->DPB[i].frame_grain)
3654  return AVERROR(ENOMEM);
3655  }
3656 
3657  s->max_ra = INT_MAX;
3658 
3659  s->md5_ctx = av_md5_alloc();
3660  if (!s->md5_ctx)
3661  return AVERROR(ENOMEM);
3662 
3663  ff_bswapdsp_init(&s->bdsp);
3664 
3665  s->dovi_ctx.logctx = avctx;
3666  s->eos = 0;
3667 
3668  ff_hevc_reset_sei(&s->sei);
3669 
3670  return 0;
3671 }
3672 
3673 #if HAVE_THREADS
3674 static int hevc_update_thread_context(AVCodecContext *dst,
3675  const AVCodecContext *src)
3676 {
3677  HEVCContext *s = dst->priv_data;
3678  HEVCContext *s0 = src->priv_data;
3679  int i, ret;
3680 
3681  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3682  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3683  if (s0->DPB[i].frame->buf[0]) {
3684  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3685  if (ret < 0)
3686  return ret;
3687  }
3688  }
3689 
3690  if (s->ps.sps != s0->ps.sps)
3691  s->ps.sps = NULL;
3692  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3693  ret = av_buffer_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
3694  if (ret < 0)
3695  return ret;
3696  }
3697 
3698  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3699  ret = av_buffer_replace(&s->ps.sps_list[i], s0->ps.sps_list[i]);
3700  if (ret < 0)
3701  return ret;
3702  }
3703 
3704  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3705  ret = av_buffer_replace(&s->ps.pps_list[i], s0->ps.pps_list[i]);
3706  if (ret < 0)
3707  return ret;
3708  }
3709 
3710  if (s->ps.sps != s0->ps.sps)
3711  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3712  return ret;
3713 
3714  s->seq_decode = s0->seq_decode;
3715  s->seq_output = s0->seq_output;
3716  s->pocTid0 = s0->pocTid0;
3717  s->max_ra = s0->max_ra;
3718  s->eos = s0->eos;
3719  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3720 
3721  s->is_nalff = s0->is_nalff;
3722  s->nal_length_size = s0->nal_length_size;
3723 
3724  s->threads_number = s0->threads_number;
3725  s->threads_type = s0->threads_type;
3726 
3727  if (s0->eos) {
3728  s->seq_decode = (s->seq_decode + 1) & HEVC_SEQUENCE_COUNTER_MASK;
3729  s->max_ra = INT_MAX;
3730  }
3731 
3732  ret = av_buffer_replace(&s->sei.a53_caption.buf_ref, s0->sei.a53_caption.buf_ref);
3733  if (ret < 0)
3734  return ret;
3735 
3736  for (i = 0; i < s->sei.unregistered.nb_buf_ref; i++)
3737  av_buffer_unref(&s->sei.unregistered.buf_ref[i]);
3738  s->sei.unregistered.nb_buf_ref = 0;
3739 
3740  if (s0->sei.unregistered.nb_buf_ref) {
3741  ret = av_reallocp_array(&s->sei.unregistered.buf_ref,
3742  s0->sei.unregistered.nb_buf_ref,
3743  sizeof(*s->sei.unregistered.buf_ref));
3744  if (ret < 0)
3745  return ret;
3746 
3747  for (i = 0; i < s0->sei.unregistered.nb_buf_ref; i++) {
3748  s->sei.unregistered.buf_ref[i] = av_buffer_ref(s0->sei.unregistered.buf_ref[i]);
3749  if (!s->sei.unregistered.buf_ref[i])
3750  return AVERROR(ENOMEM);
3751  s->sei.unregistered.nb_buf_ref++;
3752  }
3753  }
3754 
3755  ret = av_buffer_replace(&s->sei.dynamic_hdr_plus.info, s0->sei.dynamic_hdr_plus.info);
3756  if (ret < 0)
3757  return ret;
3758 
3759  ret = av_buffer_replace(&s->rpu_buf, s0->rpu_buf);
3760  if (ret < 0)
3761  return ret;
3762 
3763  ret = ff_dovi_ctx_replace(&s->dovi_ctx, &s0->dovi_ctx);
3764  if (ret < 0)
3765  return ret;
3766 
3767  ret = av_buffer_replace(&s->sei.dynamic_hdr_vivid.info, s0->sei.dynamic_hdr_vivid.info);
3768  if (ret < 0)
3769  return ret;
3770 
3771  s->sei.frame_packing = s0->sei.frame_packing;
3772  s->sei.display_orientation = s0->sei.display_orientation;
3773  s->sei.mastering_display = s0->sei.mastering_display;
3774  s->sei.content_light = s0->sei.content_light;
3775  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3776 
3777  ret = export_stream_params_from_sei(s);
3778  if (ret < 0)
3779  return ret;
3780 
3781  return 0;
3782 }
3783 #endif
3784 
3785 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3786 {
3787  HEVCContext *s = avctx->priv_data;
3788  int ret;
3789 
3790  if (avctx->active_thread_type & FF_THREAD_SLICE) {
3791  s->threads_number = avctx->thread_count;
3792  ret = ff_slice_thread_init_progress(avctx);
3793  if (ret < 0)
3794  return ret;
3795  } else
3796  s->threads_number = 1;
3797 
3798  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3799  s->threads_type = FF_THREAD_FRAME;
3800  else
3801  s->threads_type = FF_THREAD_SLICE;
3802 
3803  ret = hevc_init_context(avctx);
3804  if (ret < 0)
3805  return ret;
3806 
3807  s->enable_parallel_tiles = 0;
3808  s->sei.picture_timing.picture_struct = 0;
3809  s->eos = 1;
3810 
3811  atomic_init(&s->wpp_err, 0);
3812 
3813  if (!avctx->internal->is_copy) {
3814  if (avctx->extradata_size > 0 && avctx->extradata) {
3815  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3816  if (ret < 0) {
3817  return ret;
3818  }
3819  }
3820  }
3821 
3822  return 0;
3823 }
3824 
3825 static void hevc_decode_flush(AVCodecContext *avctx)
3826 {
3827  HEVCContext *s = avctx->priv_data;
3828  ff_hevc_flush_dpb(s);
3829  ff_hevc_reset_sei(&s->sei);
3830  ff_dovi_ctx_flush(&s->dovi_ctx);
3831  av_buffer_unref(&s->rpu_buf);
3832  s->max_ra = INT_MAX;
3833  s->eos = 1;
3834 }
3835 
3836 #define OFFSET(x) offsetof(HEVCContext, x)
3837 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3838 
3839 static const AVOption options[] = {
3840  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3841  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3842  { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3843  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3844  { NULL },
3845 };
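
Both entries write to the same apply_defdispwin field, so either spelling enables the default display window; "strict-displaywin" appears to be kept for compatibility. A hedged sketch of enabling the option through the public API when opening the decoder (hypothetical helper, not part of this file):

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Hypothetical helper: open an HEVC decoder with apply_defdispwin enabled. */
static AVCodecContext *open_hevc_with_defdispwin(void)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_HEVC);
    AVCodecContext *ctx  = codec ? avcodec_alloc_context3(codec) : NULL;
    AVDictionary *opts   = NULL;
    int ret;

    if (!ctx)
        return NULL;
    av_dict_set(&opts, "apply_defdispwin", "1", 0);
    ret = avcodec_open2(ctx, codec, &opts);
    av_dict_free(&opts);
    if (ret < 0)
        avcodec_free_context(&ctx);
    return ctx;
}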
3846 
3847 static const AVClass hevc_decoder_class = {
3848  .class_name = "HEVC decoder",
3849  .item_name = av_default_item_name,
3850  .option = options,
3851  .version = LIBAVUTIL_VERSION_INT,
3852 };
3853 
3854 const FFCodec ff_hevc_decoder = {
3855  .p.name = "hevc",
3856  CODEC_LONG_NAME("HEVC (High Efficiency Video Coding)"),
3857  .p.type = AVMEDIA_TYPE_VIDEO,
3858  .p.id = AV_CODEC_ID_HEVC,
3859  .priv_data_size = sizeof(HEVCContext),
3860  .p.priv_class = &hevc_decoder_class,
3861  .init = hevc_decode_init,
3862  .close = hevc_decode_free,
3863  FF_CODEC_DECODE_CB(hevc_decode_frame),
3864  .flush = hevc_decode_flush,
3865  UPDATE_THREAD_CONTEXT(hevc_update_thread_context),
3866  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3867  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3868  .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING |
3869  FF_CODEC_CAP_ALLOCATE_PROGRESS | FF_CODEC_CAP_INIT_CLEANUP,
3870  .p.profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3871  .hw_configs = (const AVCodecHWConfigInternal *const []) {
3872 #if CONFIG_HEVC_DXVA2_HWACCEL
3873  HWACCEL_DXVA2(hevc),
3874 #endif
3875 #if CONFIG_HEVC_D3D11VA_HWACCEL
3876  HWACCEL_D3D11VA(hevc),
3877 #endif
3878 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3879  HWACCEL_D3D11VA2(hevc),
3880 #endif
3881 #if CONFIG_HEVC_NVDEC_HWACCEL
3882  HWACCEL_NVDEC(hevc),
3883 #endif
3884 #if CONFIG_HEVC_VAAPI_HWACCEL
3885  HWACCEL_VAAPI(hevc),
3886 #endif
3887 #if CONFIG_HEVC_VDPAU_HWACCEL
3888  HWACCEL_VDPAU(hevc),
3889 #endif
3890 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3891  HWACCEL_VIDEOTOOLBOX(hevc),
3892 #endif
3893  NULL
3894  },
3895 };