hevcdec.c
1 /*
2  * HEVC video Decoder
3  *
4  * Copyright (C) 2012 - 2013 Guillaume Martres
5  * Copyright (C) 2012 - 2013 Mickael Raulet
6  * Copyright (C) 2012 - 2013 Gildas Cocherel
7  * Copyright (C) 2012 - 2013 Wassim Hamidouche
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  */
25 
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/mastering_display_metadata.h"
31 #include "libavutil/md5.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/stereo3d.h"
35 
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "cabac_functions.h"
39 #include "golomb.h"
40 #include "hevc.h"
41 #include "hevc_data.h"
42 #include "hevc_parse.h"
43 #include "hevcdec.h"
44 #include "hwconfig.h"
45 #include "profiles.h"
46 
47 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
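/* Maps a prediction block width to the index of the matching put_hevc_qpel/epel
 * DSP function (e.g. width 8 -> idx 3, width 64 -> idx 9); only block widths
 * that HEVC inter prediction can actually produce are filled in. */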
48 
49 /**
50  * NOTE: Each function hls_foo corresponds to the function foo in the
51  * specification (HLS stands for High Level Syntax).
52  */
53 
54 /**
55  * Section 5.7
56  */
57 
58 /* free everything allocated by pic_arrays_init() */
59 static void pic_arrays_free(HEVCContext *s)
60 {
61  av_freep(&s->sao);
62  av_freep(&s->deblock);
63 
64  av_freep(&s->skip_flag);
65  av_freep(&s->tab_ct_depth);
66 
67  av_freep(&s->tab_ipm);
68  av_freep(&s->cbf_luma);
69  av_freep(&s->is_pcm);
70 
71  av_freep(&s->qp_y_tab);
72  av_freep(&s->tab_slice_address);
73  av_freep(&s->filter_slice_edges);
74 
75  av_freep(&s->horizontal_bs);
76  av_freep(&s->vertical_bs);
77 
78  av_freep(&s->sh.entry_point_offset);
79  av_freep(&s->sh.size);
80  av_freep(&s->sh.offset);
81 
82  av_buffer_pool_uninit(&s->tab_mvf_pool);
83  av_buffer_pool_uninit(&s->rpl_tab_pool);
84 }
85 
86 /* allocate arrays that depend on frame dimensions */
87 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
88 {
89  int log2_min_cb_size = sps->log2_min_cb_size;
90  int width = sps->width;
91  int height = sps->height;
92  int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
93  ((height >> log2_min_cb_size) + 1);
94  int ctb_count = sps->ctb_width * sps->ctb_height;
95  int min_pu_size = sps->min_pu_width * sps->min_pu_height;
96 
97  s->bs_width = (width >> 2) + 1;
98  s->bs_height = (height >> 2) + 1;
99 
100  s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
101  s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
102  if (!s->sao || !s->deblock)
103  goto fail;
104 
105  s->skip_flag = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
106  s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
107  if (!s->skip_flag || !s->tab_ct_depth)
108  goto fail;
109 
110  s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
111  s->tab_ipm = av_mallocz(min_pu_size);
112  s->is_pcm = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);
113  if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
114  goto fail;
115 
116  s->filter_slice_edges = av_mallocz(ctb_count);
117  s->tab_slice_address = av_malloc_array(pic_size_in_ctb,
118  sizeof(*s->tab_slice_address));
119  s->qp_y_tab = av_malloc_array(pic_size_in_ctb,
120  sizeof(*s->qp_y_tab));
121  if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
122  goto fail;
123 
124  s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
125  s->vertical_bs = av_mallocz_array(s->bs_width, s->bs_height);
126  if (!s->horizontal_bs || !s->vertical_bs)
127  goto fail;
128 
129  s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
130  av_buffer_allocz);
131  s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
132  av_buffer_allocz);
133  if (!s->tab_mvf_pool || !s->rpl_tab_pool)
134  goto fail;
135 
136  return 0;
137 
138 fail:
139  pic_arrays_free(s);
140  return AVERROR(ENOMEM);
141 }
142 
143 static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
144 {
145  int i = 0;
146  int j = 0;
147  uint8_t luma_weight_l0_flag[16];
148  uint8_t chroma_weight_l0_flag[16];
149  uint8_t luma_weight_l1_flag[16];
150  uint8_t chroma_weight_l1_flag[16];
151  int luma_log2_weight_denom;
152 
153  luma_log2_weight_denom = get_ue_golomb_long(gb);
154  if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
155  av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
156  return AVERROR_INVALIDDATA;
157  }
158  s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
159  if (s->ps.sps->chroma_format_idc != 0) {
160  int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
161  if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
162  av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
163  return AVERROR_INVALIDDATA;
164  }
165  s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
166  }
167 
168  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
169  luma_weight_l0_flag[i] = get_bits1(gb);
170  if (!luma_weight_l0_flag[i]) {
171  s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
172  s->sh.luma_offset_l0[i] = 0;
173  }
174  }
175  if (s->ps.sps->chroma_format_idc != 0) {
176  for (i = 0; i < s->sh.nb_refs[L0]; i++)
177  chroma_weight_l0_flag[i] = get_bits1(gb);
178  } else {
179  for (i = 0; i < s->sh.nb_refs[L0]; i++)
180  chroma_weight_l0_flag[i] = 0;
181  }
182  for (i = 0; i < s->sh.nb_refs[L0]; i++) {
183  if (luma_weight_l0_flag[i]) {
184  int delta_luma_weight_l0 = get_se_golomb(gb);
185  if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
186  return AVERROR_INVALIDDATA;
187  s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
188  s->sh.luma_offset_l0[i] = get_se_golomb(gb);
189  }
190  if (chroma_weight_l0_flag[i]) {
191  for (j = 0; j < 2; j++) {
192  int delta_chroma_weight_l0 = get_se_golomb(gb);
193  int delta_chroma_offset_l0 = get_se_golomb(gb);
194 
195  if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
196  || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
197  return AVERROR_INVALIDDATA;
198  }
199 
200  s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
201  s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
202  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
203  }
204  } else {
205  s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
206  s->sh.chroma_offset_l0[i][0] = 0;
207  s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
208  s->sh.chroma_offset_l0[i][1] = 0;
209  }
210  }
211  if (s->sh.slice_type == HEVC_SLICE_B) {
212  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
213  luma_weight_l1_flag[i] = get_bits1(gb);
214  if (!luma_weight_l1_flag[i]) {
215  s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
216  s->sh.luma_offset_l1[i] = 0;
217  }
218  }
219  if (s->ps.sps->chroma_format_idc != 0) {
220  for (i = 0; i < s->sh.nb_refs[L1]; i++)
221  chroma_weight_l1_flag[i] = get_bits1(gb);
222  } else {
223  for (i = 0; i < s->sh.nb_refs[L1]; i++)
224  chroma_weight_l1_flag[i] = 0;
225  }
226  for (i = 0; i < s->sh.nb_refs[L1]; i++) {
227  if (luma_weight_l1_flag[i]) {
228  int delta_luma_weight_l1 = get_se_golomb(gb);
229  if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
230  return AVERROR_INVALIDDATA;
231  s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
232  s->sh.luma_offset_l1[i] = get_se_golomb(gb);
233  }
234  if (chroma_weight_l1_flag[i]) {
235  for (j = 0; j < 2; j++) {
236  int delta_chroma_weight_l1 = get_se_golomb(gb);
237  int delta_chroma_offset_l1 = get_se_golomb(gb);
238 
239  if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
240  || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
241  return AVERROR_INVALIDDATA;
242  }
243 
244  s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
245  s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
246  >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
247  }
248  } else {
249  s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
250  s->sh.chroma_offset_l1[i][0] = 0;
251  s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
252  s->sh.chroma_offset_l1[i][1] = 0;
253  }
254  }
255  }
256  return 0;
257 }
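/* Rough sketch of how the weights parsed above are applied later in the DSP
 * layer (see put_hevc_qpel_uni_w/bi_w for the authoritative code): for
 * uni-prediction each luma sample becomes approximately
 *     P = Clip(((p * luma_weight_lX[i]) >> luma_log2_weight_denom) + luma_offset_lX[i])
 * with rounding added before the shift, and chroma uses the corresponding
 * chroma weights, offsets and denominator. */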
258 
259 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
260 {
261  const HEVCSPS *sps = s->ps.sps;
262  int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
263  int prev_delta_msb = 0;
264  unsigned int nb_sps = 0, nb_sh;
265  int i;
266 
267  rps->nb_refs = 0;
268  if (!sps->long_term_ref_pics_present_flag)
269  return 0;
270 
271  if (sps->num_long_term_ref_pics_sps > 0)
272  nb_sps = get_ue_golomb_long(gb);
273  nb_sh = get_ue_golomb_long(gb);
274 
275  if (nb_sps > sps->num_long_term_ref_pics_sps)
276  return AVERROR_INVALIDDATA;
277  if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
278  return AVERROR_INVALIDDATA;
279 
280  rps->nb_refs = nb_sh + nb_sps;
281 
282  for (i = 0; i < rps->nb_refs; i++) {
283 
284  if (i < nb_sps) {
285  uint8_t lt_idx_sps = 0;
286 
287  if (sps->num_long_term_ref_pics_sps > 1)
288  lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
289 
290  rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
291  rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
292  } else {
293  rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
294  rps->used[i] = get_bits1(gb);
295  }
296 
297  rps->poc_msb_present[i] = get_bits1(gb);
298  if (rps->poc_msb_present[i]) {
299  int64_t delta = get_ue_golomb_long(gb);
300  int64_t poc;
301 
302  if (i && i != nb_sps)
303  delta += prev_delta_msb;
304 
305  poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
306  if (poc != (int32_t)poc)
307  return AVERROR_INVALIDDATA;
308  rps->poc[i] = poc;
309  prev_delta_msb = delta;
310  }
311  }
312 
313  return 0;
314 }
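/* When poc_msb_present is set, the loop above rebuilds a full long-term POC
 * from its LSBs, roughly poc = poc_lsb + s->poc - delta_msb * MaxPicOrderCntLsb
 * - slice pic_order_cnt_lsb (mirroring the derivation in the spec), and the
 * int32_t comparison rejects values that would overflow the stored POC. */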
315 
316 static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
317 {
318  AVCodecContext *avctx = s->avctx;
319  const HEVCParamSets *ps = &s->ps;
320  const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
321  const HEVCWindow *ow = &sps->output_window;
322  unsigned int num = 0, den = 0;
323 
324  avctx->pix_fmt = sps->pix_fmt;
325  avctx->coded_width = sps->width;
326  avctx->coded_height = sps->height;
327  avctx->width = sps->width - ow->left_offset - ow->right_offset;
328  avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
329  avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
330  avctx->profile = sps->ptl.general_ptl.profile_idc;
331  avctx->level = sps->ptl.general_ptl.level_idc;
332 
333  ff_set_sar(avctx, sps->vui.sar);
334 
335  if (sps->vui.video_signal_type_present_flag)
336  avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
337  : AVCOL_RANGE_MPEG;
338  else
339  avctx->color_range = AVCOL_RANGE_MPEG;
340 
341  if (sps->vui.colour_description_present_flag) {
342  avctx->color_primaries = sps->vui.colour_primaries;
343  avctx->color_trc = sps->vui.transfer_characteristic;
344  avctx->colorspace = sps->vui.matrix_coeffs;
345  } else {
346  avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
347  avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
348  avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
349  }
350 
351  if (vps->vps_timing_info_present_flag) {
352  num = vps->vps_num_units_in_tick;
353  den = vps->vps_time_scale;
354  } else if (sps->vui.vui_timing_info_present_flag) {
355  num = sps->vui.vui_num_units_in_tick;
356  den = sps->vui.vui_time_scale;
357  }
358 
359  if (num != 0 && den != 0)
360  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
361  num, den, 1 << 30);
362 
363  if (s->sei.alternative_transfer.present &&
364  av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
365  s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
366  avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
367  }
368 }
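/* Note the argument order of av_reduce() above: VPS/VUI timing info carries the
 * tick duration as num_units_in_tick / time_scale, so the frame rate is its
 * inverse, which is why num is written to framerate.den and den to framerate.num. */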
369 
370 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
371 {
372 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
373  CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
374  CONFIG_HEVC_NVDEC_HWACCEL + \
375  CONFIG_HEVC_VAAPI_HWACCEL + \
376  CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
377  CONFIG_HEVC_VDPAU_HWACCEL)
378  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
379 
380  switch (sps->pix_fmt) {
381  case AV_PIX_FMT_YUV420P:
382  case AV_PIX_FMT_YUVJ420P:
383 #if CONFIG_HEVC_DXVA2_HWACCEL
384  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
385 #endif
386 #if CONFIG_HEVC_D3D11VA_HWACCEL
387  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
388  *fmt++ = AV_PIX_FMT_D3D11;
389 #endif
390 #if CONFIG_HEVC_VAAPI_HWACCEL
391  *fmt++ = AV_PIX_FMT_VAAPI;
392 #endif
393 #if CONFIG_HEVC_VDPAU_HWACCEL
394  *fmt++ = AV_PIX_FMT_VDPAU;
395 #endif
396 #if CONFIG_HEVC_NVDEC_HWACCEL
397  *fmt++ = AV_PIX_FMT_CUDA;
398 #endif
399 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
400  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
401 #endif
402  break;
403  case AV_PIX_FMT_YUV420P10:
404 #if CONFIG_HEVC_DXVA2_HWACCEL
405  *fmt++ = AV_PIX_FMT_DXVA2_VLD;
406 #endif
407 #if CONFIG_HEVC_D3D11VA_HWACCEL
408  *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
409  *fmt++ = AV_PIX_FMT_D3D11;
410 #endif
411 #if CONFIG_HEVC_VAAPI_HWACCEL
412  *fmt++ = AV_PIX_FMT_VAAPI;
413 #endif
414 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
415  *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
416 #endif
417 #if CONFIG_HEVC_NVDEC_HWACCEL
418  *fmt++ = AV_PIX_FMT_CUDA;
419 #endif
420  break;
421  case AV_PIX_FMT_YUV444P:
422 #if CONFIG_HEVC_VDPAU_HWACCEL
423  *fmt++ = AV_PIX_FMT_VDPAU;
424 #endif
425 #if CONFIG_HEVC_NVDEC_HWACCEL
426  *fmt++ = AV_PIX_FMT_CUDA;
427 #endif
428  break;
429  case AV_PIX_FMT_YUV422P:
430  case AV_PIX_FMT_YUV422P10LE:
431 #if CONFIG_HEVC_VAAPI_HWACCEL
432  *fmt++ = AV_PIX_FMT_VAAPI;
433 #endif
434  break;
435  case AV_PIX_FMT_YUV420P12:
436  case AV_PIX_FMT_YUV444P10:
437  case AV_PIX_FMT_YUV444P12:
438 #if CONFIG_HEVC_NVDEC_HWACCEL
439  *fmt++ = AV_PIX_FMT_CUDA;
440 #endif
441  break;
442  }
443 
444  *fmt++ = sps->pix_fmt;
445  *fmt = AV_PIX_FMT_NONE;
446 
447  return ff_thread_get_format(s->avctx, pix_fmts);
448 }
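/* The candidate list built above puts hardware pixel formats first and the
 * native software format last, terminated by AV_PIX_FMT_NONE;
 * ff_thread_get_format() forwards it to the user's get_format() callback,
 * which selects the first entry it can handle. */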
449 
450 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
451  enum AVPixelFormat pix_fmt)
452 {
453  int ret, i;
454 
455  pic_arrays_free(s);
456  s->ps.sps = NULL;
457  s->ps.vps = NULL;
458 
459  if (!sps)
460  return 0;
461 
462  ret = pic_arrays_init(s, sps);
463  if (ret < 0)
464  goto fail;
465 
466  export_stream_params(s, sps);
467 
468  s->avctx->pix_fmt = pix_fmt;
469 
470  ff_hevc_pred_init(&s->hpc, sps->bit_depth);
471  ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
472  ff_videodsp_init (&s->vdsp, sps->bit_depth);
473 
474  for (i = 0; i < 3; i++) {
475  av_freep(&s->sao_pixel_buffer_h[i]);
476  av_freep(&s->sao_pixel_buffer_v[i]);
477  }
478 
479  if (sps->sao_enabled && !s->avctx->hwaccel) {
480  int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
481  int c_idx;
482 
483  for(c_idx = 0; c_idx < c_count; c_idx++) {
484  int w = sps->width >> sps->hshift[c_idx];
485  int h = sps->height >> sps->vshift[c_idx];
486  s->sao_pixel_buffer_h[c_idx] =
487  av_malloc((w * 2 * sps->ctb_height) <<
488  sps->pixel_shift);
489  s->sao_pixel_buffer_v[c_idx] =
490  av_malloc((h * 2 * sps->ctb_width) <<
491  sps->pixel_shift);
492  }
493  }
494 
495  s->ps.sps = sps;
496  s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
497 
498  return 0;
499 
500 fail:
501  pic_arrays_free(s);
502  s->ps.sps = NULL;
503  return ret;
504 }
505 
506 static int hls_slice_header(HEVCContext *s)
507 {
508  GetBitContext *gb = &s->HEVClc->gb;
509  SliceHeader *sh = &s->sh;
510  int i, ret;
511 
512  // Coded parameters
513  sh->first_slice_in_pic_flag = get_bits1(gb);
514  if (s->ref && sh->first_slice_in_pic_flag) {
515  av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
516  return 1; // This slice will be skipped later, do not corrupt state
517  }
518 
519  if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
520  s->seq_decode = (s->seq_decode + 1) & 0xff;
521  s->max_ra = INT_MAX;
522  if (IS_IDR(s))
523  ff_hevc_clear_refs(s);
524  }
526  if (IS_IRAP(s))
527  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
528 
529  sh->pps_id = get_ue_golomb_long(gb);
530  if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
531  av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
532  return AVERROR_INVALIDDATA;
533  }
534  if (!sh->first_slice_in_pic_flag &&
535  s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
536  av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
537  return AVERROR_INVALIDDATA;
538  }
539  s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
540  if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
541  sh->no_output_of_prior_pics_flag = 1;
542 
543  if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
544  const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
545  const HEVCSPS *last_sps = s->ps.sps;
546  enum AVPixelFormat pix_fmt;
547 
548  if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
549  if (sps->width != last_sps->width || sps->height != last_sps->height ||
550  sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
551  last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
552  sh->no_output_of_prior_pics_flag = 0;
553  }
554  ff_hevc_clear_refs(s);
555 
556  ret = set_sps(s, sps, sps->pix_fmt);
557  if (ret < 0)
558  return ret;
559 
560  pix_fmt = get_format(s, sps);
561  if (pix_fmt < 0)
562  return pix_fmt;
563  s->avctx->pix_fmt = pix_fmt;
564 
565  s->seq_decode = (s->seq_decode + 1) & 0xff;
566  s->max_ra = INT_MAX;
567  }
568 
569  sh->dependent_slice_segment_flag = 0;
570  if (!sh->first_slice_in_pic_flag) {
571  int slice_address_length;
572 
573  if (s->ps.pps->dependent_slice_segments_enabled_flag)
574  sh->dependent_slice_segment_flag = get_bits1(gb);
575 
576  slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
577  s->ps.sps->ctb_height);
578  sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
579  if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
580  av_log(s->avctx, AV_LOG_ERROR,
581  "Invalid slice segment address: %u.\n",
582  sh->slice_segment_addr);
583  return AVERROR_INVALIDDATA;
584  }
585 
586  if (!sh->dependent_slice_segment_flag) {
587  sh->slice_addr = sh->slice_segment_addr;
588  s->slice_idx++;
589  }
590  } else {
591  sh->slice_segment_addr = sh->slice_addr = 0;
592  s->slice_idx = 0;
593  s->slice_initialized = 0;
594  }
595 
596  if (!sh->dependent_slice_segment_flag) {
597  s->slice_initialized = 0;
598 
599  for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
600  skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
601 
602  sh->slice_type = get_ue_golomb_long(gb);
603  if (!(sh->slice_type == HEVC_SLICE_I ||
604  sh->slice_type == HEVC_SLICE_P ||
605  sh->slice_type == HEVC_SLICE_B)) {
606  av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
607  sh->slice_type);
608  return AVERROR_INVALIDDATA;
609  }
610  if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
611  av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
612  return AVERROR_INVALIDDATA;
613  }
614 
615  // when flag is not present, picture is inferred to be output
616  sh->pic_output_flag = 1;
617  if (s->ps.pps->output_flag_present_flag)
618  sh->pic_output_flag = get_bits1(gb);
619 
620  if (s->ps.sps->separate_colour_plane_flag)
621  sh->colour_plane_id = get_bits(gb, 2);
622 
623  if (!IS_IDR(s)) {
624  int poc, pos;
625 
626  sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
627  poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
628  if (!sh->first_slice_in_pic_flag && poc != s->poc) {
629  av_log(s->avctx, AV_LOG_WARNING,
630  "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
631  if (s->avctx->err_recognition & AV_EF_EXPLODE)
632  return AVERROR_INVALIDDATA;
633  poc = s->poc;
634  }
635  s->poc = poc;
636 
637  sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
638  pos = get_bits_left(gb);
639  if (!sh->short_term_ref_pic_set_sps_flag) {
640  ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
641  if (ret < 0)
642  return ret;
643 
644  sh->short_term_rps = &sh->slice_rps;
645  } else {
646  int numbits, rps_idx;
647 
648  if (!s->ps.sps->nb_st_rps) {
649  av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
650  return AVERROR_INVALIDDATA;
651  }
652 
653  numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
654  rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
655  sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
656  }
657  sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
658 
659  pos = get_bits_left(gb);
660  ret = decode_lt_rps(s, &sh->long_term_rps, gb);
661  if (ret < 0) {
662  av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
663  if (s->avctx->err_recognition & AV_EF_EXPLODE)
664  return AVERROR_INVALIDDATA;
665  }
666  sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
667 
668  if (s->ps.sps->sps_temporal_mvp_enabled_flag)
669  sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
670  else
671  sh->slice_temporal_mvp_enabled_flag = 0;
672  } else {
673  s->sh.short_term_rps = NULL;
674  s->poc = 0;
675  }
676 
677  /* 8.3.1 */
678  if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
679  s->nal_unit_type != HEVC_NAL_TRAIL_N &&
680  s->nal_unit_type != HEVC_NAL_TSA_N &&
681  s->nal_unit_type != HEVC_NAL_STSA_N &&
682  s->nal_unit_type != HEVC_NAL_RADL_N &&
683  s->nal_unit_type != HEVC_NAL_RADL_R &&
684  s->nal_unit_type != HEVC_NAL_RASL_N &&
685  s->nal_unit_type != HEVC_NAL_RASL_R)
686  s->pocTid0 = s->poc;
687 
688  if (s->ps.sps->sao_enabled) {
689  sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
690  if (s->ps.sps->chroma_format_idc) {
691  sh->slice_sample_adaptive_offset_flag[1] =
692  sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
693  }
694  } else {
695  sh->slice_sample_adaptive_offset_flag[0] = 0;
696  sh->slice_sample_adaptive_offset_flag[1] = 0;
697  sh->slice_sample_adaptive_offset_flag[2] = 0;
698  }
699 
700  sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
701  if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
702  int nb_refs;
703 
704  sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
705  if (sh->slice_type == HEVC_SLICE_B)
706  sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
707 
708  if (get_bits1(gb)) { // num_ref_idx_active_override_flag
709  sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
710  if (sh->slice_type == HEVC_SLICE_B)
711  sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
712  }
713  if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
714  av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
715  sh->nb_refs[L0], sh->nb_refs[L1]);
716  return AVERROR_INVALIDDATA;
717  }
718 
719  sh->rpl_modification_flag[0] = 0;
720  sh->rpl_modification_flag[1] = 0;
721  nb_refs = ff_hevc_frame_nb_refs(s);
722  if (!nb_refs) {
723  av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
724  return AVERROR_INVALIDDATA;
725  }
726 
727  if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
728  sh->rpl_modification_flag[0] = get_bits1(gb);
729  if (sh->rpl_modification_flag[0]) {
730  for (i = 0; i < sh->nb_refs[L0]; i++)
731  sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
732  }
733 
734  if (sh->slice_type == HEVC_SLICE_B) {
735  sh->rpl_modification_flag[1] = get_bits1(gb);
736  if (sh->rpl_modification_flag[1] == 1)
737  for (i = 0; i < sh->nb_refs[L1]; i++)
738  sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
739  }
740  }
741 
742  if (sh->slice_type == HEVC_SLICE_B)
743  sh->mvd_l1_zero_flag = get_bits1(gb);
744 
745  if (s->ps.pps->cabac_init_present_flag)
746  sh->cabac_init_flag = get_bits1(gb);
747  else
748  sh->cabac_init_flag = 0;
749 
750  sh->collocated_ref_idx = 0;
751  if (sh->slice_temporal_mvp_enabled_flag) {
752  sh->collocated_list = L0;
753  if (sh->slice_type == HEVC_SLICE_B)
754  sh->collocated_list = !get_bits1(gb);
755 
756  if (sh->nb_refs[sh->collocated_list] > 1) {
757  sh->collocated_ref_idx = get_ue_golomb_long(gb);
758  if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
759  av_log(s->avctx, AV_LOG_ERROR,
760  "Invalid collocated_ref_idx: %d.\n",
761  sh->collocated_ref_idx);
762  return AVERROR_INVALIDDATA;
763  }
764  }
765  }
766 
767  if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
768  (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
769  int ret = pred_weight_table(s, gb);
770  if (ret < 0)
771  return ret;
772  }
773 
774  sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
775  if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
776  av_log(s->avctx, AV_LOG_ERROR,
777  "Invalid number of merging MVP candidates: %d.\n",
778  sh->max_num_merge_cand);
779  return AVERROR_INVALIDDATA;
780  }
781  }
782 
783  sh->slice_qp_delta = get_se_golomb(gb);
784 
785  if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
786  sh->slice_cb_qp_offset = get_se_golomb(gb);
787  sh->slice_cr_qp_offset = get_se_golomb(gb);
788  if (sh->slice_cb_qp_offset < -12 || sh->slice_cb_qp_offset > 12 ||
789  sh->slice_cr_qp_offset < -12 || sh->slice_cr_qp_offset > 12) {
790  av_log(s->avctx, AV_LOG_ERROR, "Invalid slice cx qp offset.\n");
791  return AVERROR_INVALIDDATA;
792  }
793  } else {
794  sh->slice_cb_qp_offset = 0;
795  sh->slice_cr_qp_offset = 0;
796  }
797 
798  if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
799  sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
800  else
801  sh->cu_chroma_qp_offset_enabled_flag = 0;
802 
803  if (s->ps.pps->deblocking_filter_control_present_flag) {
804  int deblocking_filter_override_flag = 0;
805 
806  if (s->ps.pps->deblocking_filter_override_enabled_flag)
807  deblocking_filter_override_flag = get_bits1(gb);
808 
809  if (deblocking_filter_override_flag) {
810  sh->disable_deblocking_filter_flag = get_bits1(gb);
811  if (!sh->disable_deblocking_filter_flag) {
812  int beta_offset_div2 = get_se_golomb(gb);
813  int tc_offset_div2 = get_se_golomb(gb) ;
814  if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
815  tc_offset_div2 < -6 || tc_offset_div2 > 6) {
816  av_log(s->avctx, AV_LOG_ERROR,
817  "Invalid deblock filter offsets: %d, %d\n",
818  beta_offset_div2, tc_offset_div2);
819  return AVERROR_INVALIDDATA;
820  }
821  sh->beta_offset = beta_offset_div2 * 2;
822  sh->tc_offset = tc_offset_div2 * 2;
823  }
824  } else {
825  sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
826  sh->beta_offset = s->ps.pps->beta_offset;
827  sh->tc_offset = s->ps.pps->tc_offset;
828  }
829  } else {
830  sh->disable_deblocking_filter_flag = 0;
831  sh->beta_offset = 0;
832  sh->tc_offset = 0;
833  }
834 
835  if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
836  (sh->slice_sample_adaptive_offset_flag[0] ||
837  sh->slice_sample_adaptive_offset_flag[1] ||
838  !sh->disable_deblocking_filter_flag)) {
839  sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
840  } else {
841  sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
842  }
843  } else if (!s->slice_initialized) {
844  av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
845  return AVERROR_INVALIDDATA;
846  }
847 
848  sh->num_entry_point_offsets = 0;
849  if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
850  unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
851  // It would be possible to bound this tighter but this here is simpler
852  if (num_entry_point_offsets > get_bits_left(gb)) {
853  av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
854  return AVERROR_INVALIDDATA;
855  }
856 
857  sh->num_entry_point_offsets = num_entry_point_offsets;
858  if (sh->num_entry_point_offsets > 0) {
859  int offset_len = get_ue_golomb_long(gb) + 1;
860 
861  if (offset_len < 1 || offset_len > 32) {
862  sh->num_entry_point_offsets = 0;
863  av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
864  return AVERROR_INVALIDDATA;
865  }
866 
867  av_freep(&sh->entry_point_offset);
868  av_freep(&sh->offset);
869  av_freep(&sh->size);
870  sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
871  sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
872  sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
873  if (!sh->entry_point_offset || !sh->offset || !sh->size) {
874  sh->num_entry_point_offsets = 0;
875  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
876  return AVERROR(ENOMEM);
877  }
878  for (i = 0; i < sh->num_entry_point_offsets; i++) {
879  unsigned val = get_bits_long(gb, offset_len);
880  sh->entry_point_offset[i] = val + 1; // +1 to get the size
881  }
882  if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
883  s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
884  s->threads_number = 1;
885  } else
886  s->enable_parallel_tiles = 0;
887  } else
888  s->enable_parallel_tiles = 0;
889  }
890 
891  if (s->ps.pps->slice_header_extension_present_flag) {
892  unsigned int length = get_ue_golomb_long(gb);
893  if (length*8LL > get_bits_left(gb)) {
894  av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
895  return AVERROR_INVALIDDATA;
896  }
897  for (i = 0; i < length; i++)
898  skip_bits(gb, 8); // slice_header_extension_data_byte
899  }
900 
901  // Inferred parameters
902  sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
903  if (sh->slice_qp > 51 ||
904  sh->slice_qp < -s->ps.sps->qp_bd_offset) {
905  av_log(s->avctx, AV_LOG_ERROR,
906  "The slice_qp %d is outside the valid range "
907  "[%d, 51].\n",
908  sh->slice_qp,
909  -s->ps.sps->qp_bd_offset);
910  return AVERROR_INVALIDDATA;
911  }
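/* slice_qp computed above is SliceQpY from the spec: 26 + pic_init_qp_minus26 +
 * slice_qp_delta, and must lie in [-QpBdOffsetY, 51] with
 * QpBdOffsetY = 6 * (bit_depth - 8), stored here as qp_bd_offset. */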
912 
913  sh->slice_ctb_addr_rs = sh->slice_segment_addr;
914 
915  if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
916  av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
917  return AVERROR_INVALIDDATA;
918  }
919 
920  if (get_bits_left(gb) < 0) {
921  av_log(s->avctx, AV_LOG_ERROR,
922  "Overread slice header by %d bits\n", -get_bits_left(gb));
923  return AVERROR_INVALIDDATA;
924  }
925 
926  s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
927 
928  if (!s->ps.pps->cu_qp_delta_enabled_flag)
929  s->HEVClc->qp_y = s->sh.slice_qp;
930 
931  s->slice_initialized = 1;
932  s->HEVClc->tu.cu_qp_offset_cb = 0;
933  s->HEVClc->tu.cu_qp_offset_cr = 0;
934 
935  return 0;
936 }
937 
938 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
939 
940 #define SET_SAO(elem, value) \
941 do { \
942  if (!sao_merge_up_flag && !sao_merge_left_flag) \
943  sao->elem = value; \
944  else if (sao_merge_left_flag) \
945  sao->elem = CTB(s->sao, rx-1, ry).elem; \
946  else if (sao_merge_up_flag) \
947  sao->elem = CTB(s->sao, rx, ry-1).elem; \
948  else \
949  sao->elem = 0; \
950 } while (0)
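/* SET_SAO implements the SAO merge semantics: when sao_merge_left_flag or
 * sao_merge_up_flag was decoded for this CTB, every SAO parameter is copied
 * from the left or above CTB instead of being parsed from the bitstream. */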
951 
952 static void hls_sao_param(HEVCContext *s, int rx, int ry)
953 {
954  HEVCLocalContext *lc = s->HEVClc;
955  int sao_merge_left_flag = 0;
956  int sao_merge_up_flag = 0;
957  SAOParams *sao = &CTB(s->sao, rx, ry);
958  int c_idx, i;
959 
960  if (s->sh.slice_sample_adaptive_offset_flag[0] ||
961  s->sh.slice_sample_adaptive_offset_flag[1]) {
962  if (rx > 0) {
963  if (lc->ctb_left_flag)
964  sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
965  }
966  if (ry > 0 && !sao_merge_left_flag) {
967  if (lc->ctb_up_flag)
968  sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
969  }
970  }
971 
972  for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
973  int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
974  s->ps.pps->log2_sao_offset_scale_chroma;
975 
976  if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
977  sao->type_idx[c_idx] = SAO_NOT_APPLIED;
978  continue;
979  }
980 
981  if (c_idx == 2) {
982  sao->type_idx[2] = sao->type_idx[1];
983  sao->eo_class[2] = sao->eo_class[1];
984  } else {
985  SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
986  }
987 
988  if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
989  continue;
990 
991  for (i = 0; i < 4; i++)
992  SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
993 
994  if (sao->type_idx[c_idx] == SAO_BAND) {
995  for (i = 0; i < 4; i++) {
996  if (sao->offset_abs[c_idx][i]) {
997  SET_SAO(offset_sign[c_idx][i],
998  ff_hevc_sao_offset_sign_decode(s));
999  } else {
1000  sao->offset_sign[c_idx][i] = 0;
1001  }
1002  }
1003  SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
1004  } else if (c_idx != 2) {
1005  SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
1006  }
1007 
1008  // Inferred parameters
1009  sao->offset_val[c_idx][0] = 0;
1010  for (i = 0; i < 4; i++) {
1011  sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
1012  if (sao->type_idx[c_idx] == SAO_EDGE) {
1013  if (i > 1)
1014  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1015  } else if (sao->offset_sign[c_idx][i]) {
1016  sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
1017  }
1018  sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1019  }
1020  }
1021 }
1022 
1023 #undef SET_SAO
1024 #undef CTB
1025 
1026 static int hls_cross_component_pred(HEVCContext *s, int idx) {
1027  HEVCLocalContext *lc = s->HEVClc;
1028  int log2_res_scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
1029 
1030  if (log2_res_scale_abs_plus1 != 0) {
1031  int res_scale_sign_flag = ff_hevc_res_scale_sign_flag(s, idx);
1032  lc->tu.res_scale_val = (1 << (log2_res_scale_abs_plus1 - 1)) *
1033  (1 - 2 * res_scale_sign_flag);
1034  } else {
1035  lc->tu.res_scale_val = 0;
1036  }
1037 
1038 
1039  return 0;
1040 }
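/* Cross-component prediction (range extensions): res_scale_val becomes
 * +/- 2^(log2_res_scale_abs_plus1 - 1), and hls_transform_unit() later adds
 * (res_scale_val * luma_residual) >> 3 to the chroma residual. */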
1041 
1042 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1043  int xBase, int yBase, int cb_xBase, int cb_yBase,
1044  int log2_cb_size, int log2_trafo_size,
1045  int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1046 {
1047  HEVCLocalContext *lc = s->HEVClc;
1048  const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1049  int i;
1050 
1051  if (lc->cu.pred_mode == MODE_INTRA) {
1052  int trafo_size = 1 << log2_trafo_size;
1053  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1054 
1055  s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1056  }
1057 
1058  if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1059  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1060  int scan_idx = SCAN_DIAG;
1061  int scan_idx_c = SCAN_DIAG;
1062  int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1063  (s->ps.sps->chroma_format_idc == 2 &&
1064  (cbf_cb[1] || cbf_cr[1]));
1065 
1066  if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1067  lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1068  if (lc->tu.cu_qp_delta != 0)
1069  if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1070  lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1071  lc->tu.is_cu_qp_delta_coded = 1;
1072 
1073  if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1074  lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1075  av_log(s->avctx, AV_LOG_ERROR,
1076  "The cu_qp_delta %d is outside the valid range "
1077  "[%d, %d].\n",
1078  lc->tu.cu_qp_delta,
1079  -(26 + s->ps.sps->qp_bd_offset / 2),
1080  (25 + s->ps.sps->qp_bd_offset / 2));
1081  return AVERROR_INVALIDDATA;
1082  }
1083 
1084  ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
1085  }
1086 
1087  if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
1088  !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
1089  int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
1090  if (cu_chroma_qp_offset_flag) {
1091  int cu_chroma_qp_offset_idx = 0;
1092  if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
1093  cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
1094  av_log(s->avctx, AV_LOG_ERROR,
1095  "cu_chroma_qp_offset_idx not yet tested.\n");
1096  }
1097  lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
1098  lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
1099  } else {
1100  lc->tu.cu_qp_offset_cb = 0;
1101  lc->tu.cu_qp_offset_cr = 0;
1102  }
1103  lc->tu.is_cu_chroma_qp_offset_coded = 1;
1104  }
1105 
1106  if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1107  if (lc->tu.intra_pred_mode >= 6 &&
1108  lc->tu.intra_pred_mode <= 14) {
1109  scan_idx = SCAN_VERT;
1110  } else if (lc->tu.intra_pred_mode >= 22 &&
1111  lc->tu.intra_pred_mode <= 30) {
1112  scan_idx = SCAN_HORIZ;
1113  }
1114 
1115  if (lc->tu.intra_pred_mode_c >= 6 &&
1116  lc->tu.intra_pred_mode_c <= 14) {
1117  scan_idx_c = SCAN_VERT;
1118  } else if (lc->tu.intra_pred_mode_c >= 22 &&
1119  lc->tu.intra_pred_mode_c <= 30) {
1120  scan_idx_c = SCAN_HORIZ;
1121  }
1122  }
1123 
1124  lc->tu.cross_pf = 0;
1125 
1126  if (cbf_luma)
1127  ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1128  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1129  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1130  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1131  lc->tu.cross_pf = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
1132  (lc->cu.pred_mode == MODE_INTER ||
1133  (lc->tu.chroma_mode_c == 4)));
1134 
1135  if (lc->tu.cross_pf) {
1136  hls_cross_component_pred(s, 0);
1137  }
1138  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1139  if (lc->cu.pred_mode == MODE_INTRA) {
1140  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1141  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1142  }
1143  if (cbf_cb[i])
1144  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1145  log2_trafo_size_c, scan_idx_c, 1);
1146  else
1147  if (lc->tu.cross_pf) {
1148  ptrdiff_t stride = s->frame->linesize[1];
1149  int hshift = s->ps.sps->hshift[1];
1150  int vshift = s->ps.sps->vshift[1];
1151  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1152  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1153  int size = 1 << log2_trafo_size_c;
1154 
1155  uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
1156  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1157  for (i = 0; i < (size * size); i++) {
1158  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1159  }
1160  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1161  }
1162  }
1163 
1164  if (lc->tu.cross_pf) {
1165  hls_cross_component_pred(s, 1);
1166  }
1167  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1168  if (lc->cu.pred_mode == MODE_INTRA) {
1169  ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
1170  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1171  }
1172  if (cbf_cr[i])
1173  ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
1174  log2_trafo_size_c, scan_idx_c, 2);
1175  else
1176  if (lc->tu.cross_pf) {
1177  ptrdiff_t stride = s->frame->linesize[2];
1178  int hshift = s->ps.sps->hshift[2];
1179  int vshift = s->ps.sps->vshift[2];
1180  int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
1181  int16_t *coeffs = (int16_t*)lc->edge_emu_buffer2;
1182  int size = 1 << log2_trafo_size_c;
1183 
1184  uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
1185  ((x0 >> hshift) << s->ps.sps->pixel_shift)];
1186  for (i = 0; i < (size * size); i++) {
1187  coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
1188  }
1189  s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
1190  }
1191  }
1192  } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
1193  int trafo_size_h = 1 << (log2_trafo_size + 1);
1194  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1195  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1196  if (lc->cu.pred_mode == MODE_INTRA) {
1197  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1198  trafo_size_h, trafo_size_v);
1199  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1200  }
1201  if (cbf_cb[i])
1202  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1203  log2_trafo_size, scan_idx_c, 1);
1204  }
1205  for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
1206  if (lc->cu.pred_mode == MODE_INTRA) {
1207  ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
1208  trafo_size_h, trafo_size_v);
1209  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1210  }
1211  if (cbf_cr[i])
1212  ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
1213  log2_trafo_size, scan_idx_c, 2);
1214  }
1215  }
1216  } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
1217  if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
1218  int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1219  int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1220  ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
1221  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
1222  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
1223  if (s->ps.sps->chroma_format_idc == 2) {
1224  ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
1225  trafo_size_h, trafo_size_v);
1226  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1227  s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1228  }
1229  } else if (blk_idx == 3) {
1230  int trafo_size_h = 1 << (log2_trafo_size + 1);
1231  int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1232  ff_hevc_set_neighbour_available(s, xBase, yBase,
1233  trafo_size_h, trafo_size_v);
1234  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1235  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1236  if (s->ps.sps->chroma_format_idc == 2) {
1237  ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
1238  trafo_size_h, trafo_size_v);
1239  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1240  s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1241  }
1242  }
1243  }
1244 
1245  return 0;
1246 }
1247 
1248 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1249 {
1250  int cb_size = 1 << log2_cb_size;
1251  int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1252 
1253  int min_pu_width = s->ps.sps->min_pu_width;
1254  int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1255  int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1256  int i, j;
1257 
1258  for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1259  for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1260  s->is_pcm[i + j * min_pu_width] = 2;
1261 }
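/* Marks every min-PU position covered by a transquant-bypass (lossless) CU
 * with the value 2 in is_pcm so that the deblocking filter later leaves these
 * samples untouched, the same way it skips PCM blocks. */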
1262 
1263 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1264  int xBase, int yBase, int cb_xBase, int cb_yBase,
1265  int log2_cb_size, int log2_trafo_size,
1266  int trafo_depth, int blk_idx,
1267  const int *base_cbf_cb, const int *base_cbf_cr)
1268 {
1269  HEVCLocalContext *lc = s->HEVClc;
1270  uint8_t split_transform_flag;
1271  int cbf_cb[2];
1272  int cbf_cr[2];
1273  int ret;
1274 
1275  cbf_cb[0] = base_cbf_cb[0];
1276  cbf_cb[1] = base_cbf_cb[1];
1277  cbf_cr[0] = base_cbf_cr[0];
1278  cbf_cr[1] = base_cbf_cr[1];
1279 
1280  if (lc->cu.intra_split_flag) {
1281  if (trafo_depth == 1) {
1282  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1283  if (s->ps.sps->chroma_format_idc == 3) {
1284  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
1285  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[blk_idx];
1286  } else {
1287  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1288  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1289  }
1290  }
1291  } else {
1292  lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[0];
1293  lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
1294  lc->tu.chroma_mode_c = lc->pu.chroma_mode_c[0];
1295  }
1296 
1297  if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1298  log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1299  trafo_depth < lc->cu.max_trafo_depth &&
1300  !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1301  split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1302  } else {
1303  int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1304  lc->cu.pred_mode == MODE_INTER &&
1305  lc->cu.part_mode != PART_2Nx2N &&
1306  trafo_depth == 0;
1307 
1308  split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1309  (lc->cu.intra_split_flag && trafo_depth == 0) ||
1310  inter_split;
1311  }
1312 
1313  if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
1314  if (trafo_depth == 0 || cbf_cb[0]) {
1315  cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1316  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1317  cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1318  }
1319  }
1320 
1321  if (trafo_depth == 0 || cbf_cr[0]) {
1322  cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1323  if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
1324  cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1325  }
1326  }
1327  }
1328 
1329  if (split_transform_flag) {
1330  const int trafo_size_split = 1 << (log2_trafo_size - 1);
1331  const int x1 = x0 + trafo_size_split;
1332  const int y1 = y0 + trafo_size_split;
1333 
1334 #define SUBDIVIDE(x, y, idx) \
1335 do { \
1336  ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1337  log2_trafo_size - 1, trafo_depth + 1, idx, \
1338  cbf_cb, cbf_cr); \
1339  if (ret < 0) \
1340  return ret; \
1341 } while (0)
1342 
1343  SUBDIVIDE(x0, y0, 0);
1344  SUBDIVIDE(x1, y0, 1);
1345  SUBDIVIDE(x0, y1, 2);
1346  SUBDIVIDE(x1, y1, 3);
1347 
1348 #undef SUBDIVIDE
1349  } else {
1350  int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1351  int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1352  int min_tu_width = s->ps.sps->min_tb_width;
1353  int cbf_luma = 1;
1354 
1355  if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1356  cbf_cb[0] || cbf_cr[0] ||
1357  (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
1358  cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1359  }
1360 
1361  ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1362  log2_cb_size, log2_trafo_size,
1363  blk_idx, cbf_luma, cbf_cb, cbf_cr);
1364  if (ret < 0)
1365  return ret;
1366  // TODO: store cbf_luma somewhere else
1367  if (cbf_luma) {
1368  int i, j;
1369  for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1370  for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1371  int x_tu = (x0 + j) >> log2_min_tu_size;
1372  int y_tu = (y0 + i) >> log2_min_tu_size;
1373  s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1374  }
1375  }
1376  if (!s->sh.disable_deblocking_filter_flag) {
1377  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1378  if (s->ps.pps->transquant_bypass_enable_flag &&
1379  lc->cu.cu_transquant_bypass_flag)
1380  set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1381  }
1382  }
1383  return 0;
1384 }
1385 
1386 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1387 {
1388  HEVCLocalContext *lc = s->HEVClc;
1389  GetBitContext gb;
1390  int cb_size = 1 << log2_cb_size;
1391  ptrdiff_t stride0 = s->frame->linesize[0];
1392  ptrdiff_t stride1 = s->frame->linesize[1];
1393  ptrdiff_t stride2 = s->frame->linesize[2];
1394  uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1395  uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1396  uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1397 
1398  int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
1399  (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
1400  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
1401  s->ps.sps->pcm.bit_depth_chroma;
1402  const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1403  int ret;
1404 
1405  if (!s->sh.disable_deblocking_filter_flag)
1406  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1407 
1408  ret = init_get_bits(&gb, pcm, length);
1409  if (ret < 0)
1410  return ret;
1411 
1412  s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1413  if (s->ps.sps->chroma_format_idc) {
1414  s->hevcdsp.put_pcm(dst1, stride1,
1415  cb_size >> s->ps.sps->hshift[1],
1416  cb_size >> s->ps.sps->vshift[1],
1417  &gb, s->ps.sps->pcm.bit_depth_chroma);
1418  s->hevcdsp.put_pcm(dst2, stride2,
1419  cb_size >> s->ps.sps->hshift[2],
1420  cb_size >> s->ps.sps->vshift[2],
1421  &gb, s->ps.sps->pcm.bit_depth_chroma);
1422  }
1423 
1424  return 0;
1425 }
1426 
1427 /**
1428  * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
1429  *
1430  * @param s HEVC decoding context
1431  * @param dst target buffer for block data at block position
1432  * @param dststride stride of the dst buffer
1433  * @param ref reference picture buffer at origin (0, 0)
1434  * @param mv motion vector (relative to block position) to get pixel data from
1435  * @param x_off horizontal position of block from origin (0, 0)
1436  * @param y_off vertical position of block from origin (0, 0)
1437  * @param block_w width of block
1438  * @param block_h height of block
1439  * @param luma_weight weighting factor applied to the luma prediction
1440  * @param luma_offset additive offset applied to the luma prediction value
1441  */
1442 
1443 static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1444  AVFrame *ref, const Mv *mv, int x_off, int y_off,
1445  int block_w, int block_h, int luma_weight, int luma_offset)
1446 {
1447  HEVCLocalContext *lc = s->HEVClc;
1448  uint8_t *src = ref->data[0];
1449  ptrdiff_t srcstride = ref->linesize[0];
1450  int pic_width = s->ps.sps->width;
1451  int pic_height = s->ps.sps->height;
1452  int mx = mv->x & 3;
1453  int my = mv->y & 3;
1454  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1455  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1456  int idx = ff_hevc_pel_weight[block_w];
1457 
1458  x_off += mv->x >> 2;
1459  y_off += mv->y >> 2;
1460  src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1461 
1462  if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
1463  x_off >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1464  y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1465  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1466  int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1467  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1468 
1469  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1470  edge_emu_stride, srcstride,
1471  block_w + QPEL_EXTRA,
1472  block_h + QPEL_EXTRA,
1473  x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
1474  pic_width, pic_height);
1475  src = lc->edge_emu_buffer + buf_offset;
1476  srcstride = edge_emu_stride;
1477  }
1478 
1479  if (!weight_flag)
1480  s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
1481  block_h, mx, my, block_w);
1482  else
1483  s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
1484  block_h, s->sh.luma_log2_weight_denom,
1485  luma_weight, luma_offset, mx, my, block_w);
1486 }
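/* Luma MVs have quarter-pel precision: mv >> 2 is the integer displacement and
 * mv & 3 the fractional phase selecting the 8-tap qpel filter. When the block
 * plus the filter margin (QPEL_EXTRA rows/columns) reaches outside the
 * picture, emulated_edge_mc() copies the area into edge_emu_buffer with border
 * replication before filtering. */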
1487 
1488 /**
1489  * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
1490  *
1491  * @param s HEVC decoding context
1492  * @param dst target buffer for block data at block position
1493  * @param dststride stride of the dst buffer
1494  * @param ref0 reference picture0 buffer at origin (0, 0)
1495  * @param mv0 motion vector0 (relative to block position) to get pixel data from
1496  * @param x_off horizontal position of block from origin (0, 0)
1497  * @param y_off vertical position of block from origin (0, 0)
1498  * @param block_w width of block
1499  * @param block_h height of block
1500  * @param ref1 reference picture1 buffer at origin (0, 0)
1501  * @param mv1 motion vector1 (relative to block position) to get pixel data from
1502  * @param current_mv current motion vector structure
1503  */
1504  static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
1505  AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1506  int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1507 {
1508  HEVCLocalContext *lc = s->HEVClc;
1509  ptrdiff_t src0stride = ref0->linesize[0];
1510  ptrdiff_t src1stride = ref1->linesize[0];
1511  int pic_width = s->ps.sps->width;
1512  int pic_height = s->ps.sps->height;
1513  int mx0 = mv0->x & 3;
1514  int my0 = mv0->y & 3;
1515  int mx1 = mv1->x & 3;
1516  int my1 = mv1->y & 3;
1517  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1518  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1519  int x_off0 = x_off + (mv0->x >> 2);
1520  int y_off0 = y_off + (mv0->y >> 2);
1521  int x_off1 = x_off + (mv1->x >> 2);
1522  int y_off1 = y_off + (mv1->y >> 2);
1523  int idx = ff_hevc_pel_weight[block_w];
1524 
1525  uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1526  uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1527 
1528  if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
1529  x_off0 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1530  y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1531  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1532  int offset = QPEL_EXTRA_BEFORE * src0stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1533  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1534 
1535  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
1536  edge_emu_stride, src0stride,
1537  block_w + QPEL_EXTRA,
1538  block_h + QPEL_EXTRA,
1539  x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
1540  pic_width, pic_height);
1541  src0 = lc->edge_emu_buffer + buf_offset;
1542  src0stride = edge_emu_stride;
1543  }
1544 
1545  if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
1546  x_off1 >= pic_width - block_w - QPEL_EXTRA_AFTER ||
1547  y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
1548  const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1549  int offset = QPEL_EXTRA_BEFORE * src1stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1550  int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
1551 
1552  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
1553  edge_emu_stride, src1stride,
1554  block_w + QPEL_EXTRA,
1555  block_h + QPEL_EXTRA,
1556  x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
1557  pic_width, pic_height);
1558  src1 = lc->edge_emu_buffer2 + buf_offset;
1559  src1stride = edge_emu_stride;
1560  }
1561 
1562  s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
1563  block_h, mx0, my0, block_w);
1564  if (!weight_flag)
1565  s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1566  block_h, mx1, my1, block_w);
1567  else
1568  s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
1569  block_h, s->sh.luma_log2_weight_denom,
1570  s->sh.luma_weight_l0[current_mv->ref_idx[0]],
1571  s->sh.luma_weight_l1[current_mv->ref_idx[1]],
1572  s->sh.luma_offset_l0[current_mv->ref_idx[0]],
1573  s->sh.luma_offset_l1[current_mv->ref_idx[1]],
1574  mx1, my1, block_w);
1575 
1576 }
1577 
1578 /**
1579  * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
1580  *
1581  * @param s HEVC decoding context
1582  * @param dst1 target buffer for block data at block position (U plane)
1583  * @param dst2 target buffer for block data at block position (V plane)
1584  * @param dststride stride of the dst1 and dst2 buffers
1585  * @param ref reference picture buffer at origin (0, 0)
1586  * @param mv motion vector (relative to block position) to get pixel data from
1587  * @param x_off horizontal position of block from origin (0, 0)
1588  * @param y_off vertical position of block from origin (0, 0)
1589  * @param block_w width of block
1590  * @param block_h height of block
1591  * @param chroma_weight weighting factor applied to the chroma prediction
1592  * @param chroma_offset additive offset applied to the chroma prediction value
1593  */
1594 
1595 static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
1596  ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1597  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1598 {
1599  HEVCLocalContext *lc = s->HEVClc;
1600  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1601  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1602  const Mv *mv = &current_mv->mv[reflist];
1603  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1604  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1605  int idx = ff_hevc_pel_weight[block_w];
1606  int hshift = s->ps.sps->hshift[1];
1607  int vshift = s->ps.sps->vshift[1];
1608  intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1609  intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1610  intptr_t _mx = mx << (1 - hshift);
1611  intptr_t _my = my << (1 - vshift);
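      /* mv is in luma quarter-sample units; the low (2 + shift) bits are the
       * chroma fractional part, rescaled here to eighth-sample units (e.g.
       * 1/8 pel for 4:2:0), which is what the epel filters expect. */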
1612 
1613  x_off += mv->x >> (2 + hshift);
1614  y_off += mv->y >> (2 + vshift);
1615  src0 += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
1616 
1617  if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1618  x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1619  y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1620  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1621  int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
1622  int buf_offset0 = EPEL_EXTRA_BEFORE *
1623  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1624  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
1625  edge_emu_stride, srcstride,
1626  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1627  x_off - EPEL_EXTRA_BEFORE,
1628  y_off - EPEL_EXTRA_BEFORE,
1629  pic_width, pic_height);
1630 
1631  src0 = lc->edge_emu_buffer + buf_offset0;
1632  srcstride = edge_emu_stride;
1633  }
1634  if (!weight_flag)
1635  s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1636  block_h, _mx, _my, block_w);
1637  else
1638  s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
1639  block_h, s->sh.chroma_log2_weight_denom,
1640  chroma_weight, chroma_offset, _mx, _my, block_w);
1641 }
1642 
1643 /**
1644  * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
1645  *
1646  * @param s HEVC decoding context
1647  * @param dst0 target buffer for block data at block position
1648  * @param dststride stride of the dst0 buffer
1649  * @param ref0 reference picture0 buffer at origin (0, 0)
1650  * @param ref1 reference picture1 buffer at origin (0, 0)
1651  * @param x_off horizontal position of block from origin (0, 0), in chroma samples
1652  * @param y_off vertical position of block from origin (0, 0), in chroma samples
1653  * @param block_w width of block
1654  * @param block_h height of block
1655  * @param current_mv motion vector structure holding the list 0 and list 1
1656  *                   vectors, reference indices and weighted-prediction
1657  *                   parameters for this prediction unit
1658  * @param cidx chroma component index (0 for Cb, 1 for Cr)
1659  */
1660 static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
1661  int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1662 {
1663  HEVCLocalContext *lc = s->HEVClc;
1664  uint8_t *src1 = ref0->data[cidx+1];
1665  uint8_t *src2 = ref1->data[cidx+1];
1666  ptrdiff_t src1stride = ref0->linesize[cidx+1];
1667  ptrdiff_t src2stride = ref1->linesize[cidx+1];
1668  int weight_flag = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1669  (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1670  int pic_width = s->ps.sps->width >> s->ps.sps->hshift[1];
1671  int pic_height = s->ps.sps->height >> s->ps.sps->vshift[1];
1672  Mv *mv0 = &current_mv->mv[0];
1673  Mv *mv1 = &current_mv->mv[1];
1674  int hshift = s->ps.sps->hshift[1];
1675  int vshift = s->ps.sps->vshift[1];
1676 
1677  intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1678  intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1679  intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1680  intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1681  intptr_t _mx0 = mx0 << (1 - hshift);
1682  intptr_t _my0 = my0 << (1 - vshift);
1683  intptr_t _mx1 = mx1 << (1 - hshift);
1684  intptr_t _my1 = my1 << (1 - vshift);
1685 
1686  int x_off0 = x_off + (mv0->x >> (2 + hshift));
1687  int y_off0 = y_off + (mv0->y >> (2 + vshift));
1688  int x_off1 = x_off + (mv1->x >> (2 + hshift));
1689  int y_off1 = y_off + (mv1->y >> (2 + vshift));
1690  int idx = ff_hevc_pel_weight[block_w];
1691  src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
1692  src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);
1693 
1694  if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
1695  x_off0 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1696  y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1697  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1698  int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1699  int buf_offset1 = EPEL_EXTRA_BEFORE *
1700  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1701 
1702  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1703  edge_emu_stride, src1stride,
1704  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1705  x_off0 - EPEL_EXTRA_BEFORE,
1706  y_off0 - EPEL_EXTRA_BEFORE,
1707  pic_width, pic_height);
1708 
1709  src1 = lc->edge_emu_buffer + buf_offset1;
1710  src1stride = edge_emu_stride;
1711  }
1712 
1713  if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
1714  x_off1 >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1715  y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1716  const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1717  int offset1 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1718  int buf_offset1 = EPEL_EXTRA_BEFORE *
1719  (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1720 
1721  s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
1722  edge_emu_stride, src2stride,
1723  block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1724  x_off1 - EPEL_EXTRA_BEFORE,
1725  y_off1 - EPEL_EXTRA_BEFORE,
1726  pic_width, pic_height);
1727 
1728  src2 = lc->edge_emu_buffer2 + buf_offset1;
1729  src2stride = edge_emu_stride;
1730  }
1731 
1732  s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
1733  block_h, _mx0, _my0, block_w);
1734  if (!weight_flag)
1735  s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1736  src2, src2stride, lc->tmp,
1737  block_h, _mx1, _my1, block_w);
1738  else
1739  s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
1740  src2, src2stride, lc->tmp,
1741  block_h,
1742  s->sh.chroma_log2_weight_denom,
1743  s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
1744  s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
1745  s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
1746  s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
1747  _mx1, _my1, block_w);
1748 }
1749 
1750 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1751  const Mv *mv, int y0, int height)
1752 {
1753  if (s->threads_type == FF_THREAD_FRAME ) {
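      /* mv->y is in quarter-sample units; wait until the reference frame has
       * been reconstructed down to the bottom row of the motion-compensated
       * block, plus a margin of 9 rows covering the interpolation filter
       * extension and the in-loop filtering lag. */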
1754  int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1755 
1756  ff_thread_await_progress(&ref->tf, y, 0);
1757  }
1758 }
1759 
1760 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
1761  int nPbH, int log2_cb_size, int part_idx,
1762  int merge_idx, MvField *mv)
1763 {
1764  HEVCLocalContext *lc = s->HEVClc;
1765  enum InterPredIdc inter_pred_idc = PRED_L0;
1766  int mvp_flag;
1767 
1768  ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1769  mv->pred_flag = 0;
1770  if (s->sh.slice_type == HEVC_SLICE_B)
1771  inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1772 
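      /* AMVP (non-merge) mode: for each reference list that is used, decode a
       * reference index, a motion vector difference and an MVP candidate
       * flag, then add the MVD to the predictor selected by
       * ff_hevc_luma_mv_mvp_mode(). */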
1773  if (inter_pred_idc != PRED_L1) {
1774  if (s->sh.nb_refs[L0])
1775  mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1776 
1777  mv->pred_flag = PF_L0;
1778  ff_hevc_hls_mvd_coding(s, x0, y0, 0);
1779  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1780  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1781  part_idx, merge_idx, mv, mvp_flag, 0);
1782  mv->mv[0].x += lc->pu.mvd.x;
1783  mv->mv[0].y += lc->pu.mvd.y;
1784  }
1785 
1786  if (inter_pred_idc != PRED_L0) {
1787  if (s->sh.nb_refs[L1])
1788  mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1789 
1790  if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1791  AV_ZERO32(&lc->pu.mvd);
1792  } else {
1793  ff_hevc_hls_mvd_coding(s, x0, y0, 1);
1794  }
1795 
1796  mv->pred_flag += PF_L1;
1797  mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1798  ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1799  part_idx, merge_idx, mv, mvp_flag, 1);
1800  mv->mv[1].x += lc->pu.mvd.x;
1801  mv->mv[1].y += lc->pu.mvd.y;
1802  }
1803 }
1804 
1805 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1806  int nPbW, int nPbH,
1807  int log2_cb_size, int partIdx, int idx)
1808 {
1809 #define POS(c_idx, x, y) \
1810  &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1811  (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1812  HEVCLocalContext *lc = s->HEVClc;
1813  int merge_idx = 0;
1814  struct MvField current_mv = {{{ 0 }}};
1815 
1816  int min_pu_width = s->ps.sps->min_pu_width;
1817 
1818  MvField *tab_mvf = s->ref->tab_mvf;
1819  RefPicList *refPicList = s->ref->refPicList;
1820  HEVCFrame *ref0 = NULL, *ref1 = NULL;
1821  uint8_t *dst0 = POS(0, x0, y0);
1822  uint8_t *dst1 = POS(1, x0, y0);
1823  uint8_t *dst2 = POS(2, x0, y0);
1824  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1825  int min_cb_width = s->ps.sps->min_cb_width;
1826  int x_cb = x0 >> log2_min_cb_size;
1827  int y_cb = y0 >> log2_min_cb_size;
1828  int x_pu, y_pu;
1829  int i, j;
1830 
1831  int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1832 
1833  if (!skip_flag)
1834  lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1835 
1836  if (skip_flag || lc->pu.merge_flag) {
1837  if (s->sh.max_num_merge_cand > 1)
1838  merge_idx = ff_hevc_merge_idx_decode(s);
1839  else
1840  merge_idx = 0;
1841 
1842  ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1843  partIdx, merge_idx, &current_mv);
1844  } else {
1845  hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1846  partIdx, merge_idx, &current_mv);
1847  }
1848 
1849  x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1850  y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1851 
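      /* Replicate the decoded motion info into every minimum PU (normally
       * 4x4) cell covered by this prediction unit, so deblocking and temporal
       * MV prediction can look it up per minimum-PU block. */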
1852  for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1853  for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1854  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1855 
1856  if (current_mv.pred_flag & PF_L0) {
1857  ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1858  if (!ref0)
1859  return;
1860  hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1861  }
1862  if (current_mv.pred_flag & PF_L1) {
1863  ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1864  if (!ref1)
1865  return;
1866  hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1867  }
1868 
1869  if (current_mv.pred_flag == PF_L0) {
1870  int x0_c = x0 >> s->ps.sps->hshift[1];
1871  int y0_c = y0 >> s->ps.sps->vshift[1];
1872  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1873  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1874 
1875  luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
1876  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1877  s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1878  s->sh.luma_offset_l0[current_mv.ref_idx[0]]);
1879 
1880  if (s->ps.sps->chroma_format_idc) {
1881  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
1882  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1883  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
1884  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
1885  0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1886  s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
1887  }
1888  } else if (current_mv.pred_flag == PF_L1) {
1889  int x0_c = x0 >> s->ps.sps->hshift[1];
1890  int y0_c = y0 >> s->ps.sps->vshift[1];
1891  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1892  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1893 
1894  luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
1895  &current_mv.mv[1], x0, y0, nPbW, nPbH,
1896  s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1897  s->sh.luma_offset_l1[current_mv.ref_idx[1]]);
1898 
1899  if (s->ps.sps->chroma_format_idc) {
1900  chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
1901  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1902  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
1903 
1904  chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
1905  1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1906  s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
1907  }
1908  } else if (current_mv.pred_flag == PF_BI) {
1909  int x0_c = x0 >> s->ps.sps->hshift[1];
1910  int y0_c = y0 >> s->ps.sps->vshift[1];
1911  int nPbW_c = nPbW >> s->ps.sps->hshift[1];
1912  int nPbH_c = nPbH >> s->ps.sps->vshift[1];
1913 
1914  luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
1915  &current_mv.mv[0], x0, y0, nPbW, nPbH,
1916  ref1->frame, &current_mv.mv[1], &current_mv);
1917 
1918  if (s->ps.sps->chroma_format_idc) {
1919  chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
1920  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1921 
1922  chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
1923  x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1924  }
1925  }
1926 }
1927 
1928 /**
1929  * 8.4.1
1930  */
1931 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1932  int prev_intra_luma_pred_flag)
1933 {
1934  HEVCLocalContext *lc = s->HEVClc;
1935  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1936  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1937  int min_pu_width = s->ps.sps->min_pu_width;
1938  int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1939  int x0b = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
1940  int y0b = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);
1941 
1942  int cand_up = (lc->ctb_up_flag || y0b) ?
1943  s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1944  int cand_left = (lc->ctb_left_flag || x0b) ?
1945  s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1946 
1947  int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1948 
1949  MvField *tab_mvf = s->ref->tab_mvf;
1950  int intra_pred_mode;
1951  int candidate[3];
1952  int i, j;
1953 
1954  // intra_pred_mode prediction does not cross vertical CTB boundaries
1955  if ((y0 - 1) < y_ctb)
1956  cand_up = INTRA_DC;
1957 
1958  if (cand_left == cand_up) {
1959  if (cand_left < 2) {
1960  candidate[0] = INTRA_PLANAR;
1961  candidate[1] = INTRA_DC;
1962  candidate[2] = INTRA_ANGULAR_26;
1963  } else {
1964  candidate[0] = cand_left;
1965  candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1966  candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1967  }
1968  } else {
1969  candidate[0] = cand_left;
1970  candidate[1] = cand_up;
1971  if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
1972  candidate[2] = INTRA_PLANAR;
1973  } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
1974  candidate[2] = INTRA_DC;
1975  } else {
1976  candidate[2] = INTRA_ANGULAR_26;
1977  }
1978  }
1979 
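      /* Most-probable-mode signalling: if prev_intra_luma_pred_flag is set,
       * mpm_idx selects one of the three candidates directly; otherwise the
       * remaining mode is decoded and remapped past the (sorted) candidates. */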
1980  if (prev_intra_luma_pred_flag) {
1981  intra_pred_mode = candidate[lc->pu.mpm_idx];
1982  } else {
1983  if (candidate[0] > candidate[1])
1984  FFSWAP(uint8_t, candidate[0], candidate[1]);
1985  if (candidate[0] > candidate[2])
1986  FFSWAP(uint8_t, candidate[0], candidate[2]);
1987  if (candidate[1] > candidate[2])
1988  FFSWAP(uint8_t, candidate[1], candidate[2]);
1989 
1990  intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
1991  for (i = 0; i < 3; i++)
1992  if (intra_pred_mode >= candidate[i])
1993  intra_pred_mode++;
1994  }
1995 
1996  /* write the intra prediction units into the mv array */
1997  if (!size_in_pus)
1998  size_in_pus = 1;
1999  for (i = 0; i < size_in_pus; i++) {
2000  memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2001  intra_pred_mode, size_in_pus);
2002 
2003  for (j = 0; j < size_in_pus; j++) {
2004  tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
2005  }
2006  }
2007 
2008  return intra_pred_mode;
2009 }
2010 
2011 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
2012  int log2_cb_size, int ct_depth)
2013 {
2014  int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
2015  int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
2016  int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
2017  int y;
2018 
2019  for (y = 0; y < length; y++)
2020  memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
2021  ct_depth, length);
2022 }
2023 
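 /* Mapping applied to the derived chroma intra prediction mode for 4:2:2
  * content (used below when chroma_format_idc == 2), following the chroma
  * mode substitution table of the HEVC specification. */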
2024 static const uint8_t tab_mode_idx[] = {
2025  0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2026  21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2027 
2028 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
2029  int log2_cb_size)
2030 {
2031  HEVCLocalContext *lc = s->HEVClc;
2032  static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2033  uint8_t prev_intra_luma_pred_flag[4];
2034  int split = lc->cu.part_mode == PART_NxN;
2035  int pb_size = (1 << log2_cb_size) >> split;
2036  int side = split + 1;
2037  int chroma_mode;
2038  int i, j;
2039 
2040  for (i = 0; i < side; i++)
2041  for (j = 0; j < side; j++)
2042  prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
2043 
2044  for (i = 0; i < side; i++) {
2045  for (j = 0; j < side; j++) {
2046  if (prev_intra_luma_pred_flag[2 * i + j])
2047  lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2048  else
2049  lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2050 
2051  lc->pu.intra_pred_mode[2 * i + j] =
2052  luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2053  prev_intra_luma_pred_flag[2 * i + j]);
2054  }
2055  }
2056 
2057  if (s->ps.sps->chroma_format_idc == 3) {
2058  for (i = 0; i < side; i++) {
2059  for (j = 0; j < side; j++) {
2060  lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2061  if (chroma_mode != 4) {
2062  if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
2063  lc->pu.intra_pred_mode_c[2 * i + j] = 34;
2064  else
2065  lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
2066  } else {
2067  lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
2068  }
2069  }
2070  }
2071  } else if (s->ps.sps->chroma_format_idc == 2) {
2072  int mode_idx;
2073  lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2074  if (chroma_mode != 4) {
2075  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2076  mode_idx = 34;
2077  else
2078  mode_idx = intra_chroma_table[chroma_mode];
2079  } else {
2080  mode_idx = lc->pu.intra_pred_mode[0];
2081  }
2082  lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
2083  } else if (s->ps.sps->chroma_format_idc != 0) {
2084  chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2085  if (chroma_mode != 4) {
2086  if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2087  lc->pu.intra_pred_mode_c[0] = 34;
2088  else
2089  lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
2090  } else {
2091  lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
2092  }
2093  }
2094 }
2095 
2096 static void intra_prediction_unit_default_value(HEVCContext *s,
2097  int x0, int y0,
2098  int log2_cb_size)
2099 {
2100  HEVCLocalContext *lc = s->HEVClc;
2101  int pb_size = 1 << log2_cb_size;
2102  int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2103  int min_pu_width = s->ps.sps->min_pu_width;
2104  MvField *tab_mvf = s->ref->tab_mvf;
2105  int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2106  int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2107  int j, k;
2108 
2109  if (size_in_pus == 0)
2110  size_in_pus = 1;
2111  for (j = 0; j < size_in_pus; j++)
2112  memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2113  if (lc->cu.pred_mode == MODE_INTRA)
2114  for (j = 0; j < size_in_pus; j++)
2115  for (k = 0; k < size_in_pus; k++)
2116  tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].pred_flag = PF_INTRA;
2117 }
2118 
2119 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2120 {
2121  int cb_size = 1 << log2_cb_size;
2122  HEVCLocalContext *lc = s->HEVClc;
2123  int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2124  int length = cb_size >> log2_min_cb_size;
2125  int min_cb_width = s->ps.sps->min_cb_width;
2126  int x_cb = x0 >> log2_min_cb_size;
2127  int y_cb = y0 >> log2_min_cb_size;
2128  int idx = log2_cb_size - 2;
2129  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
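      /* A quantisation group is (CTB size >> diff_cu_qp_delta_depth) luma
       * samples wide; qp_block_mask is used below to detect when this CU
       * completes such a group so that the QP predictor can be refreshed. */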
2130  int x, y, ret;
2131 
2132  lc->cu.x = x0;
2133  lc->cu.y = y0;
2134  lc->cu.pred_mode = MODE_INTRA;
2135  lc->cu.part_mode = PART_2Nx2N;
2136  lc->cu.intra_split_flag = 0;
2137 
2138  SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2139  for (x = 0; x < 4; x++)
2140  lc->pu.intra_pred_mode[x] = 1;
2141  if (s->ps.pps->transquant_bypass_enable_flag) {
2142  lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2143  if (lc->cu.cu_transquant_bypass_flag)
2144  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2145  } else
2146  lc->cu.cu_transquant_bypass_flag = 0;
2147 
2148  if (s->sh.slice_type != HEVC_SLICE_I) {
2149  uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2150 
2151  x = y_cb * min_cb_width + x_cb;
2152  for (y = 0; y < length; y++) {
2153  memset(&s->skip_flag[x], skip_flag, length);
2154  x += min_cb_width;
2155  }
2156  lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2157  } else {
2158  x = y_cb * min_cb_width + x_cb;
2159  for (y = 0; y < length; y++) {
2160  memset(&s->skip_flag[x], 0, length);
2161  x += min_cb_width;
2162  }
2163  }
2164 
2165  if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2166  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2167  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2168 
2169  if (!s->sh.disable_deblocking_filter_flag)
2170  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2171  } else {
2172  int pcm_flag = 0;
2173 
2174  if (s->sh.slice_type != HEVC_SLICE_I)
2175  lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2176  if (lc->cu.pred_mode != MODE_INTRA ||
2177  log2_cb_size == s->ps.sps->log2_min_cb_size) {
2178  lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2179  lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2180  lc->cu.pred_mode == MODE_INTRA;
2181  }
2182 
2183  if (lc->cu.pred_mode == MODE_INTRA) {
2184  if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2185  log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2186  log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2187  pcm_flag = ff_hevc_pcm_flag_decode(s);
2188  }
2189  if (pcm_flag) {
2190  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2191  ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2192  if (s->ps.sps->pcm.loop_filter_disable_flag)
2193  set_deblocking_bypass(s, x0, y0, log2_cb_size);
2194 
2195  if (ret < 0)
2196  return ret;
2197  } else {
2198  intra_prediction_unit(s, x0, y0, log2_cb_size);
2199  }
2200  } else {
2201  intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2202  switch (lc->cu.part_mode) {
2203  case PART_2Nx2N:
2204  hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
2205  break;
2206  case PART_2NxN:
2207  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0, idx);
2208  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
2209  break;
2210  case PART_Nx2N:
2211  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
2212  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2213  break;
2214  case PART_2NxnU:
2215  hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0, idx);
2216  hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2217  break;
2218  case PART_2NxnD:
2219  hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
2220  hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2221  break;
2222  case PART_nLx2N:
2223  hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0, idx - 2);
2224  hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2225  break;
2226  case PART_nRx2N:
2227  hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
2228  hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2229  break;
2230  case PART_NxN:
2231  hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
2232  hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2233  hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2234  hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
2235  break;
2236  }
2237  }
2238 
2239  if (!pcm_flag) {
2240  int rqt_root_cbf = 1;
2241 
2242  if (lc->cu.pred_mode != MODE_INTRA &&
2243  !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2244  rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2245  }
2246  if (rqt_root_cbf) {
2247  const static int cbf[2] = { 0 };
2248  lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2249  s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2250  s->ps.sps->max_transform_hierarchy_depth_inter;
2251  ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2252  log2_cb_size,
2253  log2_cb_size, 0, 0, cbf, cbf);
2254  if (ret < 0)
2255  return ret;
2256  } else {
2257  if (!s->sh.disable_deblocking_filter_flag)
2258  ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2259  }
2260  }
2261  }
2262 
2263  if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2264  ff_hevc_set_qPy(s, x0, y0, log2_cb_size);
2265 
2266  x = y_cb * min_cb_width + x_cb;
2267  for (y = 0; y < length; y++) {
2268  memset(&s->qp_y_tab[x], lc->qp_y, length);
2269  x += min_cb_width;
2270  }
2271 
2272  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2273  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2274  lc->qPy_pred = lc->qp_y;
2275  }
2276 
2277  set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);
2278 
2279  return 0;
2280 }
2281 
2282 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2283  int log2_cb_size, int cb_depth)
2284 {
2285  HEVCLocalContext *lc = s->HEVClc;
2286  const int cb_size = 1 << log2_cb_size;
2287  int ret;
2288  int split_cu;
2289 
2290  lc->ct_depth = cb_depth;
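      /* The split flag is only coded when the CU fits entirely inside the
       * picture; otherwise splitting is implied for blocks larger than the
       * minimum coding block size. */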
2291  if (x0 + cb_size <= s->ps.sps->width &&
2292  y0 + cb_size <= s->ps.sps->height &&
2293  log2_cb_size > s->ps.sps->log2_min_cb_size) {
2294  split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2295  } else {
2296  split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2297  }
2298  if (s->ps.pps->cu_qp_delta_enabled_flag &&
2299  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2300  lc->tu.is_cu_qp_delta_coded = 0;
2301  lc->tu.cu_qp_delta = 0;
2302  }
2303 
2304  if (s->sh.cu_chroma_qp_offset_enabled_flag &&
2305  log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
2306  lc->tu.is_cu_chroma_qp_offset_coded = 0;
2307  }
2308 
2309  if (split_cu) {
2310  int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
2311  const int cb_size_split = cb_size >> 1;
2312  const int x1 = x0 + cb_size_split;
2313  const int y1 = y0 + cb_size_split;
2314 
2315  int more_data = 0;
2316 
2317  more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
2318  if (more_data < 0)
2319  return more_data;
2320 
2321  if (more_data && x1 < s->ps.sps->width) {
2322  more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
2323  if (more_data < 0)
2324  return more_data;
2325  }
2326  if (more_data && y1 < s->ps.sps->height) {
2327  more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
2328  if (more_data < 0)
2329  return more_data;
2330  }
2331  if (more_data && x1 < s->ps.sps->width &&
2332  y1 < s->ps.sps->height) {
2333  more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
2334  if (more_data < 0)
2335  return more_data;
2336  }
2337 
2338  if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2339  ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2340  lc->qPy_pred = lc->qp_y;
2341 
2342  if (more_data)
2343  return ((x1 + cb_size_split) < s->ps.sps->width ||
2344  (y1 + cb_size_split) < s->ps.sps->height);
2345  else
2346  return 0;
2347  } else {
2348  ret = hls_coding_unit(s, x0, y0, log2_cb_size);
2349  if (ret < 0)
2350  return ret;
2351  if ((!((x0 + cb_size) %
2352  (1 << (s->ps.sps->log2_ctb_size))) ||
2353  (x0 + cb_size >= s->ps.sps->width)) &&
2354  (!((y0 + cb_size) %
2355  (1 << (s->ps.sps->log2_ctb_size))) ||
2356  (y0 + cb_size >= s->ps.sps->height))) {
2357  int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
2358  return !end_of_slice_flag;
2359  } else {
2360  return 1;
2361  }
2362  }
2363 
2364  return 0;
2365 }
2366 
2367 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2368  int ctb_addr_ts)
2369 {
2370  HEVCLocalContext *lc = s->HEVClc;
2371  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2372  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2373  int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2374 
2375  s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2376 
2377  if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2378  if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2379  lc->first_qp_group = 1;
2380  lc->end_of_tiles_x = s->ps.sps->width;
2381  } else if (s->ps.pps->tiles_enabled_flag) {
2382  if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2383  int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2384  lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2385  lc->first_qp_group = 1;
2386  }
2387  } else {
2388  lc->end_of_tiles_x = s->ps.sps->width;
2389  }
2390 
2391  lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2392 
2393  lc->boundary_flags = 0;
2394  if (s->ps.pps->tiles_enabled_flag) {
2395  if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2396  lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2397  if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2398  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2399  if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2400  lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2401  if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2402  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2403  } else {
2404  if (ctb_addr_in_slice <= 0)
2405  lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2406  if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2407  lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2408  }
2409 
2410  lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2411  lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2412  lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2413  lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
2414 }
2415 
2416 static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
2417 {
2418  HEVCContext *s = avctxt->priv_data;
2419  int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2420  int more_data = 1;
2421  int x_ctb = 0;
2422  int y_ctb = 0;
2423  int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2424  int ret;
2425 
2426  if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
2427  av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
2428  return AVERROR_INVALIDDATA;
2429  }
2430 
2431  if (s->sh.dependent_slice_segment_flag) {
2432  int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
2433  if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
2434  av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
2435  return AVERROR_INVALIDDATA;
2436  }
2437  }
2438 
2439  while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2440  int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2441 
2442  x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2443  y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2444  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2445 
2446  ret = ff_hevc_cabac_init(s, ctb_addr_ts);
2447  if (ret < 0) {
2448  s->tab_slice_address[ctb_addr_rs] = -1;
2449  return ret;
2450  }
2451 
2452  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2453 
2454  s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2455  s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2456  s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2457 
2458  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2459  if (more_data < 0) {
2460  s->tab_slice_address[ctb_addr_rs] = -1;
2461  return more_data;
2462  }
2463 
2464 
2465  ctb_addr_ts++;
2466  ff_hevc_save_states(s, ctb_addr_ts);
2467  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2468  }
2469 
2470  if (x_ctb + ctb_size >= s->ps.sps->width &&
2471  y_ctb + ctb_size >= s->ps.sps->height)
2472  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2473 
2474  return ctb_addr_ts;
2475 }
2476 
2477 static int hls_slice_data(HEVCContext *s)
2478 {
2479  int arg[2];
2480  int ret[2];
2481 
2482  arg[0] = 0;
2483  arg[1] = 1;
2484 
2485  s->avctx->execute(s->avctx, hls_decode_entry, arg, ret , 1, sizeof(int));
2486  return ret[0];
2487 }
2488 static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
2489 {
2490  HEVCContext *s1 = avctxt->priv_data, *s;
2491  HEVCLocalContext *lc;
2492  int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
2493  int more_data = 1;
2494  int *ctb_row_p = input_ctb_row;
2495  int ctb_row = ctb_row_p[job];
2496  int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
2497  int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
2498  int thread = ctb_row % s1->threads_number;
2499  int ret;
2500 
2501  s = s1->sList[self_id];
2502  lc = s->HEVClc;
2503 
2504  if(ctb_row) {
2505  ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2506  if (ret < 0)
2507  goto error;
2508  ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
2509  }
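     /* Wavefront parallel processing: each CTB row is a separate job. Before
      * decoding a CTB the job waits until the row above is far enough ahead,
      * so that its reconstructed samples and the CABAC state propagated from
      * it are available. */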
2510 
2511  while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2512  int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2513  int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2514 
2515  hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2516 
2517  ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);
2518 
2519  if (atomic_load(&s1->wpp_err)) {
2520  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2521  return 0;
2522  }
2523 
2524  ret = ff_hevc_cabac_init(s, ctb_addr_ts);
2525  if (ret < 0)
2526  goto error;
2527  hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2528  more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2529 
2530  if (more_data < 0) {
2531  ret = more_data;
2532  goto error;
2533  }
2534 
2535  ctb_addr_ts++;
2536 
2537  ff_hevc_save_states(s, ctb_addr_ts);
2538  ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
2539  ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2540 
2541  if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2542  atomic_store(&s1->wpp_err, 1);
2543  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2544  return 0;
2545  }
2546 
2547  if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
2548  ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
2549  ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
2550  return ctb_addr_ts;
2551  }
2552  ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2553  x_ctb+=ctb_size;
2554 
2555  if(x_ctb >= s->ps.sps->width) {
2556  break;
2557  }
2558  }
2559  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2560 
2561  return 0;
2562 error:
2563  s->tab_slice_address[ctb_addr_rs] = -1;
2564  atomic_store(&s1->wpp_err, 1);
2565  ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
2566  return ret;
2567 }
2568 
2569 static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
2570 {
2571  const uint8_t *data = nal->data;
2572  int length = nal->size;
2573  HEVCLocalContext *lc = s->HEVClc;
2574  int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2575  int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
2576  int64_t offset;
2577  int64_t startheader, cmpt = 0;
2578  int i, j, res = 0;
2579 
2580  if (!ret || !arg) {
2581  av_free(ret);
2582  av_free(arg);
2583  return AVERROR(ENOMEM);
2584  }
2585 
2586  if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
2587  av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
2588  s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
2589  s->ps.sps->ctb_width, s->ps.sps->ctb_height
2590  );
2591  res = AVERROR_INVALIDDATA;
2592  goto error;
2593  }
2594 
2595  ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);
2596 
2597  if (!s->sList[1]) {
2598  for (i = 1; i < s->threads_number; i++) {
2599  s->sList[i] = av_malloc(sizeof(HEVCContext));
2600  memcpy(s->sList[i], s, sizeof(HEVCContext));
2601  s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
2602  s->sList[i]->HEVClc = s->HEVClcList[i];
2603  }
2604  }
2605 
2606  offset = (lc->gb.index >> 3);
2607 
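     /* The entry_point_offset values from the slice header count bytes in the
      * raw NAL unit, while decoding operates on the RBSP with emulation
      * prevention bytes removed; cmpt counts the skipped bytes in each
      * segment so the per-row offsets and sizes can be corrected. */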
2608  for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2609  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2610  startheader--;
2611  cmpt++;
2612  }
2613  }
2614 
2615  for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
2616  offset += (s->sh.entry_point_offset[i - 1] - cmpt);
2617  for (j = 0, cmpt = 0, startheader = offset
2618  + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
2619  if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
2620  startheader--;
2621  cmpt++;
2622  }
2623  }
2624  s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
2625  s->sh.offset[i - 1] = offset;
2626 
2627  }
2628  if (s->sh.num_entry_point_offsets != 0) {
2629  offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
2630  if (length < offset) {
2631  av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
2632  res = AVERROR_INVALIDDATA;
2633  goto error;
2634  }
2635  s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
2636  s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
2637 
2638  }
2639  s->data = data;
2640 
2641  for (i = 1; i < s->threads_number; i++) {
2642  s->sList[i]->HEVClc->first_qp_group = 1;
2643  s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
2644  memcpy(s->sList[i], s, sizeof(HEVCContext));
2645  s->sList[i]->HEVClc = s->HEVClcList[i];
2646  }
2647 
2648  atomic_store(&s->wpp_err, 0);
2649  ff_reset_entries(s->avctx);
2650 
2651  for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
2652  arg[i] = i;
2653  ret[i] = 0;
2654  }
2655 
2656  if (s->ps.pps->entropy_coding_sync_enabled_flag)
2657  s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);
2658 
2659  for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
2660  res += ret[i];
2661 error:
2662  av_free(ret);
2663  av_free(arg);
2664  return res;
2665 }
2666 
2667 static int set_side_data(HEVCContext *s)
2668 {
2669  AVFrame *out = s->ref->frame;
2670 
2671  if (s->sei.frame_packing.present &&
2672  s->sei.frame_packing.arrangement_type >= 3 &&
2673  s->sei.frame_packing.arrangement_type <= 5 &&
2674  s->sei.frame_packing.content_interpretation_type > 0 &&
2675  s->sei.frame_packing.content_interpretation_type < 3) {
2676  AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2677  if (!stereo)
2678  return AVERROR(ENOMEM);
2679 
2680  switch (s->sei.frame_packing.arrangement_type) {
2681  case 3:
2682  if (s->sei.frame_packing.quincunx_subsampling)
2683  stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2684  else
2685  stereo->type = AV_STEREO3D_SIDEBYSIDE;
2686  break;
2687  case 4:
2688  stereo->type = AV_STEREO3D_TOPBOTTOM;
2689  break;
2690  case 5:
2691  stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2692  break;
2693  }
2694 
2695  if (s->sei.frame_packing.content_interpretation_type == 2)
2696  stereo->flags = AV_STEREO3D_FLAG_INVERT;
2697 
2698  if (s->sei.frame_packing.arrangement_type == 5) {
2699  if (s->sei.frame_packing.current_frame_is_frame0_flag)
2700  stereo->view = AV_STEREO3D_VIEW_LEFT;
2701  else
2702  stereo->view = AV_STEREO3D_VIEW_RIGHT;
2703  }
2704  }
2705 
2706  if (s->sei.display_orientation.present &&
2707  (s->sei.display_orientation.anticlockwise_rotation ||
2708  s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
2709  double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
2710  AVFrameSideData *rotation = av_frame_new_side_data(out,
2711  AV_FRAME_DATA_DISPLAYMATRIX,
2712  sizeof(int32_t) * 9);
2713  if (!rotation)
2714  return AVERROR(ENOMEM);
2715 
2716  av_display_rotation_set((int32_t *)rotation->data, angle);
2717  av_display_matrix_flip((int32_t *)rotation->data,
2718  s->sei.display_orientation.hflip,
2719  s->sei.display_orientation.vflip);
2720  }
2721 
2722  // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
2723  // so the side data persists for the entire coded video sequence.
2724  if (s->sei.mastering_display.present > 0 &&
2725  IS_IRAP(s) && s->no_rasl_output_flag) {
2726  s->sei.mastering_display.present--;
2727  }
2728  if (s->sei.mastering_display.present) {
2729  // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
2730  const int mapping[3] = {2, 0, 1};
2731  const int chroma_den = 50000;
2732  const int luma_den = 10000;
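      /* The mastering display SEI stores chromaticities in increments of
       * 0.00002 and luminance in units of 0.0001 cd/m2; the denominators
       * above convert those integer values to rationals. */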
2733  int i;
2734  AVMasteringDisplayMetadata *metadata =
2735  av_mastering_display_metadata_create_side_data(out);
2736  if (!metadata)
2737  return AVERROR(ENOMEM);
2738 
2739  for (i = 0; i < 3; i++) {
2740  const int j = mapping[i];
2741  metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
2742  metadata->display_primaries[i][0].den = chroma_den;
2743  metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
2744  metadata->display_primaries[i][1].den = chroma_den;
2745  }
2746  metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
2747  metadata->white_point[0].den = chroma_den;
2748  metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
2749  metadata->white_point[1].den = chroma_den;
2750 
2751  metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
2752  metadata->max_luminance.den = luma_den;
2753  metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
2754  metadata->min_luminance.den = luma_den;
2755  metadata->has_luminance = 1;
2756  metadata->has_primaries = 1;
2757 
2758  av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
2759  av_log(s->avctx, AV_LOG_DEBUG,
2760  "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2761  av_q2d(metadata->display_primaries[0][0]),
2762  av_q2d(metadata->display_primaries[0][1]),
2763  av_q2d(metadata->display_primaries[1][0]),
2764  av_q2d(metadata->display_primaries[1][1]),
2765  av_q2d(metadata->display_primaries[2][0]),
2766  av_q2d(metadata->display_primaries[2][1]),
2767  av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
2768  av_log(s->avctx, AV_LOG_DEBUG,
2769  "min_luminance=%f, max_luminance=%f\n",
2770  av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
2771  }
2772  // Decrement the content light level flag when IRAP frame has no_rasl_output_flag=1
2773  // so the side data persists for the entire coded video sequence.
2774  if (s->sei.content_light.present > 0 &&
2775  IS_IRAP(s) && s->no_rasl_output_flag) {
2776  s->sei.content_light.present--;
2777  }
2778  if (s->sei.content_light.present) {
2779  AVContentLightMetadata *metadata =
2780  av_content_light_metadata_create_side_data(out);
2781  if (!metadata)
2782  return AVERROR(ENOMEM);
2783  metadata->MaxCLL = s->sei.content_light.max_content_light_level;
2784  metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
2785 
2786  av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
2787  av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
2788  metadata->MaxCLL, metadata->MaxFALL);
2789  }
2790 
2791  if (s->sei.a53_caption.buf_ref) {
2792  HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
2793 
2794  AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
2795  if (!sd)
2796  av_buffer_unref(&a53->buf_ref);
2797  a53->buf_ref = NULL;
2798 
2799  s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2800  }
2801 
2802  return 0;
2803 }
2804 
2805 static int hevc_frame_start(HEVCContext *s)
2806 {
2807  HEVCLocalContext *lc = s->HEVClc;
2808  int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
2809  ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
2810  int ret;
2811 
2812  memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
2813  memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
2814  memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2815  memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
2816  memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));
2817 
2818  s->is_decoded = 0;
2819  s->first_nal_type = s->nal_unit_type;
2820 
2821  s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);
2822 
2823  if (s->ps.pps->tiles_enabled_flag)
2824  lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2825 
2826  ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
2827  if (ret < 0)
2828  goto fail;
2829 
2830  ret = ff_hevc_frame_rps(s);
2831  if (ret < 0) {
2832  av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2833  goto fail;
2834  }
2835 
2836  s->ref->frame->key_frame = IS_IRAP(s);
2837 
2838  ret = set_side_data(s);
2839  if (ret < 0)
2840  goto fail;
2841 
2842  s->frame->pict_type = 3 - s->sh.slice_type;
2843 
2844  if (!IS_IRAP(s))
2845  ff_hevc_bump_frame(s);
2846 
2847  av_frame_unref(s->output_frame);
2848  ret = ff_hevc_output_frame(s, s->output_frame, 0);
2849  if (ret < 0)
2850  goto fail;
2851 
2852  if (!s->avctx->hwaccel)
2853  ff_thread_finish_setup(s->avctx);
2854 
2855  return 0;
2856 
2857 fail:
2858  if (s->ref)
2859  ff_hevc_unref_frame(s, s->ref, ~0);
2860  s->ref = NULL;
2861  return ret;
2862 }
2863 
2864 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2865 {
2866  HEVCLocalContext *lc = s->HEVClc;
2867  GetBitContext *gb = &lc->gb;
2868  int ctb_addr_ts, ret;
2869 
2870  *gb = nal->gb;
2871  s->nal_unit_type = nal->type;
2872  s->temporal_id = nal->temporal_id;
2873 
2874  switch (s->nal_unit_type) {
2875  case HEVC_NAL_VPS:
2876  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2877  ret = s->avctx->hwaccel->decode_params(s->avctx,
2878  nal->type,
2879  nal->raw_data,
2880  nal->raw_size);
2881  if (ret < 0)
2882  goto fail;
2883  }
2884  ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2885  if (ret < 0)
2886  goto fail;
2887  break;
2888  case HEVC_NAL_SPS:
2889  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2890  ret = s->avctx->hwaccel->decode_params(s->avctx,
2891  nal->type,
2892  nal->raw_data,
2893  nal->raw_size);
2894  if (ret < 0)
2895  goto fail;
2896  }
2897  ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2898  s->apply_defdispwin);
2899  if (ret < 0)
2900  goto fail;
2901  break;
2902  case HEVC_NAL_PPS:
2903  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2904  ret = s->avctx->hwaccel->decode_params(s->avctx,
2905  nal->type,
2906  nal->raw_data,
2907  nal->raw_size);
2908  if (ret < 0)
2909  goto fail;
2910  }
2911  ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2912  if (ret < 0)
2913  goto fail;
2914  break;
2915  case HEVC_NAL_SEI_PREFIX:
2916  case HEVC_NAL_SEI_SUFFIX:
2917  if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
2918  ret = s->avctx->hwaccel->decode_params(s->avctx,
2919  nal->type,
2920  nal->raw_data,
2921  nal->raw_size);
2922  if (ret < 0)
2923  goto fail;
2924  }
2925  ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
2926  if (ret < 0)
2927  goto fail;
2928  break;
2929  case HEVC_NAL_TRAIL_R:
2930  case HEVC_NAL_TRAIL_N:
2931  case HEVC_NAL_TSA_N:
2932  case HEVC_NAL_TSA_R:
2933  case HEVC_NAL_STSA_N:
2934  case HEVC_NAL_STSA_R:
2935  case HEVC_NAL_BLA_W_LP:
2936  case HEVC_NAL_BLA_W_RADL:
2937  case HEVC_NAL_BLA_N_LP:
2938  case HEVC_NAL_IDR_W_RADL:
2939  case HEVC_NAL_IDR_N_LP:
2940  case HEVC_NAL_CRA_NUT:
2941  case HEVC_NAL_RADL_N:
2942  case HEVC_NAL_RADL_R:
2943  case HEVC_NAL_RASL_N:
2944  case HEVC_NAL_RASL_R:
2945  ret = hls_slice_header(s);
2946  if (ret < 0)
2947  return ret;
2948  if (ret == 1) {
2949  ret = AVERROR_INVALIDDATA;
2950  goto fail;
2951  }
2952 
2953 
2954  if (
2955  (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
2956  (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
2957  (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
2958  break;
2959  }
2960 
2961  if (s->sh.first_slice_in_pic_flag) {
2962  if (s->max_ra == INT_MAX) {
2963  if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
2964  s->max_ra = s->poc;
2965  } else {
2966  if (IS_IDR(s))
2967  s->max_ra = INT_MIN;
2968  }
2969  }
2970 
2971  if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
2972  s->poc <= s->max_ra) {
2973  s->is_decoded = 0;
2974  break;
2975  } else {
2976  if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
2977  s->max_ra = INT_MIN;
2978  }
2979 
2980  s->overlap ++;
2981  ret = hevc_frame_start(s);
2982  if (ret < 0)
2983  return ret;
2984  } else if (!s->ref) {
2985  av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
2986  goto fail;
2987  }
2988 
2989  if (s->nal_unit_type != s->first_nal_type) {
2990  av_log(s->avctx, AV_LOG_ERROR,
2991  "Non-matching NAL types of the VCL NALUs: %d %d\n",
2992  s->first_nal_type, s->nal_unit_type);
2993  return AVERROR_INVALIDDATA;
2994  }
2995 
2996  if (!s->sh.dependent_slice_segment_flag &&
2997  s->sh.slice_type != HEVC_SLICE_I) {
2998  ret = ff_hevc_slice_rpl(s);
2999  if (ret < 0) {
3000  av_log(s->avctx, AV_LOG_WARNING,
3001  "Error constructing the reference lists for the current slice.\n");
3002  goto fail;
3003  }
3004  }
3005 
3006  if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
3007  ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
3008  if (ret < 0)
3009  goto fail;
3010  }
3011 
3012  if (s->avctx->hwaccel) {
3013  ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
3014  if (ret < 0)
3015  goto fail;
3016  } else {
3017  if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
3018  ctb_addr_ts = hls_slice_data_wpp(s, nal);
3019  else
3020  ctb_addr_ts = hls_slice_data(s);
3021  if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
3022  s->is_decoded = 1;
3023  }
3024 
3025  if (ctb_addr_ts < 0) {
3026  ret = ctb_addr_ts;
3027  goto fail;
3028  }
3029  }
3030  break;
3031  case HEVC_NAL_EOS_NUT:
3032  case HEVC_NAL_EOB_NUT:
3033  s->seq_decode = (s->seq_decode + 1) & 0xff;
3034  s->max_ra = INT_MAX;
3035  break;
3036  case HEVC_NAL_AUD:
3037  case HEVC_NAL_FD_NUT:
3038  break;
3039  default:
3040  av_log(s->avctx, AV_LOG_INFO,
3041  "Skipping NAL unit %d\n", s->nal_unit_type);
3042  }
3043 
3044  return 0;
3045 fail:
3046  if (s->avctx->err_recognition & AV_EF_EXPLODE)
3047  return ret;
3048  return 0;
3049 }
3050 
3051 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
3052 {
3053  int i, ret = 0;
3054  int eos_at_start = 1;
3055 
3056  s->ref = NULL;
3057  s->last_eos = s->eos;
3058  s->eos = 0;
3059  s->overlap = 0;
3060 
3061  /* split the input packet into NAL units, so we know the upper bound on the
3062  * number of slices in the frame */
3063  ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
3064  s->nal_length_size, s->avctx->codec_id, 1, 0);
3065  if (ret < 0) {
3066  av_log(s->avctx, AV_LOG_ERROR,
3067  "Error splitting the input into NAL units.\n");
3068  return ret;
3069  }
3070 
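     /* An EOS/EOB NAL unit seen before any other data means the previous
      * coded video sequence already ended (last_eos); one seen after other
      * NAL units ends the current sequence (eos). */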
3071  for (i = 0; i < s->pkt.nb_nals; i++) {
3072  if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
3073  s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
3074  if (eos_at_start) {
3075  s->last_eos = 1;
3076  } else {
3077  s->eos = 1;
3078  }
3079  } else {
3080  eos_at_start = 0;
3081  }
3082  }
3083 
3084  /* decode the NAL units */
3085  for (i = 0; i < s->pkt.nb_nals; i++) {
3086  H2645NAL *nal = &s->pkt.nals[i];
3087 
3088  if (s->avctx->skip_frame >= AVDISCARD_ALL ||
3089  (s->avctx->skip_frame >= AVDISCARD_NONREF
3090  && ff_hevc_nal_is_nonref(nal->type)) || nal->nuh_layer_id > 0)
3091  continue;
3092 
3093  ret = decode_nal_unit(s, nal);
3094  if (ret >= 0 && s->overlap > 2)
3095  ret = AVERROR_INVALIDDATA;
3096  if (ret < 0) {
3097  av_log(s->avctx, AV_LOG_WARNING,
3098  "Error parsing NAL unit #%d.\n", i);
3099  goto fail;
3100  }
3101  }
3102 
3103 fail:
3104  if (s->ref && s->threads_type == FF_THREAD_FRAME)
3105  ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
3106 
3107  return ret;
3108 }
3109 
3110 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
3111 {
3112  int i;
3113  for (i = 0; i < 16; i++)
3114  av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3115 }
3116 
3117 static int verify_md5(HEVCContext *s, AVFrame *frame)
3118 {
3119  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
3120  int pixel_shift;
3121  int i, j;
3122 
3123  if (!desc)
3124  return AVERROR(EINVAL);
3125 
3126  pixel_shift = desc->comp[0].depth > 8;
3127 
3128  av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
3129  s->poc);
3130 
3131  /* the checksums are LE, so we have to byteswap for >8bpp formats
3132  * on BE arches */
3133 #if HAVE_BIGENDIAN
3134  if (pixel_shift && !s->checksum_buf) {
3135  av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
3136  FFMAX3(frame->linesize[0], frame->linesize[1],
3137  frame->linesize[2]));
3138  if (!s->checksum_buf)
3139  return AVERROR(ENOMEM);
3140  }
3141 #endif
3142 
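     /* The picture_hash SEI carries one MD5 per plane, computed over
      * coded_width x coded_height samples (chroma planes scaled by the
      * subsampling factors); recompute and compare each plane here. */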
3143  for (i = 0; frame->data[i]; i++) {
3144  int width = s->avctx->coded_width;
3145  int height = s->avctx->coded_height;
3146  int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
3147  int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
3148  uint8_t md5[16];
3149 
3150  av_md5_init(s->md5_ctx);
3151  for (j = 0; j < h; j++) {
3152  const uint8_t *src = frame->data[i] + j * frame->linesize[i];
3153 #if HAVE_BIGENDIAN
3154  if (pixel_shift) {
3155  s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
3156  (const uint16_t *) src, w);
3157  src = s->checksum_buf;
3158  }
3159 #endif
3160  av_md5_update(s->md5_ctx, src, w << pixel_shift);
3161  }
3162  av_md5_final(s->md5_ctx, md5);
3163 
3164  if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
3165  av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
3166  print_md5(s->avctx, AV_LOG_DEBUG, md5);
3167  av_log (s->avctx, AV_LOG_DEBUG, "; ");
3168  } else {
3169  av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
3170  print_md5(s->avctx, AV_LOG_ERROR, md5);
3171  av_log (s->avctx, AV_LOG_ERROR, " != ");
3172  print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
3173  av_log (s->avctx, AV_LOG_ERROR, "\n");
3174  return AVERROR_INVALIDDATA;
3175  }
3176  }
3177 
3178  av_log(s->avctx, AV_LOG_DEBUG, "\n");
3179 
3180  return 0;
3181 }
3182 
3183 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
3184 {
3185  int ret, i;
3186 
3187  ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
3188  &s->nal_length_size, s->avctx->err_recognition,
3189  s->apply_defdispwin, s->avctx);
3190  if (ret < 0)
3191  return ret;
3192 
3193  /* export stream parameters from the first SPS */
3194  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3195  if (first && s->ps.sps_list[i]) {
3196  const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
3197  export_stream_params(s, sps);
3198  break;
3199  }
3200  }
3201 
3202  return 0;
3203 }
3204 
3205 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
3206  AVPacket *avpkt)
3207 {
3208  int ret;
3209  int new_extradata_size;
3210  uint8_t *new_extradata;
3211  HEVCContext *s = avctx->priv_data;
3212 
3213  if (!avpkt->size) {
3214  ret = ff_hevc_output_frame(s, data, 1);
3215  if (ret < 0)
3216  return ret;
3217 
3218  *got_output = ret;
3219  return 0;
3220  }
3221 
3222  new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
3223  &new_extradata_size);
3224  if (new_extradata && new_extradata_size > 0) {
3225  ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
3226  if (ret < 0)
3227  return ret;
3228  }
3229 
3230  s->ref = NULL;
3231  ret = decode_nal_units(s, avpkt->data, avpkt->size);
3232  if (ret < 0)
3233  return ret;
3234 
3235  if (avctx->hwaccel) {
3236  if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
3237  av_log(avctx, AV_LOG_ERROR,
3238  "hardware accelerator failed to decode picture\n");
3239  ff_hevc_unref_frame(s, s->ref, ~0);
3240  return ret;
3241  }
3242  } else {
3243  /* verify the SEI checksum */
3244  if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
3245  s->sei.picture_hash.is_md5) {
3246  ret = verify_md5(s, s->ref->frame);
3247  if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
3248  ff_hevc_unref_frame(s, s->ref, ~0);
3249  return ret;
3250  }
3251  }
3252  }
3253  s->sei.picture_hash.is_md5 = 0;
3254 
3255  if (s->is_decoded) {
3256  av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
3257  s->is_decoded = 0;
3258  }
3259 
3260  if (s->output_frame->buf[0]) {
3261  av_frame_move_ref(data, s->output_frame);
3262  *got_output = 1;
3263  }
3264 
3265  return avpkt->size;
3266 }
3267 
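/* Duplicate a DPB entry: take new references on all buffers of src (frame
 * planes, motion-vector and RPL tables, hwaccel private data) and copy its
 * per-frame metadata into dst. Used when synchronizing frame-threading
 * contexts. */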
3268 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
3269 {
3270  int ret;
3271 
3272  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
3273  if (ret < 0)
3274  return ret;
3275 
3276  dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
3277  if (!dst->tab_mvf_buf)
3278  goto fail;
3279  dst->tab_mvf = src->tab_mvf;
3280 
3281  dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
3282  if (!dst->rpl_tab_buf)
3283  goto fail;
3284  dst->rpl_tab = src->rpl_tab;
3285 
3286  dst->rpl_buf = av_buffer_ref(src->rpl_buf);
3287  if (!dst->rpl_buf)
3288  goto fail;
3289 
3290  dst->poc = src->poc;
3291  dst->ctb_count = src->ctb_count;
3292  dst->flags = src->flags;
3293  dst->sequence = src->sequence;
3294 
3295  if (src->hwaccel_picture_private) {
3296  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
3297  if (!dst->hwaccel_priv_buf)
3298  goto fail;
3299  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
3300  }
3301 
3302  return 0;
3303 fail:
3304  ff_hevc_unref_frame(s, dst, ~0);
3305  return AVERROR(ENOMEM);
3306 }
3307 
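/* Free everything owned by the decoder: per-picture arrays, CABAC state, SAO
 * buffers, the DPB, parameter sets, slice-header arrays and the per-thread
 * local contexts. */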
3308 static av_cold int hevc_decode_free(AVCodecContext *avctx)
3309 {
3310  HEVCContext *s = avctx->priv_data;
3311  int i;
3312 
3313  pic_arrays_free(s);
3314 
3315  av_freep(&s->md5_ctx);
3316 
3317  av_freep(&s->cabac_state);
3318 
3319  for (i = 0; i < 3; i++) {
3320  av_freep(&s->sao_pixel_buffer_h[i]);
3321  av_freep(&s->sao_pixel_buffer_v[i]);
3322  }
3323  av_frame_free(&s->output_frame);
3324 
3325  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3326  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3327  av_frame_free(&s->DPB[i].frame);
3328  }
3329 
3330  ff_hevc_ps_uninit(&s->ps);
3331 
3332  av_freep(&s->sh.entry_point_offset);
3333  av_freep(&s->sh.offset);
3334  av_freep(&s->sh.size);
3335 
3336  for (i = 1; i < s->threads_number; i++) {
3337  HEVCLocalContext *lc = s->HEVClcList[i];
3338  if (lc) {
3339  av_freep(&s->HEVClcList[i]);
3340  av_freep(&s->sList[i]);
3341  }
3342  }
3343  if (s->HEVClc == s->HEVClcList[0])
3344  s->HEVClc = NULL;
3345  av_freep(&s->HEVClcList[0]);
3346 
3347  ff_h2645_packet_uninit(&s->pkt);
3348 
3349  ff_hevc_reset_sei(&s->sei);
3350 
3351  return 0;
3352 }
3353 
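/* Allocate the parts of the context that do not depend on stream parameters:
 * the local context, CABAC state, output frame, DPB frames and the MD5
 * context. */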
3354 static av_cold int hevc_init_context(AVCodecContext *avctx)
3355 {
3356  HEVCContext *s = avctx->priv_data;
3357  int i;
3358 
3359  s->avctx = avctx;
3360 
3361  s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
3362  if (!s->HEVClc)
3363  goto fail;
3364  s->HEVClcList[0] = s->HEVClc;
3365  s->sList[0] = s;
3366 
3367  s->cabac_state = av_malloc(HEVC_CONTEXTS);
3368  if (!s->cabac_state)
3369  goto fail;
3370 
3371  s->output_frame = av_frame_alloc();
3372  if (!s->output_frame)
3373  goto fail;
3374 
3375  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3376  s->DPB[i].frame = av_frame_alloc();
3377  if (!s->DPB[i].frame)
3378  goto fail;
3379  s->DPB[i].tf.f = s->DPB[i].frame;
3380  }
3381 
3382  s->max_ra = INT_MAX;
3383 
3384  s->md5_ctx = av_md5_alloc();
3385  if (!s->md5_ctx)
3386  goto fail;
3387 
3388  ff_bswapdsp_init(&s->bdsp);
3389 
3390  s->context_initialized = 1;
3391  s->eos = 0;
3392 
3393  ff_hevc_reset_sei(&s->sei);
3394 
3395  return 0;
3396 
3397 fail:
3398  hevc_decode_free(avctx);
3399  return AVERROR(ENOMEM);
3400 }
3401 
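/* Frame-threading: bring the destination thread's context up to date with the
 * source thread by re-referencing its DPB entries and VPS/SPS/PPS buffers and
 * copying POC, sequence and SEI state. */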
3402 #if HAVE_THREADS
3403 static int hevc_update_thread_context(AVCodecContext *dst,
3404  const AVCodecContext *src)
3405 {
3406  HEVCContext *s = dst->priv_data;
3407  HEVCContext *s0 = src->priv_data;
3408  int i, ret;
3409 
3410  if (!s->context_initialized) {
3411  ret = hevc_init_context(dst);
3412  if (ret < 0)
3413  return ret;
3414  }
3415 
3416  for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
3417  ff_hevc_unref_frame(s, &s->DPB[i], ~0);
3418  if (s0->DPB[i].frame->buf[0]) {
3419  ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
3420  if (ret < 0)
3421  return ret;
3422  }
3423  }
3424 
3425  if (s->ps.sps != s0->ps.sps)
3426  s->ps.sps = NULL;
3427  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
3428  av_buffer_unref(&s->ps.vps_list[i]);
3429  if (s0->ps.vps_list[i]) {
3430  s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
3431  if (!s->ps.vps_list[i])
3432  return AVERROR(ENOMEM);
3433  }
3434  }
3435 
3436  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
3437  av_buffer_unref(&s->ps.sps_list[i]);
3438  if (s0->ps.sps_list[i]) {
3439  s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
3440  if (!s->ps.sps_list[i])
3441  return AVERROR(ENOMEM);
3442  }
3443  }
3444 
3445  for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
3446  av_buffer_unref(&s->ps.pps_list[i]);
3447  if (s0->ps.pps_list[i]) {
3448  s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
3449  if (!s->ps.pps_list[i])
3450  return AVERROR(ENOMEM);
3451  }
3452  }
3453 
3454  if (s->ps.sps != s0->ps.sps)
3455  if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
3456  return ret;
3457 
3458  s->seq_decode = s0->seq_decode;
3459  s->seq_output = s0->seq_output;
3460  s->pocTid0 = s0->pocTid0;
3461  s->max_ra = s0->max_ra;
3462  s->eos = s0->eos;
3463  s->no_rasl_output_flag = s0->no_rasl_output_flag;
3464 
3465  s->is_nalff = s0->is_nalff;
3466  s->nal_length_size = s0->nal_length_size;
3467 
3468  s->threads_number = s0->threads_number;
3469  s->threads_type = s0->threads_type;
3470 
3471  if (s0->eos) {
3472  s->seq_decode = (s->seq_decode + 1) & 0xff;
3473  s->max_ra = INT_MAX;
3474  }
3475 
3476  av_buffer_unref(&s->sei.a53_caption.buf_ref);
3477  if (s0->sei.a53_caption.buf_ref) {
3478  s->sei.a53_caption.buf_ref = av_buffer_ref(s0->sei.a53_caption.buf_ref);
3479  if (!s->sei.a53_caption.buf_ref)
3480  return AVERROR(ENOMEM);
3481  }
3482 
3483  s->sei.frame_packing = s0->sei.frame_packing;
3484  s->sei.display_orientation = s0->sei.display_orientation;
3485  s->sei.mastering_display = s0->sei.mastering_display;
3486  s->sei.content_light = s0->sei.content_light;
3487  s->sei.alternative_transfer = s0->sei.alternative_transfer;
3488 
3489  return 0;
3490 }
3491 #endif
3492 
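/* Decoder init: set up the context, choose slice vs. frame threading from the
 * requested thread type, and parse container extradata (if present) so stream
 * parameters are known before the first packet. */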
3493 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3494 {
3495  HEVCContext *s = avctx->priv_data;
3496  int ret;
3497 
3498  ret = hevc_init_context(avctx);
3499  if (ret < 0)
3500  return ret;
3501 
3502  s->enable_parallel_tiles = 0;
3503  s->sei.picture_timing.picture_struct = 0;
3504  s->eos = 1;
3505 
3506  atomic_init(&s->wpp_err, 0);
3507 
3508  if(avctx->active_thread_type & FF_THREAD_SLICE)
3509  s->threads_number = avctx->thread_count;
3510  else
3511  s->threads_number = 1;
3512 
3513  if (!avctx->internal->is_copy) {
3514  if (avctx->extradata_size > 0 && avctx->extradata) {
3515  ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
3516  if (ret < 0) {
3517  hevc_decode_free(avctx);
3518  return ret;
3519  }
3520  }
3521  }
3522 
3523  if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
3524  s->threads_type = FF_THREAD_FRAME;
3525  else
3526  s->threads_type = FF_THREAD_SLICE;
3527 
3528  return 0;
3529 }
3530 
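/* Flush on seek: drop all DPB references, reset SEI state and re-arm the
 * random-access handling (max_ra, eos). */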
3531 static void hevc_decode_flush(AVCodecContext *avctx)
3532 {
3533  HEVCContext *s = avctx->priv_data;
3534  ff_hevc_flush_dpb(s);
3535  ff_hevc_reset_sei(&s->sei);
3536  s->max_ra = INT_MAX;
3537  s->eos = 1;
3538 }
3539 
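/* The entries below are ordinary AVOptions on the decoder's private context.
 * An illustrative sketch of enabling one through the standard libavcodec
 * options API (not part of this file) is:
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "apply_defdispwin", "1", 0);
 *     avcodec_open2(avctx, codec, &opts);
 */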
3540 #define OFFSET(x) offsetof(HEVCContext, x)
3541 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3542 
3543 static const AVOption options[] = {
3544  { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3545  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3546  { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
3547  AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
3548  { NULL },
3549 };
3550 
3551 static const AVClass hevc_decoder_class = {
3552  .class_name = "HEVC decoder",
3553  .item_name = av_default_item_name,
3554  .option = options,
3555  .version = LIBAVUTIL_VERSION_INT,
3556 };
3557 
3558 AVCodec ff_hevc_decoder = {
3559  .name = "hevc",
3560  .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3561  .type = AVMEDIA_TYPE_VIDEO,
3562  .id = AV_CODEC_ID_HEVC,
3563  .priv_data_size = sizeof(HEVCContext),
3564  .priv_class = &hevc_decoder_class,
3565  .init = hevc_decode_init,
3566  .close = hevc_decode_free,
3567  .decode = hevc_decode_frame,
3568  .flush = hevc_decode_flush,
3569  .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
3570  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3571  AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
3572  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
3573  FF_CODEC_CAP_ALLOCATE_PROGRESS,
3574  .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3575  .hw_configs = (const AVCodecHWConfigInternal*[]) {
3576 #if CONFIG_HEVC_DXVA2_HWACCEL
3577  HWACCEL_DXVA2(hevc),
3578 #endif
3579 #if CONFIG_HEVC_D3D11VA_HWACCEL
3580  HWACCEL_D3D11VA(hevc),
3581 #endif
3582 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3583  HWACCEL_D3D11VA2(hevc),
3584 #endif
3585 #if CONFIG_HEVC_NVDEC_HWACCEL
3586  HWACCEL_NVDEC(hevc),
3587 #endif
3588 #if CONFIG_HEVC_VAAPI_HWACCEL
3589  HWACCEL_VAAPI(hevc),
3590 #endif
3591 #if CONFIG_HEVC_VDPAU_HWACCEL
3592  HWACCEL_VDPAU(hevc),
3593 #endif
3594 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
3595  HWACCEL_VIDEOTOOLBOX(hevc),
3596 #endif
3597  NULL
3598  },
3599 };
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
verify_md5
static int verify_md5(HEVCContext *s, AVFrame *frame)
Definition: hevcdec.c:3117
hwconfig.h
AVMasteringDisplayMetadata::has_primaries
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
Definition: mastering_display_metadata.h:62
HEVC_NAL_RADL_N
@ HEVC_NAL_RADL_N
Definition: hevc.h:35
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
SliceHeader::beta_offset
int beta_offset
beta_offset_div2 * 2
Definition: hevcdec.h:297
AVCodec
AVCodec.
Definition: codec.h:190
bswapdsp.h
L1
F H1 F F H1 F F F F H1<-F-------F-------F v v v H2 H3 H2 ^ ^ ^ F-------F-------F-> H1<-F-------F-------F|||||||||F H1 F|||||||||F H1 Funavailable fullpel samples(outside the picture for example) shall be equalto the closest available fullpel sampleSmaller pel interpolation:--------------------------if diag_mc is set then points which lie on a line between 2 vertically, horizontally or diagonally adjacent halfpel points shall be interpolatedlinearly with rounding to nearest and halfway values rounded up.points which lie on 2 diagonals at the same time should only use the onediagonal not containing the fullpel point F--> O q O<--h1-> O q O<--F v \/v \/v O O O O O O O|/|\|q q q q q|/|\|O O O O O O O ^/\ ^/\ ^ h2--> O q O<--h3-> O q O<--h2 v \/v \/v O O O O O O O|\|/|q q q q q|\|/|O O O O O O O ^/\ ^/\ ^ F--> O q O<--h1-> O q O<--Fthe remaining points shall be bilinearly interpolated from theup to 4 surrounding halfpel and fullpel points, again rounding should be tonearest and halfway values rounded upcompliant Snow decoders MUST support 1-1/8 pel luma and 1/2-1/16 pel chromainterpolation at leastOverlapped block motion compensation:-------------------------------------FIXMELL band prediction:===================Each sample in the LL0 subband is predicted by the median of the left, top andleft+top-topleft samples, samples outside the subband shall be considered tobe 0. To reverse this prediction in the decoder apply the following.for(y=0;y< height;y++){ for(x=0;x< width;x++){ sample[y][x]+=median(sample[y-1][x], sample[y][x-1], sample[y-1][x]+sample[y][x-1]-sample[y-1][x-1]);}}sample[-1][ *]=sample[ *][-1]=0;width, height here are the width and height of the LL0 subband not of the finalvideoDequantization:===============FIXMEWavelet Transform:==================Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integertransform and an integer approximation of the symmetric biorthogonal 9/7daubechies wavelet.2D IDWT(inverse discrete wavelet transform) --------------------------------------------The 2D IDWT applies a 2D filter recursively, each time combining the4 lowest frequency subbands into a single subband until only 1 subbandremains.The 2D filter is done by first applying a 1D filter in the vertical directionand then applying it in the horizontal one. --------------- --------------- --------------- ---------------|LL0|HL0|||||||||||||---+---|HL1||L0|H0|HL1||LL1|HL1|||||LH0|HH0|||||||||||||-------+-------|-> L1 H1 LH1 HH1 LH1 HH1 LH1 HH1 L1
Definition: snow.txt:554
stride
int stride
Definition: mace.c:144
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
HEVCLocalContext
Definition: hevcdec.h:424
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
HEVCFrame::flags
uint8_t flags
A combination of HEVC_FRAME_FLAG_*.
Definition: hevcdec.h:421
AVMasteringDisplayMetadata::max_luminance
AVRational max_luminance
Max luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:57
HWACCEL_MAX
#define HWACCEL_MAX
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
ff_hevc_sao_type_idx_decode
int ff_hevc_sao_type_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:566
HEVCFrame::tf
ThreadFrame tf
Definition: hevcdec.h:397
HEVCFrame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: hevcdec.h:409
level
uint8_t level
Definition: svq3.c:210
ff_hevc_no_residual_syntax_flag_decode
int ff_hevc_no_residual_syntax_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:828
AV_STEREO3D_VIEW_LEFT
@ AV_STEREO3D_VIEW_LEFT
Frame contains only the left view.
Definition: stereo3d.h:156
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
hls_decode_neighbour
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb, int ctb_addr_ts)
Definition: hevcdec.c:2367
ff_hevc_sao_eo_class_decode
int ff_hevc_sao_eo_class_decode(HEVCContext *s)
Definition: hevc_cabac.c:601
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
ff_hevc_pred_init
void ff_hevc_pred_init(HEVCPredContext *hpc, int bit_depth)
Definition: hevcpred.c:43
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
hevc_decode_flush
static void hevc_decode_flush(AVCodecContext *avctx)
Definition: hevcdec.c:3531
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
ff_hevc_set_qPy
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
Definition: hevc_filter.c:121
chroma_mc_bi
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
8.5.3.2.2.2 Chroma sample bidirectional interpolation process
Definition: hevcdec.c:1660
PART_NxN
@ PART_NxN
Definition: hevcdec.h:147
luma_mc_bi
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref0, const Mv *mv0, int x_off, int y_off, int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
8.5.3.2.2.1 Luma sample bidirectional interpolation process
Definition: hevcdec.c:1504
ff_hevc_res_scale_sign_flag
int ff_hevc_res_scale_sign_flag(HEVCContext *s, int idx)
Definition: hevc_cabac.c:905
decode_nal_unit
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2864
ff_hevc_split_transform_flag_decode
int ff_hevc_split_transform_flag_decode(HEVCContext *s, int log2_trafo_size)
Definition: hevc_cabac.c:866
out
FILE * out
Definition: movenc.c:54
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SAO_BAND
@ SAO_BAND
Definition: hevcdec.h:213
ff_hevc_profiles
const AVProfile ff_hevc_profiles[]
Definition: profiles.c:77
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
ff_hevc_hls_filter
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
Definition: hevc_filter.c:842
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVMasteringDisplayMetadata::display_primaries
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
Definition: mastering_display_metadata.h:42
AVMasteringDisplayMetadata::has_luminance
int has_luminance
Flag indicating whether the luminance (min_ and max_) have been set.
Definition: mastering_display_metadata.h:67
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1655
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
HEVCLocalContext::ctb_up_flag
uint8_t ctb_up_flag
Definition: hevcdec.h:442
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:727
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
SliceHeader::num_entry_point_offsets
int num_entry_point_offsets
Definition: hevcdec.h:305
HEVC_NAL_STSA_N
@ HEVC_NAL_STSA_N
Definition: hevc.h:33
PART_2NxnU
@ PART_2NxnU
Definition: hevcdec.h:148
ff_hevc_cu_qp_delta_abs
int ff_hevc_cu_qp_delta_abs(HEVCContext *s)
Definition: hevc_cabac.c:633
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
AVContentLightMetadata::MaxCLL
unsigned MaxCLL
Max content light level (cd/m^2).
Definition: mastering_display_metadata.h:102
H2645NAL::nuh_layer_id
int nuh_layer_id
Definition: h2645_parse.h:62
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
set_deblocking_bypass
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1248
pixdesc.h
HEVCFrame::tab_mvf
MvField * tab_mvf
Definition: hevcdec.h:398
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
TransformUnit::cu_qp_delta
int cu_qp_delta
Definition: hevcdec.h:370
HEVC_NAL_TSA_N
@ HEVC_NAL_TSA_N
Definition: hevc.h:31
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
HEVCFrame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: hevcdec.h:410
av_display_matrix_flip
void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip)
Flip the input matrix horizontally and/or vertically.
Definition: display.c:65
AVPacket::data
uint8_t * data
Definition: packet.h:355
PAR
#define PAR
Definition: hevcdec.c:3541
INTRA_DC
@ INTRA_DC
Definition: hevcdec.h:175
AVOption
AVOption.
Definition: opt.h:246
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_h2645_packet_uninit
void ff_h2645_packet_uninit(H2645Packet *pkt)
Free all the allocated memory in the packet.
Definition: h2645_parse.c:519
AVCOL_TRC_UNSPECIFIED
@ AVCOL_TRC_UNSPECIFIED
Definition: pixfmt.h:483
hevc_decode_free
static av_cold int hevc_decode_free(AVCodecContext *avctx)
Definition: hevcdec.c:3308
ff_hevc_hls_filters
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
Definition: hevc_filter.c:878
data
const char data[16]
Definition: mxf.c:91
Mv::y
int16_t y
vertical component of motion vector
Definition: hevcdec.h:341
ff_hevc_mpm_idx_decode
int ff_hevc_mpm_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:752
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:397
SAO_EDGE
@ SAO_EDGE
Definition: hevcdec.h:214
ff_hevc_hls_residual_coding
void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0, int log2_trafo_size, enum ScanType scan_idx, int c_idx)
Definition: hevc_cabac.c:1024
SliceHeader::slice_temporal_mvp_enabled_flag
uint8_t slice_temporal_mvp_enabled_flag
Definition: hevcdec.h:277
MvField::mv
Mv mv[2]
Definition: hevcdec.h:345
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
TransformUnit::is_cu_qp_delta_coded
uint8_t is_cu_qp_delta_coded
Definition: hevcdec.h:378
HEVC_NAL_RASL_N
@ HEVC_NAL_RASL_N
Definition: hevc.h:37
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
HEVC_NAL_STSA_R
@ HEVC_NAL_STSA_R
Definition: hevc.h:34
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
MODE_INTRA
@ MODE_INTRA
Definition: hevcdec.h:156
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
HEVC_NAL_BLA_W_RADL
@ HEVC_NAL_BLA_W_RADL
Definition: hevc.h:46
SliceHeader::slice_loop_filter_across_slices_enabled_flag
uint8_t slice_loop_filter_across_slices_enabled_flag
Definition: hevcdec.h:286
SAOParams::offset_sign
int offset_sign[3][4]
sao_offset_sign
Definition: hevcdsp.h:34
export_stream_params
static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:316
HEVCLocalContext::ctb_up_left_flag
uint8_t ctb_up_left_flag
Definition: hevcdec.h:444
H2645NAL::temporal_id
int temporal_id
HEVC only, nuh_temporal_id_plus_1 - 1.
Definition: h2645_parse.h:57
RefPicList
Definition: hevcdec.h:238
AV_STEREO3D_VIEW_RIGHT
@ AV_STEREO3D_VIEW_RIGHT
Frame contains only the right view.
Definition: stereo3d.h:161
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
OFFSET
#define OFFSET(x)
Definition: hevcdec.c:3540
PF_INTRA
@ PF_INTRA
Definition: hevcdec.h:167
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
MODE_SKIP
@ MODE_SKIP
Definition: hevcdec.h:157
HEVCLocalContext::end_of_tiles_x
int end_of_tiles_x
Definition: hevcdec.h:445
AVContentLightMetadata
Content light level needed by to transmit HDR over HDMI (CTA-861.3).
Definition: mastering_display_metadata.h:98
CodingUnit::x
int x
Definition: hevcdec.h:327
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
BOUNDARY_LEFT_TILE
#define BOUNDARY_LEFT_TILE
Definition: hevcdec.h:459
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:2069
golomb.h
exp golomb vlc stuff
AVCodecInternal::is_copy
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it.
Definition: internal.h:123
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
PART_2Nx2N
@ PART_2Nx2N
Definition: hevcdec.h:144
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:239
SET_SAO
#define SET_SAO(elem, value)
Definition: hevcdec.c:940
HEVCLocalContext::ctb_up_right_flag
uint8_t ctb_up_right_flag
Definition: hevcdec.h:443
ff_hevc_clear_refs
void ff_hevc_clear_refs(HEVCContext *s)
Mark all frames in DPB as unused for reference.
Definition: hevc_refs.c:66
PRED_BI
@ PRED_BI
Definition: hevcdec.h:163
U
#define U(x)
Definition: vp56_arith.h:37
ff_hevc_split_coding_unit_flag_decode
int ff_hevc_split_coding_unit_flag_decode(HEVCContext *s, int ct_depth, int x0, int y0)
Definition: hevc_cabac.c:686
fail
#define fail()
Definition: checkasm.h:123
PredictionUnit::intra_pred_mode_c
uint8_t intra_pred_mode_c[4]
Definition: hevcdec.h:365
ff_hevc_sao_merge_flag_decode
int ff_hevc_sao_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:561
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1785
md5
struct AVMD5 * md5
Definition: movenc.c:56
InterPredIdc
InterPredIdc
Definition: hevcdec.h:160
MODE_INTER
@ MODE_INTER
Definition: hevcdec.h:155
HEVCSEIA53Caption
Definition: hevc_sei.h:90
GetBitContext
Definition: get_bits.h:61
HEVCLocalContext::pu
PredictionUnit pu
Definition: hevcdec.h:455
decode_lt_rps
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
Definition: hevcdec.c:259
TransformUnit::res_scale_val
int res_scale_val
Definition: hevcdec.h:372
SliceHeader::short_term_ref_pic_set_size
int short_term_ref_pic_set_size
Definition: hevcdec.h:268
hevc_decoder_class
static const AVClass hevc_decoder_class
Definition: hevcdec.c:3551
val
static double val(void *priv, double ch)
Definition: aeval.c:76
HEVC_MAX_PPS_COUNT
@ HEVC_MAX_PPS_COUNT
Definition: hevc.h:114
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
ff_hevc_output_frame
int ff_hevc_output_frame(HEVCContext *s, AVFrame *out, int flush)
Find next frame in output order and put a reference to it in frame.
Definition: hevc_refs.c:174
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:714
SliceHeader::long_term_ref_pic_set_size
int long_term_ref_pic_set_size
Definition: hevcdec.h:271
ff_hevc_luma_mv_mvp_mode
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv, int mvp_lx_flag, int LX)
Definition: hevc_mvs.c:582
CTB
#define CTB(tab, x, y)
Definition: hevcdec.c:938
ff_reset_entries
void ff_reset_entries(AVCodecContext *avctx)
Definition: pthread_slice.c:238
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_hevc_skip_flag_decode
int ff_hevc_skip_flag_decode(HEVCContext *s, int x0, int y0, int x_cb, int y_cb)
Definition: hevc_cabac.c:618
ff_hevc_merge_flag_decode
int ff_hevc_merge_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:792
AVRational::num
int num
Numerator.
Definition: rational.h:59
HWACCEL_VIDEOTOOLBOX
@ HWACCEL_VIDEOTOOLBOX
Definition: ffmpeg.h:62
SliceHeader::slice_segment_addr
unsigned int slice_segment_addr
address (in raster order) of the first block in the current slice
Definition: hevcdec.h:253
hevc_parse.h
MvField::ref_idx
int8_t ref_idx[2]
Definition: hevcdec.h:346
ff_hevc_save_states
void ff_hevc_save_states(HEVCContext *s, int ctb_addr_ts)
Definition: hevc_cabac.c:450
ff_hevc_deblocking_boundary_strengths
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0, int log2_trafo_size)
Definition: hevc_filter.c:714
SAOParams::eo_class
int eo_class[3]
sao_eo_class
Definition: hevcdsp.h:38
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:400
ff_hevc_prev_intra_luma_pred_flag_decode
int ff_hevc_prev_intra_luma_pred_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:747
ff_hevc_decode_nal_sei
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s, const HEVCParamSets *ps, int type)
Definition: hevc_sei.c:360
ff_thread_report_progress2
void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
Definition: pthread_slice.c:174
first
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But first
Definition: rate_distortion.txt:12
hls_decode_entry_wpp
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
Definition: hevcdec.c:2488
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
AV_STEREO3D_FRAMESEQUENCE
@ AV_STEREO3D_FRAMESEQUENCE
Views are alternated temporally.
Definition: stereo3d.h:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
QPEL_EXTRA_AFTER
#define QPEL_EXTRA_AFTER
Definition: hevcdec.h:66
HEVC_NAL_BLA_N_LP
@ HEVC_NAL_BLA_N_LP
Definition: hevc.h:47
SAOParams::type_idx
uint8_t type_idx[3]
sao_type_idx
Definition: hevcdsp.h:42
av_cold
#define av_cold
Definition: attributes.h:90
TransformUnit::intra_pred_mode
int intra_pred_mode
Definition: hevcdec.h:375
ff_hevc_hls_mvd_coding
void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevc_cabac.c:1534
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
HEVC_NAL_RADL_R
@ HEVC_NAL_RADL_R
Definition: hevc.h:36
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:568
SliceHeader::cabac_init_flag
uint8_t cabac_init_flag
Definition: hevcdec.h:284
H2645NAL::size
int size
Definition: h2645_parse.h:35
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
hls_pcm_sample
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:1386
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
width
#define width
QPEL_EXTRA_BEFORE
#define QPEL_EXTRA_BEFORE
Definition: hevcdec.h:65
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
stereo3d.h
AVMasteringDisplayMetadata::white_point
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
Definition: mastering_display_metadata.h:47
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
s
#define s(width, name)
Definition: cbs_vp9.c:257
ff_thread_await_progress2
void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
Definition: pthread_slice.c:185
SAO_NOT_APPLIED
@ SAO_NOT_APPLIED
Definition: hevcdec.h:212
hls_sao_param
static void hls_sao_param(HEVCContext *s, int rx, int ry)
Definition: hevcdec.c:952
set_sps
static int set_sps(HEVCContext *s, const HEVCSPS *sps, enum AVPixelFormat pix_fmt)
Definition: hevcdec.c:450
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
ff_hevc_ref_idx_lx_decode
int ff_hevc_ref_idx_lx_decode(HEVCContext *s, int num_ref_idx_lx)
Definition: hevc_cabac.c:807
s1
#define s1
Definition: regdef.h:38
ff_hevc_nal_is_nonref
static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
Definition: hevcdec.h:636
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
luma_intra_pred_mode
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size, int prev_intra_luma_pred_flag)
8.4.1
Definition: hevcdec.c:1931
ff_hevc_set_new_ref
int ff_hevc_set_new_ref(HEVCContext *s, AVFrame **frame, int poc)
Definition: hevc_refs.c:135
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
SliceHeader::slice_rps
ShortTermRPS slice_rps
Definition: hevcdec.h:269
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ff_hevc_cu_transquant_bypass_flag_decode
int ff_hevc_cu_transquant_bypass_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:613
IS_IDR
#define IS_IDR(s)
Definition: hevcdec.h:77
ff_hevc_intra_chroma_pred_mode_decode
int ff_hevc_intra_chroma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:770
set_ct_depth
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0, int log2_cb_size, int ct_depth)
Definition: hevcdec.c:2011
H2645NAL::data
const uint8_t * data
Definition: h2645_parse.h:36
ff_hevc_slice_rpl
int ff_hevc_slice_rpl(HEVCContext *s)
Construct the reference picture list(s) for the current slice.
Definition: hevc_refs.c:291
RefPicList::ref
struct HEVCFrame * ref[HEVC_MAX_REFS]
Definition: hevcdec.h:239
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
ff_hevc_sao_offset_abs_decode
int ff_hevc_sao_offset_abs_decode(HEVCContext *s)
Definition: hevc_cabac.c:586
H2645NAL::skipped_bytes_pos
int * skipped_bytes_pos
Definition: h2645_parse.h:66
HEVC_SLICE_I
@ HEVC_SLICE_I
Definition: hevc.h:98
hls_coding_unit
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2119
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SliceHeader::size
int * size
Definition: hevcdec.h:304
SliceHeader::collocated_list
uint8_t collocated_list
Definition: hevcdec.h:287
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
ff_hevc_luma_mv_merge_mode
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevc_mvs.c:479
AVCOL_PRI_UNSPECIFIED
@ AVCOL_PRI_UNSPECIFIED
Definition: pixfmt.h:458
AVDISCARD_BIDIR
@ AVDISCARD_BIDIR
discard all bidirectional frames
Definition: avcodec.h:233
get_se_golomb
static int get_se_golomb(GetBitContext *gb)
read signed exp golomb code.
Definition: golomb.h:239
INTRA_ANGULAR_26
@ INTRA_ANGULAR_26
Definition: hevcdec.h:200
H2645NAL::type
int type
NAL unit type.
Definition: h2645_parse.h:52
CodingUnit::max_trafo_depth
uint8_t max_trafo_depth
MaxTrafoDepth.
Definition: hevcdec.h:335
SliceHeader::slice_ctb_addr_rs
int slice_ctb_addr_rs
Definition: hevcdec.h:323
int32_t
int32_t
Definition: audio_convert.c:194
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
arg
const char * arg
Definition: jacosubdec.c:66
AVStereo3D::flags
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
if
if(ret)
Definition: filter_design.txt:179
HEVC_NAL_IDR_N_LP
@ HEVC_NAL_IDR_N_LP
Definition: hevc.h:49
SliceHeader::pic_output_flag
uint8_t pic_output_flag
Definition: hevcdec.h:263
hls_slice_data_wpp
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
Definition: hevcdec.c:2569
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
ff_hevc_sao_offset_sign_decode
int ff_hevc_sao_offset_sign_decode(HEVCContext *s)
Definition: hevc_cabac.c:596
PredictionUnit::rem_intra_luma_pred_mode
int rem_intra_luma_pred_mode
Definition: hevcdec.h:361
H2645NAL::raw_size
int raw_size
Definition: h2645_parse.h:44
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
IS_BLA
#define IS_BLA(s)
Definition: hevcdec.h:78
ff_hevc_merge_idx_decode
int ff_hevc_merge_idx_decode(HEVCContext *s)
Definition: hevc_cabac.c:781
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
HEVC_SLICE_B
@ HEVC_SLICE_B
Definition: hevc.h:96
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
hevc_ref_frame
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
Definition: hevcdec.c:3268
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
HEVCLocalContext::tmp
int16_t tmp[MAX_PB_SIZE *MAX_PB_SIZE]
Definition: hevcdec.h:451
ff_hevc_ps_uninit
void ff_hevc_ps_uninit(HEVCParamSets *ps)
Definition: hevc_ps.c:1747
HEVC_NAL_PPS
@ HEVC_NAL_PPS
Definition: hevc.h:63
LongTermRPS::poc
int poc[32]
Definition: hevcdec.h:232
CodingUnit::cu_transquant_bypass_flag
uint8_t cu_transquant_bypass_flag
Definition: hevcdec.h:336
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
HEVCLocalContext::first_qp_group
uint8_t first_qp_group
Definition: hevcdec.h:429
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
hls_transform_unit
static int hls_transform_unit(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
Definition: hevcdec.c:1042
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2511
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
profiles.h
src
#define src
Definition: vp8dsp.c:254
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:276
L0
#define L0
Definition: hevcdec.h:59
HEVCFrame::rpl_tab
RefPicListTab ** rpl_tab
Definition: hevcdec.h:400
LongTermRPS::poc_msb_present
uint8_t poc_msb_present[32]
Definition: hevcdec.h:233
ff_hevc_pel_weight
const uint8_t ff_hevc_pel_weight[65]
Definition: hevcdec.c:47
HEVC_NAL_SEI_SUFFIX
@ HEVC_NAL_SEI_SUFFIX
Definition: hevc.h:69
HEVC_NAL_CRA_NUT
@ HEVC_NAL_CRA_NUT
Definition: hevc.h:50
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:695
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
PART_Nx2N
@ PART_Nx2N
Definition: hevcdec.h:146
RefPicListTab
Definition: hevcdec.h:245
BOUNDARY_UPPER_TILE
#define BOUNDARY_UPPER_TILE
Definition: hevcdec.h:461
vps
static int FUNC() vps(CodedBitstreamContext *ctx, RWContext *rw, H265RawVPS *current)
Definition: cbs_h265_syntax_template.c:420
ff_hevc_decode_extradata
int ff_hevc_decode_extradata(const uint8_t *data, int size, HEVCParamSets *ps, HEVCSEI *sei, int *is_nalff, int *nal_length_size, int err_recognition, int apply_defdispwin, void *logctx)
Definition: hevc_parse.c:80
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:1666
SliceHeader::nb_refs
unsigned int nb_refs[2]
Definition: hevcdec.h:279
Mv::x
int16_t x
horizontal component of motion vector
Definition: hevcdec.h:340
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
AVCodecContext::level
int level
level
Definition: avcodec.h:1982
HEVC_NAL_RASL_R
@ HEVC_NAL_RASL_R
Definition: hevc.h:38
PF_BI
@ PF_BI
Definition: hevcdec.h:170
SAMPLE_CTB
#define SAMPLE_CTB(tab, x, y)
Definition: hevcdec.h:75
HEVCWindow
Definition: hevc_ps.h:42
SCAN_HORIZ
@ SCAN_HORIZ
Definition: hevcdec.h:227
hevc_data.h
hevc_decode_frame
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, AVPacket *avpkt)
Definition: hevcdec.c:3205
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_hevc_frame_rps
int ff_hevc_frame_rps(HEVCContext *s)
Construct the reference picture sets for the current frame.
Definition: hevc_refs.c:443
HEVCLocalContext::edge_emu_buffer
uint8_t edge_emu_buffer[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:448
IS_IRAP
#define IS_IRAP(s)
Definition: hevcdec.h:80
LongTermRPS::used
uint8_t used[32]
Definition: hevcdec.h:234
SliceHeader::colour_plane_id
uint8_t colour_plane_id
RPS coded in the slice header itself is stored here.
Definition: hevcdec.h:264
ff_hevc_mvp_lx_flag_decode
int ff_hevc_mvp_lx_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:823
PART_nLx2N
@ PART_nLx2N
Definition: hevcdec.h:150
SliceHeader::dependent_slice_segment_flag
uint8_t dependent_slice_segment_flag
Definition: hevcdec.h:262
POS
#define POS(c_idx, x, y)
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
SliceHeader::first_slice_in_pic_flag
uint8_t first_slice_in_pic_flag
Definition: hevcdec.h:261
desc
const char * desc
Definition: nvenc.c:79
HEVCLocalContext::ctb_left_flag
uint8_t ctb_left_flag
Definition: hevcdec.h:441
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
chroma_mc_uni
static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist, int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
8.5.3.2.2.2 Chroma sample uniprediction interpolation process
Definition: hevcdec.c:1595
ff_hevc_pred_mode_decode
int ff_hevc_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:681
AVPacket::size
int size
Definition: packet.h:356
BOUNDARY_UPPER_SLICE
#define BOUNDARY_UPPER_SLICE
Definition: hevcdec.h:460
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
hevcdec.h
ff_hevc_set_neighbour_available
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0, int nPbW, int nPbH)
Definition: hevc_mvs.c:42
decode_nal_units
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
Definition: hevcdec.c:3051
SAOParams::offset_abs
int offset_abs[3][4]
sao_offset_abs
Definition: hevcdsp.h:33
AV_PIX_FMT_YUV422P10LE
@ AV_PIX_FMT_YUV422P10LE
planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
Definition: pixfmt.h:161
print_md5
static void print_md5(void *log_ctx, int level, uint8_t md5[16])
Definition: hevcdec.c:3110
INTRA_PLANAR
@ INTRA_PLANAR
Definition: hevcdec.h:174
HEVCFrame::rpl_buf
AVBufferRef * rpl_buf
Definition: hevcdec.h:407
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
ff_hevc_decode_nal_sps
int ff_hevc_decode_nal_sps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps, int apply_defdispwin)
Definition: hevc_ps.c:1250
PART_2NxnD
@ PART_2NxnD
Definition: hevcdec.h:149
size
int size
Definition: twinvq_data.h:11134
HEVC_NAL_BLA_W_LP
@ HEVC_NAL_BLA_W_LP
Definition: hevc.h:45
SCAN_VERT
@ SCAN_VERT
Definition: hevcdec.h:228
ff_hevc_compute_poc
int ff_hevc_compute_poc(const HEVCSPS *sps, int pocTid0, int poc_lsb, int nal_unit_type)
Compute POC of the current frame and return it.
Definition: hevc_ps.c:1763
H2645NAL::gb
GetBitContext gb
Definition: h2645_parse.h:47
intra_prediction_unit_default_value
static void intra_prediction_unit_default_value(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2096
SliceHeader::collocated_ref_idx
unsigned int collocated_ref_idx
Definition: hevcdec.h:289
SliceHeader::entry_point_offset
unsigned * entry_point_offset
Definition: hevcdec.h:302
H2645NAL
Definition: h2645_parse.h:32
hevc_await_progress
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref, const Mv *mv, int y0, int height)
Definition: hevcdec.c:1750
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:404
ff_hevc_decode_nal_vps
int ff_hevc_decode_nal_vps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:458
pic_arrays_free
static void pic_arrays_free(HEVCContext *s)
NOTE: Each function hls_foo correspond to the function foo in the specification (HLS stands for High ...
Definition: hevcdec.c:59
AVFrameSideData::data
uint8_t * data
Definition: frame.h:208
TransformUnit::chroma_mode_c
int chroma_mode_c
Definition: hevcdec.h:377
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
GetBitContext::index
int index
Definition: get_bits.h:67
SliceHeader::short_term_ref_pic_set_sps_flag
int short_term_ref_pic_set_sps_flag
Definition: hevcdec.h:267
SliceHeader::no_output_of_prior_pics_flag
uint8_t no_output_of_prior_pics_flag
Definition: hevcdec.h:276
SliceHeader::max_num_merge_cand
unsigned int max_num_merge_cand
5 - 5_minus_max_num_merge_cand
Definition: hevcdec.h:300
AVCodecHWConfigInternal
Definition: hwconfig.h:29
MvField
Definition: hevcdec.h:344
QPEL_EXTRA
#define QPEL_EXTRA
Definition: hevcdec.h:67
PF_L1
@ PF_L1
Definition: hevcdec.h:169
ff_hevc_unref_frame
void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
Definition: hevc_refs.c:32
split
static char * split(char *message, char delim)
Definition: af_channelmap.c:81
get_format
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:370
ff_h2645_packet_split
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length, void *logctx, int is_nalff, int nal_length_size, enum AVCodecID codec_id, int small_padding, int use_ref)
Split an input packet into NAL units.
Definition: h2645_parse.c:392
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
ff_hevc_pcm_flag_decode
int ff_hevc_pcm_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:742
av_content_light_metadata_create_side_data
AVContentLightMetadata * av_content_light_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVContentLightMetadata and add it to the frame.
Definition: mastering_display_metadata.c:55
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_hevc_cbf_cb_cr_decode
int ff_hevc_cbf_cb_cr_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:871
attributes.h
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
hls_slice_data
static int hls_slice_data(HEVCContext *s)
Definition: hevcdec.c:2477
TransformUnit::cu_qp_offset_cb
int8_t cu_qp_offset_cb
Definition: hevcdec.h:380
pic_arrays_init
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
Definition: hevcdec.c:87
AV_STEREO3D_FLAG_INVERT
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
HEVCFrame::rpl_tab_buf
AVBufferRef * rpl_tab_buf
Definition: hevcdec.h:406
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
MvField::pred_flag
int8_t pred_flag
Definition: hevcdec.h:347
HEVCLocalContext::ct_depth
int ct_depth
Definition: hevcdec.h:453
src0
#define src0
Definition: h264pred.c:138
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
ff_init_cabac_decoder
int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size)
Definition: cabac.c:176
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
PART_nRx2N
@ PART_nRx2N
Definition: hevcdec.h:151
EPEL_EXTRA_BEFORE
#define EPEL_EXTRA_BEFORE
Definition: hevcdec.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:282
SliceHeader::slice_cb_qp_offset
int slice_cb_qp_offset
Definition: hevcdec.h:292
SliceHeader
Definition: hevcdec.h:249
HEVCFrame::frame
AVFrame * frame
Definition: hevcdec.h:396
HEVC_NAL_TRAIL_R
@ HEVC_NAL_TRAIL_R
Definition: hevc.h:30
src1
#define src1
Definition: h264pred.c:139
hls_decode_entry
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
Definition: hevcdec.c:2416
ff_hevc_inter_pred_idc_decode
int ff_hevc_inter_pred_idc_decode(HEVCContext *s, int nPbW, int nPbH)
Definition: hevc_cabac.c:797
ff_hevc_cu_qp_delta_sign_flag
int ff_hevc_cu_qp_delta_sign_flag(HEVCContext *s)
Definition: hevc_cabac.c:660
hevc_frame_start
static int hevc_frame_start(HEVCContext *s)
Definition: hevcdec.c:2805
av_md5_init
void av_md5_init(AVMD5 *ctx)
Initialize MD5 hashing.
Definition: md5.c:143
SliceHeader::slice_sample_adaptive_offset_flag
uint8_t slice_sample_adaptive_offset_flag[3]
Definition: hevcdec.h:281
AVDISCARD_NONINTRA
@ AVDISCARD_NONINTRA
discard all non intra frames
Definition: avcodec.h:234
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
HEVCFrame
Definition: hevcdec.h:395
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
HEVCLocalContext::gb
GetBitContext gb
Definition: hevcdec.h:431
ff_hevc_cbf_luma_decode
int ff_hevc_cbf_luma_decode(HEVCContext *s, int trafo_depth)
Definition: hevc_cabac.c:876
internal.h
EPEL_EXTRA_AFTER
#define EPEL_EXTRA_AFTER
Definition: hevcdec.h:63
HEVCFrame::ctb_count
int ctb_count
Definition: hevcdec.h:401
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
SliceHeader::offset
int * offset
Definition: hevcdec.h:303
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
common.h
HEVCFrame::sequence
uint16_t sequence
A sequence counter, so that old frames are output first after a POC reset.
Definition: hevcdec.h:416
SliceHeader::mvd_l1_zero_flag
uint8_t mvd_l1_zero_flag
Definition: hevcdec.h:282
delta
float delta
Definition: vorbis_enc_data.h:457
md5.h
AV_CODEC_ID_HEVC
@ AV_CODEC_ID_HEVC
Definition: codec_id.h:223
ff_hevc_bump_frame
void ff_hevc_bump_frame(HEVCContext *s)
Definition: hevc_refs.c:233
av_always_inline
#define av_always_inline
Definition: attributes.h:49
HEVC_SLICE_P
@ HEVC_SLICE_P
Definition: hevc.h:97
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:583
PF_L0
@ PF_L0
Definition: hevcdec.h:168
EDGE_EMU_BUFFER_STRIDE
#define EDGE_EMU_BUFFER_STRIDE
Definition: hevcdec.h:69
HEVC_MAX_REFS
@ HEVC_MAX_REFS
Definition: hevc.h:119
tab_mode_idx
static const uint8_t tab_mode_idx[]
Definition: hevcdec.c:2024
uint8_t
uint8_t
Definition: audio_convert.c:194
cabac_functions.h
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
HEVCLocalContext::qp_y
int8_t qp_y
Definition: hevcdec.h:434
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
AVMasteringDisplayMetadata
Mastering display metadata capable of representing the color volume of the display used to master the...
Definition: mastering_display_metadata.h:38
HEVC_NAL_TSA_R
@ HEVC_NAL_TSA_R
Definition: hevc.h:32
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:512
SliceHeader::list_entry_lx
unsigned int list_entry_lx[2][32]
Definition: hevcdec.h:273
AVCodecContext::height
int height
Definition: avcodec.h:699
HEVCSEIA53Caption::buf_ref
AVBufferRef * buf_ref
Definition: hevc_sei.h:91
hevc_decode_extradata
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
Definition: hevcdec.c:3183
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
av_md5_final
void av_md5_final(AVMD5 *ctx, uint8_t *dst)
Finish hashing and output digest value.
Definition: md5.c:192
hevc_decode_init
static av_cold int hevc_decode_init(AVCodecContext *avctx)
Definition: hevcdec.c:3493
HEVCFrame::poc
int poc
Definition: hevcdec.h:402
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:534
hevc.h
ff_hevc_cu_chroma_qp_offset_idx
int ff_hevc_cu_chroma_qp_offset_idx(HEVCContext *s)
Definition: hevc_cabac.c:670
SAOParams
Definition: hevcdsp.h:32
SliceHeader::short_term_rps
const ShortTermRPS * short_term_rps
Definition: hevcdec.h:270
HEVC_NAL_VPS
@ HEVC_NAL_VPS
Definition: hevc.h:61
SliceHeader::cu_chroma_qp_offset_enabled_flag
uint8_t cu_chroma_qp_offset_enabled_flag
Definition: hevcdec.h:295
HEVC_NAL_IDR_W_RADL
@ HEVC_NAL_IDR_W_RADL
Definition: hevc.h:48
ff_hevc_cu_chroma_qp_offset_flag
int ff_hevc_cu_chroma_qp_offset_flag(HEVCContext *s)
Definition: hevc_cabac.c:665
ret
ret
Definition: filter_design.txt:187
H2645NAL::raw_data
const uint8_t * raw_data
Definition: h2645_parse.h:45
ff_hevc_reset_sei
void ff_hevc_reset_sei(HEVCSEI *s)
Reset SEI values that are stored on the Context.
Definition: hevc_sei.c:373
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
PRED_L1
@ PRED_L1
Definition: hevcdec.h:162
PredictionUnit::mvd
Mv mvd
Definition: hevcdec.h:363
SliceHeader::disable_deblocking_filter_flag
uint8_t disable_deblocking_filter_flag
slice_header_disable_deblocking_filter_flag
Definition: hevcdec.h:285
ff_hevc_dsp_init
void ff_hevc_dsp_init(HEVCDSPContext *hevcdsp, int bit_depth)
Definition: hevcdsp.c:126
HEVCLocalContext::edge_emu_buffer2
uint8_t edge_emu_buffer2[(MAX_PB_SIZE+7) *EDGE_EMU_BUFFER_STRIDE *2]
Definition: hevcdec.h:450
AV_EF_CRCCHECK
#define AV_EF_CRCCHECK
Verify checksums embedded in the bitstream (could be of either encoded or decoded data,...
Definition: avcodec.h:1663
AVStereo3D::type
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
sps
static int FUNC() sps(CodedBitstreamContext *ctx, RWContext *rw, H264RawSPS *current)
Definition: cbs_h264_syntax_template.c:260
av_md5_update
void av_md5_update(AVMD5 *ctx, const uint8_t *src, int len)
Update hash value.
Definition: md5.c:154
hevc_init_context
static av_cold int hevc_init_context(AVCodecContext *avctx)
Definition: hevcdec.c:3354
pos
unsigned int pos
Definition: spdifenc.c:412
hevc_luma_mv_mvp_mode
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int part_idx, int merge_idx, MvField *mv)
Definition: hevcdec.c:1760
ff_thread_finish_setup
Frame-threading note (from FFmpeg's frame-threading documentation): all setup of state shared between threads must finish before the decode process starts; call ff_thread_finish_setup() at that point.
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:401
HEVC_NAL_EOS_NUT
@ HEVC_NAL_EOS_NUT
Definition: hevc.h:65
ff_hevc_rem_intra_luma_pred_mode_decode
int ff_hevc_rem_intra_luma_pred_mode_decode(HEVCContext *s)
Definition: hevc_cabac.c:760
ff_hevc_frame_nb_refs
int ff_hevc_frame_nb_refs(const HEVCContext *s)
Get the number of candidate references for the current frame.
Definition: hevc_refs.c:503
hls_prediction_unit
static void hls_prediction_unit(HEVCContext *s, int x0, int y0, int nPbW, int nPbH, int log2_cb_size, int partIdx, int idx)
Definition: hevcdec.c:1805
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
HEVCLocalContext::boundary_flags
int boundary_flags
Definition: hevcdec.h:464
HEVC_NAL_TRAIL_N
@ HEVC_NAL_TRAIL_N
Definition: hevc.h:29
LongTermRPS
Definition: hevcdec.h:231
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
SliceHeader::slice_type
enum HEVCSliceType slice_type
Definition: hevcdec.h:257
ff_hevc_flush_dpb
void ff_hevc_flush_dpb(HEVCContext *s)
Drop all frames currently in DPB.
Definition: hevc_refs.c:75
HEVC_NAL_AUD
@ HEVC_NAL_AUD
Definition: hevc.h:64
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
SliceHeader::slice_qp
int8_t slice_qp
Definition: hevcdec.h:307
hls_coding_quadtree
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0, int log2_cb_size, int cb_depth)
Definition: hevcdec.c:2282
av_mastering_display_metadata_create_side_data
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
Definition: mastering_display_metadata.c:32
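As a rough illustration of how this helper is used when exporting HDR metadata to a frame. A sketch only: attach_mdm() is a hypothetical helper and the luminance values are invented, not taken from any real stream (the decoder fills them from the mastering-display SEI instead):

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/mastering_display_metadata.h>
    #include <libavutil/rational.h>

    /* Attach mastering-display metadata to a frame, roughly what a decoder
     * does after parsing the corresponding SEI message. */
    static int attach_mdm(AVFrame *frame)
    {
        AVMasteringDisplayMetadata *mdm =
            av_mastering_display_metadata_create_side_data(frame);
        if (!mdm)
            return AVERROR(ENOMEM);
        mdm->max_luminance = av_make_q(1000, 1);   /* cd/m^2, example value */
        mdm->min_luminance = av_make_q(5, 10000);  /* cd/m^2, example value */
        mdm->has_luminance = 1;
        return 0;
    }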
SUBDIVIDE
#define SUBDIVIDE(x, y, idx)
PredictionUnit::merge_flag
uint8_t merge_flag
Definition: hevcdec.h:364
av_md5_alloc
struct AVMD5 * av_md5_alloc(void)
Allocate an AVMD5 context.
Definition: md5.c:48
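The three MD5 entries above (av_md5_update(), av_md5_final(), av_md5_alloc()) are normally used as one sequence. A small self-contained sketch, assuming av_md5_init() from the same header; print_md5() is an invented helper:

    #include <stdint.h>
    #include <stdio.h>
    #include <libavutil/md5.h>
    #include <libavutil/mem.h>

    /* Hash a buffer with the libavutil MD5 helpers and print the digest. */
    static void print_md5(const uint8_t *data, int size)
    {
        uint8_t digest[16];
        struct AVMD5 *ctx = av_md5_alloc();   /* opaque MD5 context        */
        if (!ctx)
            return;
        av_md5_init(ctx);                     /* reset the context         */
        av_md5_update(ctx, data, size);       /* may be called repeatedly  */
        av_md5_final(ctx, digest);            /* write the 16-byte digest  */
        for (int i = 0; i < 16; i++)
            printf("%02x", digest[i]);
        printf("\n");
        av_free(ctx);
    }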
AV_PKT_DATA_NEW_EXTRADATA
@ AV_PKT_DATA_NEW_EXTRADATA
The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format that the extradata buffer was...
Definition: packet.h:55
AVRational::den
int den
Denominator.
Definition: rational.h:60
pred_weight_table
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
Definition: hevcdec.c:143
SliceHeader::slice_cr_qp_offset
int slice_cr_qp_offset
Definition: hevcdec.h:293
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
HEVCContext
Definition: hevcdec.h:467
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1859
CodingUnit::pred_mode
enum PredMode pred_mode
PredMode.
Definition: hevcdec.h:330
SliceHeader::pic_order_cnt_lsb
int pic_order_cnt_lsb
Definition: hevcdec.h:259
ff_hevc_cabac_init
int ff_hevc_cabac_init(HEVCContext *s, int ctb_addr_ts)
Definition: hevc_cabac.c:504
HEVCLocalContext::qPy_pred
int qPy_pred
Definition: hevcdec.h:437
HEVCFrame::tab_mvf_buf
AVBufferRef * tab_mvf_buf
Definition: hevcdec.h:405
SCAN_DIAG
@ SCAN_DIAG
Definition: hevcdec.h:226
SliceHeader::rpl_modification_flag
uint8_t rpl_modification_flag[2]
Definition: hevcdec.h:275
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
AVMasteringDisplayMetadata::min_luminance
AVRational min_luminance
Min luminance of mastering display (cd/m^2).
Definition: mastering_display_metadata.h:52
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:2193
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
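A minimal sketch of the AVBuffer reference-counting pattern this function belongs to; av_buffer_alloc() and av_buffer_unref() come from the same header, and the names below are illustrative:

    #include <libavutil/buffer.h>

    /* Two references to one buffer: the data stays alive until the last
     * reference is unreferenced. */
    static void buffer_ref_demo(void)
    {
        AVBufferRef *a = av_buffer_alloc(1024);
        AVBufferRef *b;
        if (!a)
            return;
        b = av_buffer_ref(a);       /* new reference to the same data  */
        if (!b) {
            av_buffer_unref(&a);
            return;
        }
        av_buffer_unref(&a);        /* data still alive through b      */
        av_buffer_unref(&b);        /* last reference gone, data freed */
    }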
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
HEVCLocalContext::tu
TransformUnit tu
Definition: hevcdec.h:439
hls_cross_component_pred
static int hls_cross_component_pred(HEVCContext *s, int idx)
Definition: hevcdec.c:1026
hls_slice_header
static int hls_slice_header(HEVCContext *s)
Definition: hevcdec.c:506
CodingUnit::y
int y
Definition: hevcdec.h:328
set_side_data
static int set_side_data(HEVCContext *s)
Definition: hevcdec.c:2667
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1885
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
Mv
Definition: hevcdec.h:339
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
HEVC_NAL_SPS
@ HEVC_NAL_SPS
Definition: hevc.h:62
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
PRED_L0
@ PRED_L0
Definition: hevcdec.h:161
get_bitsz
static av_always_inline int get_bitsz(GetBitContext *s, int n)
Read 0-25 bits.
Definition: get_bits.h:415
HEVCVPS
Definition: hevc_ps.h:123
mastering_display_metadata.h
get_ue_golomb_long
static unsigned get_ue_golomb_long(GetBitContext *gb)
Read an unsigned Exp-Golomb code in the range 0 to UINT32_MAX-1.
Definition: golomb.h:105
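For reference, unsigned Exp-Golomb coding writes a value v as the leading zero bits of v+1, a marker one bit, and then the remaining bits of v+1. A standalone decoding sketch under that definition, not FFmpeg's GetBitContext-based implementation; read_bit() is a hypothetical bit-reader callback:

    /* Decode one unsigned Exp-Golomb value: count leading zero bits,
     * consume the terminating one bit, read that many suffix bits, and
     * return (1 << zeros) - 1 + suffix. */
    static unsigned ue_golomb(unsigned (*read_bit)(void *ctx), void *ctx)
    {
        int zeros = 0;
        unsigned suffix = 0;
        while (read_bit(ctx) == 0)
            zeros++;
        for (int i = 0; i < zeros; i++)
            suffix = (suffix << 1) | read_bit(ctx);
        return (1u << zeros) - 1 + suffix;
    }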
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:75
ff_hevc_sao_band_position_decode
int ff_hevc_sao_band_position_decode(HEVCContext *s)
Definition: hevc_cabac.c:576
EPEL_EXTRA
#define EPEL_EXTRA
Definition: hevcdec.h:64
ff_hevc_part_mode_decode
int ff_hevc_part_mode_decode(HEVCContext *s, int log2_cb_size)
Definition: hevc_cabac.c:705
s0
#define s0
Definition: regdef.h:37
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
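A rough usage sketch; attach_stereo3d() is a hypothetical helper, and in the decoder these fields are set from the frame-packing SEI rather than hard-coded:

    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/stereo3d.h>

    /* Tag a frame as side-by-side packed stereo. */
    static int attach_stereo3d(AVFrame *frame)
    {
        AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
        if (!s3d)
            return AVERROR(ENOMEM);
        s3d->type = AV_STEREO3D_SIDEBYSIDE;
        s3d->view = AV_STEREO3D_VIEW_PACKED;
        return 0;
    }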
HEVCSPS
Definition: hevc_ps.h:153
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:206
ff_thread_get_format
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:972
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
HEVCPPS
Definition: hevc_ps.h:249
CodingUnit::part_mode
enum PartMode part_mode
PartMode.
Definition: hevcdec.h:331
AVStereo3D::view
enum AVStereo3DView view
Determines which views are packed.
Definition: stereo3d.h:190
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
SliceHeader::tc_offset
int tc_offset
tc_offset_div2 * 2
Definition: hevcdec.h:298
LongTermRPS::nb_refs
uint8_t nb_refs
Definition: hevcdec.h:235
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVContentLightMetadata::MaxFALL
unsigned MaxFALL
Max average light level per frame (cd/m^2).
Definition: mastering_display_metadata.h:107
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
TransformUnit::cross_pf
uint8_t cross_pf
Definition: hevcdec.h:382
SAOParams::offset_val
int16_t offset_val[3][5]
SaoOffsetVal.
Definition: hevcdsp.h:40
HEVCLocalContext::cu
CodingUnit cu
Definition: hevcdec.h:454
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
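The grow-only reuse pattern this helper enables, as a small sketch; the Scratch struct and ensure_scratch() are invented for illustration:

    #include <stdint.h>
    #include <libavutil/error.h>
    #include <libavutil/mem.h>

    /* A scratch buffer that only ever grows: av_fast_malloc() reallocates
     * only when the recorded size is smaller than min_size, so repeated
     * calls with similar sizes avoid per-call allocations. */
    typedef struct Scratch {
        uint8_t     *buf;
        unsigned int buf_size;   /* updated by av_fast_malloc() */
    } Scratch;

    static int ensure_scratch(Scratch *s, size_t needed)
    {
        av_fast_malloc(&s->buf, &s->buf_size, needed);
        return s->buf ? 0 : AVERROR(ENOMEM);
    }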
SliceHeader::pps_id
unsigned int pps_id
address (in raster order) of the first block in the current slice segment
Definition: hevcdec.h:250
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
bytestream.h
ff_hevc_decode_short_term_rps
int ff_hevc_decode_short_term_rps(GetBitContext *gb, AVCodecContext *avctx, ShortTermRPS *rps, const HEVCSPS *sps, int is_slice_header)
Definition: hevc_ps.c:119
PredictionUnit::mpm_idx
int mpm_idx
Definition: hevcdec.h:360
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
HEVC_NAL_FD_NUT
@ HEVC_NAL_FD_NUT
Definition: hevc.h:67
PredictionUnit::chroma_mode_c
uint8_t chroma_mode_c[4]
Definition: hevcdec.h:366
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
skip_bytes
static const av_unused uint8_t * skip_bytes(CABACContext *c, int n)
Skip n bytes and reset the decoder.
Definition: cabac_functions.h:197
PredictionUnit::intra_pred_mode
uint8_t intra_pred_mode[4]
Definition: hevcdec.h:362
ff_hevc_decode_nal_pps
int ff_hevc_decode_nal_pps(GetBitContext *gb, AVCodecContext *avctx, HEVCParamSets *ps)
Definition: hevc_ps.c:1499
TransformUnit::is_cu_chroma_qp_offset_coded
uint8_t is_cu_chroma_qp_offset_coded
Definition: hevcdec.h:379
hls_transform_tree
static int hls_transform_tree(HEVCContext *s, int x0, int y0, int xBase, int yBase, int cb_xBase, int cb_yBase, int log2_cb_size, int log2_trafo_size, int trafo_depth, int blk_idx, const int *base_cbf_cb, const int *base_cbf_cr)
Definition: hevcdec.c:1263
h
h
Definition: vp9dsp_template.c:2038
BOUNDARY_LEFT_SLICE
#define BOUNDARY_LEFT_SLICE
Definition: hevcdec.h:458
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface,...
Definition: stereo3d.h:176
SliceHeader::slice_qp_delta
int slice_qp_delta
Definition: hevcdec.h:291
SliceHeader::slice_addr
unsigned int slice_addr
Definition: hevcdec.h:255
ff_hevc_decoder
AVCodec ff_hevc_decoder
Definition: hevcdec.c:3558
ff_hevc_log2_res_scale_abs
int ff_hevc_log2_res_scale_abs(HEVCContext *s, int idx)
Definition: hevc_cabac.c:896
HEVC_NAL_EOB_NUT
@ HEVC_NAL_EOB_NUT
Definition: hevc.h:66
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
TransformUnit::intra_pred_mode_c
int intra_pred_mode_c
Definition: hevcdec.h:376
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:232
HEVC_NAL_SEI_PREFIX
@ HEVC_NAL_SEI_PREFIX
Definition: hevc.h:68
HEVCLocalContext::end_of_tiles_y
int end_of_tiles_y
Definition: hevcdec.h:446
CodingUnit::intra_split_flag
uint8_t intra_split_flag
IntraSplitFlag.
Definition: hevcdec.h:334
ff_hevc_end_of_slice_flag_decode
int ff_hevc_end_of_slice_flag_decode(HEVCContext *s)
Definition: hevc_cabac.c:608
intra_prediction_unit
static void intra_prediction_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
Definition: hevcdec.c:2028
SHIFT_CTB_WPP
#define SHIFT_CTB_WPP
Definition: hevcdec.h:46
av_color_transfer_name
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2918
luma_mc_uni
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, AVFrame *ref, const Mv *mv, int x_off, int y_off, int block_w, int block_h, int luma_weight, int luma_offset)
8.5.3.2.2.1 Luma sample unidirectional interpolation process
Definition: hevcdec.c:1443
PART_2NxN
@ PART_2NxN
Definition: hevcdec.h:145
HEVCParamSets::vps_list
AVBufferRef * vps_list[HEVC_MAX_VPS_COUNT]
Definition: hevc_ps.h:328
SliceHeader::long_term_rps
LongTermRPS long_term_rps
Definition: hevcdec.h:272
HEVCLocalContext::cc
CABACContext cc
Definition: hevcdec.h:432
TransformUnit::cu_qp_offset_cr
int8_t cu_qp_offset_cr
Definition: hevcdec.h:381
ff_alloc_entries
int ff_alloc_entries(AVCodecContext *avctx, int count)
Definition: pthread_slice.c:201
options
static const AVOption options[]
Definition: hevcdec.c:3543
HEVC_CONTEXTS
#define HEVC_CONTEXTS
Definition: hevcdec.h:55
HEVCParamSets
Definition: hevc_ps.h:327