48 const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
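/* Illustrative sketch (assumed usage, not verbatim from this listing): the
 * designated-initializer table above maps a prediction-block width to the row
 * of the width-binned DSP function tables, e.g. block_w == 16 -> idx == 5. */
int idx = ff_hevc_pel_weight[block_w];
s->hevcdsp.put_hevc_qpel[idx][!!my][!!mx](tmp, src, srcstride, block_h, mx, my, block_w);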
93 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
94 ((height >> log2_min_cb_size) + 1);
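/* Worked example of the sizing above: for a 1920x1080 stream with
 * log2_min_cb_size == 3, this gives (240 + 1) * (135 + 1) = 32776 entries. */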
148 uint8_t luma_weight_l0_flag[16];
149 uint8_t chroma_weight_l0_flag[16];
150 uint8_t luma_weight_l1_flag[16];
151 uint8_t chroma_weight_l1_flag[16];
152 int luma_log2_weight_denom;
155 if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
161 int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
162 if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
171 if (!luma_weight_l0_flag[i]) {
178 chroma_weight_l0_flag[i] = get_bits1(gb);
181 chroma_weight_l0_flag[i] = 0;
184 if (luma_weight_l0_flag[i]) {
186 if ((int8_t)delta_luma_weight_l0 != delta_luma_weight_l0)
191 if (chroma_weight_l0_flag[i]) {
192 for (j = 0; j < 2; j++) {
196 if ( (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
197 || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
215 if (!luma_weight_l1_flag[i]) {
222 chroma_weight_l1_flag[i] = get_bits1(gb);
225 chroma_weight_l1_flag[i] = 0;
228 if (luma_weight_l1_flag[i]) {
230 if ((int8_t)delta_luma_weight_l1 != delta_luma_weight_l1)
235 if (chroma_weight_l1_flag[i]) {
236 for (j = 0; j < 2; j++) {
240 if ( (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
241 || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
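/* Illustrative sketch (assumption, simplified): the weights and offsets parsed
 * above feed a uni-directional weighted sample prediction of roughly this
 * shape; bit-depth scaling of the offset and the final clip are omitted. */
static inline int weighted_sample(int pred, int w, int o, int log2_denom)
{
    int round = log2_denom > 0 ? 1 << (log2_denom - 1) : 0;
    return ((pred * w + round) >> log2_denom) + o;
}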
264 int prev_delta_msb = 0;
265 unsigned int nb_sps = 0, nb_sh;
283 for (i = 0; i < rps->nb_refs; i++) {
303 if (i && i != nb_sps)
304 delta += prev_delta_msb;
310 prev_delta_msb = delta;
323 unsigned int num = 0, den = 0;
328 avctx->width = sps->width - ow->left_offset - ow->right_offset;
329 avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
369 if (num != 0 && den != 0)
392 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
393 CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
394 CONFIG_HEVC_NVDEC_HWACCEL + \
395 CONFIG_HEVC_VAAPI_HWACCEL + \
396 CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
397 CONFIG_HEVC_VDPAU_HWACCEL)
403 #if CONFIG_HEVC_DXVA2_HWACCEL
406 #if CONFIG_HEVC_D3D11VA_HWACCEL
410 #if CONFIG_HEVC_VAAPI_HWACCEL
413 #if CONFIG_HEVC_VDPAU_HWACCEL
416 #if CONFIG_HEVC_NVDEC_HWACCEL
419 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
424 #if CONFIG_HEVC_DXVA2_HWACCEL
427 #if CONFIG_HEVC_D3D11VA_HWACCEL
431 #if CONFIG_HEVC_VAAPI_HWACCEL
434 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
437 #if CONFIG_HEVC_VDPAU_HWACCEL
440 #if CONFIG_HEVC_NVDEC_HWACCEL
445 #if CONFIG_HEVC_VDPAU_HWACCEL
448 #if CONFIG_HEVC_NVDEC_HWACCEL
454 #if CONFIG_HEVC_VAAPI_HWACCEL
461 #if CONFIG_HEVC_VDPAU_HWACCEL
464 #if CONFIG_HEVC_NVDEC_HWACCEL
500 for (i = 0; i < 3; i++) {
509 for(c_idx = 0; c_idx < c_count; c_idx++) {
575 if (sps->width != last_sps->width || sps->height != last_sps->height ||
577 last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
601 int slice_address_length;
611 "Invalid slice segment address: %u.\n",
660 "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
676 int numbits, rps_idx;
684 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
790 "Invalid collocated_ref_idx: %d.\n",
807 "Invalid number of merging MVP candidates: %d.\n",
834 int deblocking_filter_override_flag = 0;
837 deblocking_filter_override_flag = get_bits1(gb);
839 if (deblocking_filter_override_flag) {
844 if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
845 tc_offset_div2 < -6 || tc_offset_div2 > 6) {
847 "Invalid deblock filter offsets: %d, %d\n",
848 beta_offset_div2, tc_offset_div2);
891 if (offset_len < 1 || offset_len > 32) {
927 for (i = 0; i < length; i++)
936 "The slice_qp %d is outside the valid range "
968 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
970 #define SET_SAO(elem, value) \
972 if (!sao_merge_up_flag && !sao_merge_left_flag) \
974 else if (sao_merge_left_flag) \
975 sao->elem = CTB(s->sao, rx-1, ry).elem; \
976 else if (sao_merge_up_flag) \
977 sao->elem = CTB(s->sao, rx, ry-1).elem; \
985 int sao_merge_left_flag = 0;
986 int sao_merge_up_flag = 0;
996 if (ry > 0 && !sao_merge_left_flag) {
1021 for (i = 0; i < 4; i++)
1025 for (i = 0; i < 4; i++) {
1027 SET_SAO(offset_sign[c_idx][i],
1034 } else if (c_idx != 2) {
1040 for (i = 0; i < 4; i++) {
1048 sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
1060 if (log2_res_scale_abs_plus1 != 0) {
1063 (1 - 2 * res_scale_sign_flag);
1073 int xBase, int yBase, int cb_xBase, int cb_yBase,
1074 int log2_cb_size, int log2_trafo_size,
1075 int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
1078 const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
1082 int trafo_size = 1 << log2_trafo_size;
1088 if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
1092 int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
1094 (cbf_cb[1] || cbf_cr[1]));
1106 "The cu_qp_delta %d is outside the valid range "
1120 if (cu_chroma_qp_offset_flag) {
1121 int cu_chroma_qp_offset_idx = 0;
1125 "cu_chroma_qp_offset_idx not yet tested.\n");
1159 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1160 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1171 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
1175 log2_trafo_size_c, scan_idx_c, 1);
1183 int size = 1 << log2_trafo_size_c;
1187 for (i = 0; i < (size * size); i++) {
1200 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
1204 log2_trafo_size_c, scan_idx_c, 2);
1212 int size = 1 << log2_trafo_size_c;
1216 for (i = 0; i < (size * size); i++) {
1223 int trafo_size_h = 1 << (log2_trafo_size + 1);
1224 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1228 trafo_size_h, trafo_size_v);
1229 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
1233 log2_trafo_size, scan_idx_c, 1);
1238 trafo_size_h, trafo_size_v);
1239 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
1243 log2_trafo_size, scan_idx_c, 2);
1248 int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
1249 int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
1255 trafo_size_h, trafo_size_v);
1256 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
1257 s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
1259 } else if (blk_idx == 3) {
1260 int trafo_size_h = 1 << (log2_trafo_size + 1);
1261 int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
1263 trafo_size_h, trafo_size_v);
1268 trafo_size_h, trafo_size_v);
1269 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
1270 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
1280 int cb_size = 1 << log2_cb_size;
1288 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1289 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1290 s->is_pcm[i + j * min_pu_width] = 2;
1294 int xBase, int yBase, int cb_xBase, int cb_yBase,
1295 int log2_cb_size, int log2_trafo_size,
1296 int trafo_depth, int blk_idx,
1297 const int *base_cbf_cb, const int *base_cbf_cr)
1305 cbf_cb[0] = base_cbf_cb[0];
1306 cbf_cb[1] = base_cbf_cb[1];
1307 cbf_cr[0] = base_cbf_cr[0];
1308 cbf_cr[1] = base_cbf_cr[1];
1311 if (trafo_depth == 1) {
1327 if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1329 trafo_depth < lc->cu.max_trafo_depth &&
1344 if (trafo_depth == 0 || cbf_cb[0]) {
1351 if (trafo_depth == 0 || cbf_cr[0]) {
1359 if (split_transform_flag) {
1360 const int trafo_size_split = 1 << (log2_trafo_size - 1);
1361 const int x1 = x0 + trafo_size_split;
1362 const int y1 = y0 + trafo_size_split;
1364 #define SUBDIVIDE(x, y, idx) \
1366 ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1367 log2_trafo_size - 1, trafo_depth + 1, idx, \
1386 cbf_cb[0] || cbf_cr[0] ||
1392 log2_cb_size, log2_trafo_size,
1393 blk_idx, cbf_luma, cbf_cb, cbf_cr);
1399 for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1400 for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1401 int x_tu = (x0 + j) >> log2_min_tu_size;
1402 int y_tu = (y0 + i) >> log2_min_tu_size;
1403 s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1420 int cb_size = 1 << log2_cb_size;
1475 int block_w, int block_h, int luma_weight, int luma_offset)
1479 ptrdiff_t srcstride = ref->linesize[0];
1488 x_off += mv->x >> 2;
1489 y_off += mv->y >> 2;
1500 edge_emu_stride, srcstride,
1504 pic_width, pic_height);
1506 srcstride = edge_emu_stride;
1511 block_h, mx, my, block_w);
1515 luma_weight, luma_offset, mx, my, block_w);
1535 AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
1536 int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
1539 ptrdiff_t src0stride = ref0->linesize[0];
1540 ptrdiff_t src1stride = ref1->linesize[0];
1543 int mx0 = mv0->x & 3;
1544 int my0 = mv0->y & 3;
1545 int mx1 = mv1->x & 3;
1546 int my1 = mv1->y & 3;
1549 int x_off0 = x_off + (mv0->x >> 2);
1550 int y_off0 = y_off + (mv0->y >> 2);
1551 int x_off1 = x_off + (mv1->x >> 2);
1552 int y_off1 = y_off + (mv1->y >> 2);
1566 edge_emu_stride, src0stride,
1570 pic_width, pic_height);
1572 src0stride = edge_emu_stride;
1583 edge_emu_stride, src1stride,
1587 pic_width, pic_height);
1589 src1stride = edge_emu_stride;
1593 block_h, mx0, my0, block_w);
1596 block_h, mx1, my1, block_w);
1626 ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
1627 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
1632 const Mv *mv = &current_mv->mv[reflist];
1638 intptr_t mx = av_mod_uintp2(mv->x, 2 + hshift);
1639 intptr_t my = av_mod_uintp2(mv->y, 2 + vshift);
1640 intptr_t _mx = mx << (1 - hshift);
1641 intptr_t _my = my << (1 - vshift);
1643 x_off += mv->x >> (2 + hshift);
1644 y_off += mv->y >> (2 + vshift);
1655 edge_emu_stride, srcstride,
1659 pic_width, pic_height);
1662 srcstride = edge_emu_stride;
1666 block_h, _mx, _my, block_w);
1670 chroma_weight, chroma_offset, _mx, _my, block_w);
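/* Illustrative reading of the chroma MV handling above, assuming 4:2:0
 * (hshift == vshift == 1) and a luma MV component of +21 quarter-pels:
 *   fraction: av_mod_uintp2(21, 2 + 1) == (21 & 7) == 5   -> 5/8-pel chroma phase
 *   integer : 21 >> (2 + 1)            == 2               -> chroma-sample offset
 * so the chroma block is fetched 2 samples over and interpolated at 5/8 pel. */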
1691 int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
1696 ptrdiff_t src1stride = ref0->linesize[cidx+1];
1697 ptrdiff_t src2stride = ref1->linesize[cidx+1];
1702 Mv *mv0 = &current_mv->mv[0];
1703 Mv *mv1 = &current_mv->mv[1];
1707 intptr_t mx0 = av_mod_uintp2(mv0->x, 2 + hshift);
1708 intptr_t my0 = av_mod_uintp2(mv0->y, 2 + vshift);
1709 intptr_t mx1 = av_mod_uintp2(mv1->x, 2 + hshift);
1710 intptr_t my1 = av_mod_uintp2(mv1->y, 2 + vshift);
1711 intptr_t _mx0 = mx0 << (1 - hshift);
1712 intptr_t _my0 = my0 << (1 - vshift);
1713 intptr_t _mx1 = mx1 << (1 - hshift);
1714 intptr_t _my1 = my1 << (1 - vshift);
1716 int x_off0 = x_off + (mv0->x >> (2 + hshift));
1717 int y_off0 = y_off + (mv0->y >> (2 + vshift));
1718 int x_off1 = x_off + (mv1->x >> (2 + hshift));
1719 int y_off1 = y_off + (mv1->y >> (2 + vshift));
1733 edge_emu_stride, src1stride,
1737 pic_width, pic_height);
1740 src1stride = edge_emu_stride;
1752 edge_emu_stride, src2stride,
1756 pic_width, pic_height);
1759 src2stride = edge_emu_stride;
1763 block_h, _mx0, _my0, block_w);
1766 src2, src2stride, lc->tmp,
1767 block_h, _mx1, _my1, block_w);
1770 src2, src2stride, lc->tmp,
1777 _mx1, _my1, block_w);
1784 int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
1791 int nPbH, int log2_cb_size, int part_idx,
1803 if (inter_pred_idc != PRED_L1) {
1811 part_idx, merge_idx, mv, mvp_flag, 0);
1816 if (inter_pred_idc != PRED_L0) {
1829 part_idx, merge_idx, mv, mvp_flag, 1);
1837 int log2_cb_size, int partIdx, int idx)
1839 #define POS(c_idx, x, y) \
1840 &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1841 (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1844 struct MvField current_mv = {{{ 0 }}};
1856 int x_cb = x0 >> log2_min_cb_size;
1857 int y_cb = y0 >> log2_min_cb_size;
1873 partIdx, merge_idx, &current_mv);
1876 partIdx, merge_idx, &current_mv);
1884 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1887 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1893 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1906 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1912 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1915 0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1925 &current_mv.mv[1], x0, y0, nPbW, nPbH,
1931 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1935 1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
1945 &current_mv.mv[0], x0, y0, nPbW, nPbH,
1946 ref1->frame, &current_mv.mv[1], &current_mv);
1950 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);
1953 x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
1962 int prev_intra_luma_pred_flag)
1980 int intra_pred_mode;
1985 if ((y0 - 1) < y_ctb)
1988 if (cand_left == cand_up) {
1989 if (cand_left < 2) {
1994 candidate[0] = cand_left;
1995 candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1996 candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1999 candidate[0] = cand_left;
2000 candidate[1] = cand_up;
2010 if (prev_intra_luma_pred_flag) {
2011 intra_pred_mode = candidate[lc->
pu.
mpm_idx];
2013 if (candidate[0] > candidate[1])
2015 if (candidate[0] > candidate[2])
2017 if (candidate[1] > candidate[2])
2021 for (i = 0; i < 3; i++)
2022 if (intra_pred_mode >= candidate[i])
2029 for (i = 0; i < size_in_pus; i++) {
2030 memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
2031 intra_pred_mode, size_in_pus);
2033 for (j = 0; j < size_in_pus; j++) {
2038 return intra_pred_mode;
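/* Hand-worked example of the candidate derivation above: if both neighbouring
 * PUs report the same angular mode, e.g. cand_left == cand_up == 10, then
 *   candidate[0] = 10
 *   candidate[1] = 2 + ((10 - 2 - 1 + 32) & 31) = 2 + 7 = 9
 *   candidate[2] = 2 + ((10 - 2 + 1) & 31)      = 2 + 9 = 11
 * i.e. the remaining most-probable modes are the angular neighbours, with
 * wrap-around kept inside the angular range [2, 34]. */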
2042 int log2_cb_size, int ct_depth)
2055 0, 1, 2, 2, 2, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 19, 20,
2056 21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
2062 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
2063 uint8_t prev_intra_luma_pred_flag[4];
2065 int pb_size = (1 << log2_cb_size) >> split;
2066 int side = split + 1;
2070 for (i = 0; i < side; i++)
2071 for (j = 0; j < side; j++)
2074 for (i = 0; i < side; i++) {
2075 for (j = 0; j < side; j++) {
2076 if (prev_intra_luma_pred_flag[2 * i + j])
2083 prev_intra_luma_pred_flag[2 * i + j]);
2088 for (i = 0; i < side; i++) {
2089 for (j = 0; j < side; j++) {
2091 if (chroma_mode != 4) {
2104 if (chroma_mode != 4) {
2108 mode_idx = intra_chroma_table[chroma_mode];
2115 if (chroma_mode != 4) {
2131 int pb_size = 1 << log2_cb_size;
2139 if (size_in_pus == 0)
2141 for (j = 0; j < size_in_pus; j++)
2142 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2144 for (j = 0; j < size_in_pus; j++)
2145 for (k = 0; k < size_in_pus; k++)
2151 int cb_size = 1 << log2_cb_size;
2154 int length = cb_size >> log2_min_cb_size;
2156 int x_cb = x0 >> log2_min_cb_size;
2157 int y_cb = y0 >> log2_min_cb_size;
2158 int idx = log2_cb_size - 2;
2169 for (x = 0; x < 4; x++)
2181 x = y_cb * min_cb_width + x_cb;
2182 for (y = 0; y < length; y++) {
2183 memset(&s->skip_flag[x], skip_flag, length);
2188 x = y_cb * min_cb_width + x_cb;
2189 for (y = 0; y < length; y++) {
2216 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2242 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
2246 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
2250 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1, idx);
2254 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
2258 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1, idx - 2);
2262 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
2263 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
2264 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
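/* For reference, the standard HEVC inter partition geometries behind the
 * hls_prediction_unit() calls above (in units of the CB size):
 *   PART_2Nx2N : 1 PU,  cb   x cb
 *   PART_2NxN  : 2 PUs, cb   x cb/2    (top / bottom)
 *   PART_Nx2N  : 2 PUs, cb/2 x cb      (left / right)
 *   PART_2NxnU : 2 PUs, cb x cb/4   +  cb x 3cb/4
 *   PART_2NxnD : 2 PUs, cb x 3cb/4  +  cb x cb/4
 *   PART_nLx2N : 2 PUs, cb/4 x cb   +  3cb/4 x cb
 *   PART_nRx2N : 2 PUs, 3cb/4 x cb  +  cb/4 x cb
 *   PART_NxN   : 4 PUs, cb/2 x cb/2 each */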
2270 int rqt_root_cbf = 1;
2277 const static int cbf[2] = { 0 };
2283 log2_cb_size, 0, 0, cbf, cbf);
2296 x = y_cb * min_cb_width + x_cb;
2297 for (y = 0; y < length; y++) {
2302 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2303 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
2313 int log2_cb_size, int cb_depth)
2316 const int cb_size = 1 << log2_cb_size;
2321 if (x0 + cb_size <= s->ps.sps->width &&
2322 y0 + cb_size <= s->ps.sps->height &&
2341 const int cb_size_split = cb_size >> 1;
2342 const int x1 = x0 + cb_size_split;
2343 const int y1 = y0 + cb_size_split;
2351 if (more_data && x1 < s->ps.sps->width) {
2356 if (more_data && y1 < s->ps.sps->height) {
2361 if (more_data && x1 < s->ps.sps->width &&
2362 y1 < s->ps.sps->height) {
2368 if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
2369 ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
2373 return ((x1 + cb_size_split) < s->ps.sps->width ||
2381 if ((!((x0 + cb_size) %
2388 return !end_of_slice_flag;
2403 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2408 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2434 if (ctb_addr_in_slice <= 0)
2436 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2469 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2489 if (more_data < 0) {
2524 int *ctb_row_p = input_ctb_row;
2525 int ctb_row = ctb_row_p[job];
2535 ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
2541 while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2542 int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2543 int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
2557 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2560 if (more_data < 0) {
2571 if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
2577 if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height) {
2582 ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2585 if (x_ctb >= s->ps.sps->width) {
2593 s->tab_slice_address[ctb_addr_rs] = -1;
2607 int64_t startheader, cmpt = 0;
2638 for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
2647 for (j = 0, cmpt = 0, startheader = offset
2660 if (length < offset) {
2760 const int mapping[3] = {2, 0, 1};
2761 const int chroma_den = 50000;
2762 const int luma_den = 10000;
2769 for (i = 0; i < 3; i++) {
2770 const int j = mapping[i];
2790 "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
2799 "min_luminance=%f, max_luminance=%f\n",
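/* Context for the constants above (assumed interpretation, per the mastering
 * display colour volume SEI): primaries and white point are coded in units of
 * 0.00002 (denominator 50000), luminance in units of 0.0001 cd/m^2
 * (denominator 10000), e.g.
 *   av_make_q(17000,    chroma_den) -> 0.34
 *   av_make_q(10000000, luma_den)   -> 1000 cd/m^2 */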
2848 sizeof(uint32_t) * 4);
2852 tc_sd = (uint32_t*)tcside->data;
2855 for (int i = 0; i < tc_sd[0]; i++) {
2947 int ctb_addr_ts, ret;
3041 if (s->max_ra == INT_MAX) {
3063 } else if (!s->ref) {
3070 "Non-matching NAL types of the VCL NALUs: %d %d\n",
3080 "Error constructing the reference lists for the current slice.\n");
3104 if (ctb_addr_ts < 0) {
3133 int eos_at_start = 1;
3146 "Error splitting the input into NAL units.\n");
3173 if (ret >= 0 && s->overlap > 2)
3177 "Error parsing NAL unit #%d.\n", i);
3192 for (i = 0; i < 16; i++)
3193 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
3222 for (i = 0; frame->data[i]; i++) {
3230 for (j = 0; j < h; j++) {
3235 (const uint16_t *) src, w);
3293 int new_extradata_size;
3307 &new_extradata_size);
3308 if (new_extradata && new_extradata_size > 0) {
3322 "hardware accelerator failed to decode picture\n");
3403 for (i = 0; i < 3; i++) {
3643 #define OFFSET(x) offsetof(HEVCContext, x)
3644 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3647 { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3649 { "strict-displaywin", "strictly apply default display window size", OFFSET(apply_defdispwin),
3667 .priv_class = &hevc_decoder_class,
3679 #if CONFIG_HEVC_DXVA2_HWACCEL
3682 #if CONFIG_HEVC_D3D11VA_HWACCEL
3685 #if CONFIG_HEVC_D3D11VA2_HWACCEL
3688 #if CONFIG_HEVC_NVDEC_HWACCEL
3691 #if CONFIG_HEVC_VAAPI_HWACCEL
3694 #if CONFIG_HEVC_VDPAU_HWACCEL
3697 #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL