uint32_t v32 = v * 0x01010101;
uint64_t v64 = v * 0x0101010101010101ULL;
uint32_t v32 = v * 0x01010101;
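/* Illustration: multiplying a byte value by 0x01010101 (or by the 64-bit
 * 0x0101010101010101) replicates it into every byte of the word, e.g.
 * 0x5A * 0x01010101 == 0x5A5A5A5A, so a run of equal context bytes can be
 * written with a single aligned store. */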
static const uint8_t left_ctx[N_BS_SIZES] = {
    0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
};
static const uint8_t above_ctx[N_BS_SIZES] = {
    0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
};
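/* These per-block-size bit patterns are splatted into the above/left
 * partition-context arrays by SET_CTXS below (via dir##_ctx[b->bs]); each
 * set bit appears to mark a 4x4 edge unit already covered at that size. */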
int row = td->row, col = td->col, row7 = td->row7;
enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
for (y = 0; y < h4; y++) {
    int idx_base = (y + row) * 8 * s->sb_cols + col;
    for (x = 0; x < w4; x++)
        pred = FFMIN(pred, refsegmap[idx_base + x]);
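/* Temporal segment-id prediction: scan the co-located 4x4 units of the
 * reference frame's segmentation map and keep the smallest id seen. */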
if (have_a && have_l) {

} else if (b->intra) {
static const uint8_t size_group[10] = {
    3, 3, 3, 3, 2, 2, 2, 1, 1, 1
};
int sz = size_group[b->bs];
static const uint8_t inter_mode_ctx_lut[14][14] = {
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
};
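/* Indexed as [above mode][left mode]. Entries 0-9 correspond to the ten
 * intra modes, 10-13 to NEARESTMV/NEARMV/ZEROMV/NEWMV, so any intra
 * neighbour collapses to context 5 or 6 and only inter/inter pairs
 * differentiate further. */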
c = (refa == refl) ? 3 : 1;
c = (refl == refa) ? 4 : 2;
static const uint8_t off[10] = {
    3, 0, 0, 1, 0, 0, 0, 0, 0, 0
};
#if HAVE_FAST_64BIT
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                                    break; \
    case 2:  AV_WN16A(&var, val *             0x0101);     break; \
    case 4:  AV_WN32A(&var, val *         0x01010101);     break; \
    case 8:  AV_WN64A(&var, val * 0x0101010101010101ULL);  break; \
    case 16: { \
        uint64_t v64 = val * 0x0101010101010101ULL; \
        AV_WN64A(              &var,     v64); \
        AV_WN64A(&((uint8_t *) &var)[8], v64); \
        break; \
    } \
    }
#else
#define SPLAT_CTX(var, val, n) \
    switch (n) { \
    case 1:  var = val;                         break; \
    case 2:  AV_WN16A(&var, val *     0x0101);  break; \
    case 4:  AV_WN32A(&var, val * 0x01010101);  break; \
    case 8: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,     v32); \
        AV_WN32A(&((uint8_t *) &var)[4], v32); \
        break; \
    } \
    case 16: { \
        uint32_t v32 = val * 0x01010101; \
        AV_WN32A(              &var,      v32); \
        AV_WN32A(&((uint8_t *) &var)[4],  v32); \
        AV_WN32A(&((uint8_t *) &var)[8],  v32); \
        AV_WN32A(&((uint8_t *) &var)[12], v32); \
        break; \
    } \
    }
#endif

#define SET_CTXS(perf, dir, off, n) \
    do { \
        SPLAT_CTX(perf->dir##_skip_ctx[off],      b->skip,          n); \
        SPLAT_CTX(perf->dir##_txfm_ctx[off],      b->tx,            n); \
        SPLAT_CTX(perf->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
        if (!s->s.h.keyframe && !s->s.h.intraonly) { \
            SPLAT_CTX(perf->dir##_intra_ctx[off], b->intra,   n); \
            SPLAT_CTX(perf->dir##_comp_ctx[off],  b->comp,    n); \
            SPLAT_CTX(perf->dir##_mode_ctx[off],  b->mode[3], n); \
            if (!b->intra) { \
                SPLAT_CTX(perf->dir##_ref_ctx[off], vref, n); \
                if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
                    SPLAT_CTX(perf->dir##_filter_ctx[off], filter_id, n); \
                } \
            } \
        } \
    } while (0)

switch (w4) {
case 1: SET_CTXS(s, above, col, 1); break;
case 2: SET_CTXS(s, above, col, 2); break;
case 4: SET_CTXS(s, above, col, 4); break;
case 8: SET_CTXS(s, above, col, 8); break;
}
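/* Dispatching on the block width in 4x4 units makes n a compile-time
 * constant inside SET_CTXS, so every SPLAT_CTX above collapses to one or
 * two fixed-width stores instead of a runtime switch. */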
for (n = 0; n < w4 * 2; n++) {
for (n = 0; n < h4 * 2; n++) {

for (y = 0; y < h4; y++) {
    int x, o = (row + y) * s->sb_cols * 8 + col;

    for (x = 0; x < w4; x++) {
    } else if (b->comp) {
        for (x = 0; x < w4; x++) {
            mv[x].ref[0] = b->ref[0];
            mv[x].ref[1] = b->ref[1];
    for (x = 0; x < w4; x++) {
        mv[x].ref[0] = b->ref[0];
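/* Compound blocks record both reference indices for every 4x4 unit; the
 * single-reference path (partly elided here) stores only ref[0], and
 * ref[1] is presumably set to -1, as for intra blocks. */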
                        int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
                        unsigned (*eob)[6][2], uint8_t (*p)[6][11],
                        int nnz, const int16_t *scan, const int16_t (*nb)[2],
                        const int16_t *band_counts, int16_t *qmul)
{
    int i = 0, band = 0, band_left = band_counts[band];
eob[band][nnz][val]++;

band_left = band_counts[++band];

nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
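/* Context for the next coefficient: the rounded average of the two
 * already-decoded neighbours from the nb[] table, (1 + a + b) >> 1. */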
if (!is8bitsperpixel) {
#define STORE_COEF(c, i, v) do { \
    if (is8bitsperpixel) { \
        c[i] = v; \
    } else { \
        AV_WN32A(&c[i * 2], v); \
    } \
} while (0)

band_left = band_counts[++band];
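/* At 10/12-bit depth a dequantized coefficient can overflow int16_t, so
 * the 16bpp path stores a 32-bit value across two int16_t slots of the
 * coefficient array; the 8bpp path keeps plain 16-bit stores. */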
nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
} while (++i < n_coeffs);
                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                const int16_t (*nb)[2], const int16_t *band_counts,
                                   nnz, scan, nb, band_counts, qmul);

                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                const int16_t (*nb)[2], const int16_t *band_counts,
                                   nnz, scan, nb, band_counts, qmul);

                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                const int16_t (*nb)[2], const int16_t *band_counts,
                                   nnz, scan, nb, band_counts, qmul);

                                unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
                                uint8_t (*p)[6][11], int nnz, const int16_t *scan,
                                const int16_t (*nb)[2], const int16_t *band_counts,
                                   nnz, scan, nb, band_counts, qmul);
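/* Four thin wrappers (8 vs. 16 bpp, regular vs. 32x32 transform) share the
 * same parameter list and simply pass compile-time constants into the
 * av_always_inline generic decoder, so the per-sample branches fold away. */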
int row = td->row, col = td->col;
int end_x = FFMIN(2 * (s->cols - col), w4);
int end_y = FFMIN(2 * (s->rows - row), h4);
int n, pl, x, y, ret;
static const int16_t band_counts[4][8] = {
    { 1, 2, 3, 4,  3,   16 - 13 },
    { 1, 2, 3, 4, 11,   64 - 21 },
    { 1, 2, 3, 4, 11,  256 - 21 },
    { 1, 2, 3, 4, 11, 1024 - 21 },
};
const int16_t *y_band_counts  = band_counts[b->tx];
const int16_t *uv_band_counts = band_counts[b->uvtx];
int bytesperpixel = is8bitsperpixel ? 1 : 2;
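/* Six coefficient bands per transform size; the last entry is written as a
 * difference so the remainder is explicit (e.g. 1 + 2 + 3 + 4 + 3 = 13 of
 * the 16 4x4 coefficients, leaving 16 - 13 = 3 for the final band). */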
#define MERGE(la, end, step, rd) \
    for (n = 0; n < end; n += step) \
        la[n] = !!rd(&la[n])
#define MERGE_CTX(step, rd) \
    do { \
        MERGE(l, end_y, step, rd); \
        MERGE(a, end_x, step, rd); \
    } while (0)

#define DECODE_Y_COEF_LOOP(step, mode_index, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (td, td->block + 16 * n * bytesperpixel, 16 * step * step, \
                       c, e, p, a[x] + l[y], yscans[txtp], \
                       ynbs[txtp], y_band_counts, qmul[0]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&td->eob[n], ret); \
            } else { \
                td->eob[n] = ret; \
            } \
        } \
    }

#define SPLAT(la, end, step, cond) \
    if (step == 2) { \
        for (n = 1; n < end; n += step) \
            la[n] = la[n - 1]; \
    } else if (step == 4) { \
        if (cond) { \
            for (n = 0; n < end; n += step) \
                AV_WN32A(&la[n], la[n] * 0x01010101); \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
        } \
    } else { \
        if (cond) { \
            if (HAVE_FAST_64BIT) { \
                for (n = 0; n < end; n += step) \
                    AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
            } else { \
                for (n = 0; n < end; n += step) { \
                    uint32_t v32 = la[n] * 0x01010101; \
                    AV_WN32A(&la[n],     v32); \
                    AV_WN32A(&la[n + 4], v32); \
                } \
            } \
        } else { \
            for (n = 0; n < end; n += step) \
                memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
        } \
    }
#define SPLAT_CTX(step) \
    do { \
        SPLAT(a, end_x, step, end_x == w4); \
        SPLAT(l, end_y, step, end_y == h4); \
    } while (0)

#define DECODE_UV_COEF_LOOP(step, v) \
    for (n = 0, y = 0; y < end_y; y += step) { \
        for (x = 0; x < end_x; x += step, n += step * step) { \
            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                      (td, td->uvblock[pl] + 16 * n * bytesperpixel, \
                       16 * step * step, c, e, p, a[x] + l[y], \
                       uvscan, uvnb, uv_band_counts, qmul[1]); \
            a[x] = l[y] = !!ret; \
            total_coeff |= !!ret; \
            if (step >= 4) { \
                AV_WN16A(&td->uveob[pl][n], ret); \
            } else { \
                td->uveob[pl][n] = ret; \
            } \
        } \
    }

for (pl = 0; pl < 2; pl++) {
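/* MERGE/MERGE_CTX first fold the per-4x4 above/left non-zero flags down to
 * one entry per transform block; after decoding, SPLAT/SPLAT_CTX broadcast
 * the resulting flag back across the covered 4x4 units, so the context
 * arrays stay addressable at 4x4 granularity. */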
                       int row_and_7, int col_and_7,
                       int w, int h, int col_end, int row_end,

static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };
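/* Hedged reading (not stated in the source): each mask bit selects one
 * 8-pixel column or row inside a 64x64 superblock; the wide-filter masks
 * pick out the positions whose transform edges are aligned enough for the
 * wider loop-filter taps, with the choice depending on chroma subsampling. */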
if (tx == TX_4X4 && (ss_v | ss_h)) {

if (tx == TX_4X4 && !skip_inter) {
    int t = 1 << col_and_7, m_col = (t << w) - t, y;
    int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;

    for (y = row_and_7; y < h + row_and_7; y++) {
        int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);

        mask[0][y][1] |= m_row_8;
        mask[0][y][2] |= m_row_4;
        if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
            mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
        } else {
            mask[1][y][col_mask_id] |= m_col;
        }
        mask[0][y][3] |= m_col;
        if (ss_h && (col_end & 1))
            mask[1][y][3] |= (t << (w - 1)) - t;
        else
            mask[1][y][3] |= m_col;
} else {
    int y, t = 1 << col_and_7, m_col = (t << w) - t;
    int mask_id = (tx == TX_8X8);
    int l2 = tx + ss_h - 1, step1d;
    static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
    int m_row = m_col & masks[l2];

    if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
        int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
        int m_row_8 = m_row - m_row_16;

        for (y = row_and_7; y < h + row_and_7; y++) {
            mask[0][y][0] |= m_row_16;
            mask[0][y][1] |= m_row_8;
    } else {
        for (y = row_and_7; y < h + row_and_7; y++)
            mask[0][y][mask_id] |= m_row;

    if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
        for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
            mask[1][y][0] |= m_col;
        if (y - row_and_7 == h - 1)
            mask[1][y][1] |= m_col;
    } else {
        for (y = row_and_7; y < h + row_and_7; y += step1d)
            mask[1][y][mask_id] |= m_col;
} else if (tx != TX_4X4) {
    mask_id = (tx == TX_8X8) || (h == ss_v);
    mask[1][row_and_7][mask_id] |= m_col;
    mask_id = (tx == TX_8X8) || (w == ss_h);
    for (y = row_and_7; y < h + row_and_7; y++)
        mask[0][y][mask_id] |= t;
} else {
    int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;

    for (y = row_and_7; y < h + row_and_7; y++) {

mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
                         VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
td->min_mv.x = -(128 + col * 64);
td->min_mv.y = -(128 + row * 64);

b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
                   (s->ss_v && h4 * 2 == (1 << b->tx)));
if (bytesperpixel == 1) {
int row7 = td->row7;
#define SPLAT_ZERO_CTX(v, n) \
    switch (n) { \
    case 1:  v = 0;          break; \
    case 2:  AV_ZERO16(&v);  break; \
    case 4:  AV_ZERO32(&v);  break; \
    case 8:  AV_ZERO64(&v);  break; \
    case 16: AV_ZERO128(&v); break; \
    }

#define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
    do { \
        SPLAT_ZERO_CTX(dir##_y_##var[off * 2], n * 2); \
        if (s->ss_##dir2) { \
            SPLAT_ZERO_CTX(dir##_uv_##var[0][off], n); \
            SPLAT_ZERO_CTX(dir##_uv_##var[1][off], n); \
        } else { \
            SPLAT_ZERO_CTX(dir##_uv_##var[0][off * 2], n * 2); \
            SPLAT_ZERO_CTX(dir##_uv_##var[1][off * 2], n * 2); \
        } \
    } while (0)

s->td[0].block += w4 * h4 * 64 * bytesperpixel;
s->td[0].eob += 4 * w4 * h4;
emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
         (row + h4) > s->rows;
emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
         (row + h4) > s->rows;
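/* Edge emulation: if the block would write past the plane's linesize or
 * below the last visible row, reconstruction is redirected into the
 * fixed-stride td->tmp_y / td->tmp_uv buffers and copied back afterwards. */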
td->dst[0] = f->data[0] + yoff;
td->dst[1] = f->data[1] + uvoff;
td->dst[2] = f->data[2] + uvoff;
if (s->s.h.bpp > 8) {
if (s->s.h.bpp > 8) {
for (n = 0; o < w; n++) {
                         td->tmp_y + o * bytesperpixel, 128, h, 0, 0);
for (n = s->ss_h; o < w; n++) {
    s->dsp.mc[n][0][0][0][0](f->data[1] + uvoff + o * bytesperpixel, f->linesize[1],
                             td->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
    s->dsp.mc[n][0][0][0][0](f->data[2] + uvoff + o * bytesperpixel, f->linesize[2],
                             td->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
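/* Copy-back from the 128-byte-stride temporaries uses the unfiltered
 * mc[n][0][0][0][0] copy kernels, stepping through the width in
 * power-of-two chunks (n indexes the block-width table). */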
mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
           b->uvtx, skip_inter);
s->td[0].block += w4 * h4 * 64 * bytesperpixel;
s->td[0].eob += 4 * w4 * h4;