vp9.c
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "avcodec.h"
25 #include "get_bits.h"
26 #include "internal.h"
27 #include "profiles.h"
28 #include "thread.h"
29 #include "videodsp.h"
30 #include "vp56.h"
31 #include "vp9.h"
32 #include "vp9data.h"
33 #include "vp9dec.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/pixdesc.h"
36 
37 #define VP9_SYNCCODE 0x498342
38 
39 static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
40 {
41  ff_thread_release_buffer(avctx, &f->tf);
42  av_buffer_unref(&f->extradata);
43  av_buffer_unref(&f->hwaccel_priv_buf);
44  f->segmentation_map = NULL;
45  f->hwaccel_picture_private = NULL;
46 }
47 
48 static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
49 {
50  VP9Context *s = avctx->priv_data;
51  int ret, sz;
52 
53  ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
54  if (ret < 0)
55  return ret;
56 
57  sz = 64 * s->sb_cols * s->sb_rows;
58  f->extradata = av_buffer_allocz(sz * (1 + sizeof(VP9mvrefPair)));
59  if (!f->extradata) {
60  goto fail;
61  }
62 
63  f->segmentation_map = f->extradata->data;
64  f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
65 
66  if (avctx->hwaccel) {
67  const AVHWAccel *hwaccel = avctx->hwaccel;
68  av_assert0(!f->hwaccel_picture_private);
69  if (hwaccel->frame_priv_data_size) {
70  f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
71  if (!f->hwaccel_priv_buf)
72  goto fail;
73  f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
74  }
75  }
76 
77  return 0;
78 
79 fail:
80  vp9_frame_unref(avctx, f);
81  return AVERROR(ENOMEM);
82 }
83 
84 static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
85 {
86  int ret;
87 
88  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
89  if (ret < 0)
90  return ret;
91 
92  dst->extradata = av_buffer_ref(src->extradata);
93  if (!dst->extradata)
94  goto fail;
95 
96  dst->segmentation_map = src->segmentation_map;
97  dst->mv = src->mv;
98  dst->uses_2pass = src->uses_2pass;
99 
100  if (src->hwaccel_picture_private) {
101  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
102  if (!dst->hwaccel_priv_buf)
103  goto fail;
104  dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
105  }
106 
107  return 0;
108 
109 fail:
110  vp9_frame_unref(avctx, dst);
111  return AVERROR(ENOMEM);
112 }
113 
114 static int update_size(AVCodecContext *avctx, int w, int h)
115 {
116 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + CONFIG_VP9_D3D11VA_HWACCEL + CONFIG_VP9_VAAPI_HWACCEL)
117  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
118  VP9Context *s = avctx->priv_data;
119  uint8_t *p;
120  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
121 
122  av_assert0(w > 0 && h > 0);
123 
124  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
125  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
126  return ret;
127 
128  switch (s->pix_fmt) {
129  case AV_PIX_FMT_YUV420P:
130 #if CONFIG_VP9_DXVA2_HWACCEL
131  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
132 #endif
133 #if CONFIG_VP9_D3D11VA_HWACCEL
134  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
135 #endif
136 #if CONFIG_VP9_VAAPI_HWACCEL
137  *fmtp++ = AV_PIX_FMT_VAAPI;
138 #endif
139  break;
140  case AV_PIX_FMT_YUV420P10:
141  case AV_PIX_FMT_YUV420P12:
142 #if CONFIG_VP9_VAAPI_HWACCEL
143  *fmtp++ = AV_PIX_FMT_VAAPI;
144 #endif
145  break;
146  }
147 
148  *fmtp++ = s->pix_fmt;
149  *fmtp = AV_PIX_FMT_NONE;
150 
151  ret = ff_thread_get_format(avctx, pix_fmts);
152  if (ret < 0)
153  return ret;
154 
155  avctx->pix_fmt = ret;
156  s->gf_fmt = s->pix_fmt;
157  s->w = w;
158  s->h = h;
159  }
160 
161  cols = (w + 7) >> 3;
162  rows = (h + 7) >> 3;
163 
164  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
165  return 0;
166 
167  s->last_fmt = s->pix_fmt;
168  s->sb_cols = (w + 63) >> 6;
169  s->sb_rows = (h + 63) >> 6;
170  s->cols = (w + 7) >> 3;
171  s->rows = (h + 7) >> 3;
172 
173 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
174  av_freep(&s->intra_pred_data[0]);
175  // FIXME we slightly over-allocate here for subsampled chroma, but a little
176  // bit of padding shouldn't affect performance...
177  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
178  sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
179  if (!p)
180  return AVERROR(ENOMEM);
181  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
182  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
183  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
184  assign(s->above_y_nnz_ctx, uint8_t *, 16);
185  assign(s->above_mode_ctx, uint8_t *, 16);
186  assign(s->above_mv_ctx, VP56mv(*)[2], 16);
187  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
188  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
189  assign(s->above_partition_ctx, uint8_t *, 8);
190  assign(s->above_skip_ctx, uint8_t *, 8);
191  assign(s->above_txfm_ctx, uint8_t *, 8);
192  assign(s->above_segpred_ctx, uint8_t *, 8);
193  assign(s->above_intra_ctx, uint8_t *, 8);
194  assign(s->above_comp_ctx, uint8_t *, 8);
195  assign(s->above_ref_ctx, uint8_t *, 8);
196  assign(s->above_filter_ctx, uint8_t *, 8);
197  assign(s->lflvl, VP9Filter *, 1);
198 #undef assign
199 
200  // these will be re-allocated a little later
201  av_freep(&s->b_base);
202  av_freep(&s->block_base);
203 
204  if (s->s.h.bpp != s->last_bpp) {
205  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
206  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
207  s->last_bpp = s->s.h.bpp;
208  }
209 
210  return 0;
211 }
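/* Illustrative sketch, added for clarity (not part of the original file):
 * how the block/superblock grid computed in update_size() works out for a
 * concrete resolution. cols/rows count 8x8 blocks, sb_cols/sb_rows count
 * 64x64 superblocks, both rounded up. */
#if 0
static void grid_example(void)
{
    int w = 1920, h = 1080;
    int cols    = (w +  7) >> 3;   /* 240 8x8-block columns  */
    int rows    = (h +  7) >> 3;   /* 135 8x8-block rows     */
    int sb_cols = (w + 63) >> 6;   /*  30 superblock columns */
    int sb_rows = (h + 63) >> 6;   /*  17 superblock rows    */
    /* The "above" context lines allocated with assign() above are sized
     * per superblock column, e.g. 16 above_y_nnz_ctx entries per sb. */
}
#endif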
212 
213 static int update_block_buffers(AVCodecContext *avctx)
214 {
215  VP9Context *s = avctx->priv_data;
216  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
217 
218  if (s->b_base && s->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
219  return 0;
220 
221  av_free(s->b_base);
222  av_free(s->block_base);
223  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
224  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
225  if (s->s.frames[CUR_FRAME].uses_2pass) {
226  int sbs = s->sb_cols * s->sb_rows;
227 
228  s->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
229  s->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
230  16 * 16 + 2 * chroma_eobs) * sbs);
231  if (!s->b_base || !s->block_base)
232  return AVERROR(ENOMEM);
233  s->uvblock_base[0] = s->block_base + sbs * 64 * 64 * bytesperpixel;
234  s->uvblock_base[1] = s->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
235  s->eob_base = (uint8_t *) (s->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
236  s->uveob_base[0] = s->eob_base + 16 * 16 * sbs;
237  s->uveob_base[1] = s->uveob_base[0] + chroma_eobs * sbs;
238  } else {
239  s->b_base = av_malloc(sizeof(VP9Block));
240  s->block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
241  16 * 16 + 2 * chroma_eobs);
242  if (!s->b_base || !s->block_base)
243  return AVERROR(ENOMEM);
244  s->uvblock_base[0] = s->block_base + 64 * 64 * bytesperpixel;
245  s->uvblock_base[1] = s->uvblock_base[0] + chroma_blocks * bytesperpixel;
246  s->eob_base = (uint8_t *) (s->uvblock_base[1] + chroma_blocks * bytesperpixel);
247  s->uveob_base[0] = s->eob_base + 16 * 16;
248  s->uveob_base[1] = s->uveob_base[0] + chroma_eobs;
249  }
250  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
251 
252  return 0;
253 }
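/* Illustrative sketch, not in the original source: per-superblock buffer
 * sizing done by update_block_buffers() for 4:2:0 subsampling
 * (ss_h = ss_v = 1) and 8-bit content (bytesperpixel = 1). The 2-pass path
 * multiplies the same figure by sb_cols * sb_rows. */
#if 0
static void block_buffer_example(void)
{
    int ss_h = 1, ss_v = 1, bytesperpixel = 1;
    int chroma_blocks = 64 * 64 >> (ss_h + ss_v);  /* 1024 chroma coefficients per plane */
    int chroma_eobs   = 16 * 16 >> (ss_h + ss_v);  /*   64 chroma eob slots per plane    */
    size_t block_size = (64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t)
                        + 16 * 16 + 2 * chroma_eobs; /* = 12672 bytes per superblock */
}
#endif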
254 
255 // The sign bit is at the end, not the start, of a bit sequence
256 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
257 {
258  int v = get_bits(gb, n);
259  return get_bits1(gb) ? -v : v;
260 }
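/* Illustrative example, not in the original source: VP9 stores the
 * magnitude first and the sign bit last, unlike get_sbits(). */
#if 0
static int get_sbits_inv_example(GetBitContext *gb)
{
    /* If the next bits are 000110 followed by a set sign bit, then
     * get_bits(gb, 6) returns 6 and get_bits1(gb) returns 1, so
     * get_sbits_inv(gb, 6) evaluates to -6 (with the sign bit clear: +6). */
    return get_sbits_inv(gb, 6);
}
#endif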
261 
262 static av_always_inline int inv_recenter_nonneg(int v, int m)
263 {
264  if (v > 2 * m)
265  return v;
266  if (v & 1)
267  return m - ((v + 1) >> 1);
268  return m + (v >> 1);
269 }
270 
271 // differential forward probability updates
272 static int update_prob(VP56RangeCoder *c, int p)
273 {
274  static const int inv_map_table[255] = {
275  7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
276  189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
277  10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
278  25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
279  40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
280  55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
281  70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
282  86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
283  101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
284  116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
285  131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
286  146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
287  161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
288  177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
289  192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
290  207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
291  222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
292  237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
293  252, 253, 253,
294  };
295  int d;
296 
297  /* This code performs a differential probability update. For a current
298  * probability A in the range [1, 255], the signed difference to any new
299  * probability lies in [1-A, 255-A]. Part of that absolute range is
300  * reachable on both the positive and the negative side, while the
301  * remainder exists on one side only. The shared part is coded
302  * differentially, i.e. doubled, with the lowest bit selecting the sign,
303  * and the one-sided remainder is then coded on top of that;
304  * inv_recenter_nonneg() undoes this mapping. The resulting absolute
305  * difference again lies in [0, 254], and a bigger value means we are
306  * further away from the original value A, which is increasingly
307  * unlikely, so it can be coded as a VLC. The first 20 values in
308  * inv_map_table[] allow 'cheap, rough' updates vs. the 'fine, exact'
309  * updates further down the range, which adds one extra dimension to
310  * this differential update model. */
311 
312  if (!vp8_rac_get(c)) {
313  d = vp8_rac_get_uint(c, 4) + 0;
314  } else if (!vp8_rac_get(c)) {
315  d = vp8_rac_get_uint(c, 4) + 16;
316  } else if (!vp8_rac_get(c)) {
317  d = vp8_rac_get_uint(c, 5) + 32;
318  } else {
319  d = vp8_rac_get_uint(c, 7);
320  if (d >= 65)
321  d = (d << 1) - 65 + vp8_rac_get(c);
322  d += 64;
323  av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
324  }
325 
326  return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
327  255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
328 }
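/* Worked example, added for illustration (not in the original source):
 * with an old probability p = 100 and the first VLC branch taken,
 * d = vp8_rac_get_uint(c, 4). For d = 0, inv_map_table[0] is 7; since
 * p <= 128 the result is 1 + inv_recenter_nonneg(7, 99), and because 7 is
 * odd inv_recenter_nonneg() returns 99 - 4 = 95, giving a new probability
 * of 96. For d = 20 the table value is 1, giving 1 + (99 - 1) = 99; for
 * d = 21 it is 2, giving 1 + (99 + 1) = 101. Small d thus means a small,
 * sign-interleaved step around the old value, and the VLC above spends the
 * fewest bits on those likely small steps. */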
329 
330 static int read_colorspace_details(AVCodecContext *avctx)
331 {
332  static const enum AVColorSpace colorspaces[8] = {
333  AVCOL_SPC_UNSPECIFIED, AVCOL_SPC_BT470BG, AVCOL_SPC_BT709, AVCOL_SPC_SMPTE170M,
334  AVCOL_SPC_SMPTE240M, AVCOL_SPC_BT2020_NCL, AVCOL_SPC_RESERVED, AVCOL_SPC_RGB,
335  };
336  VP9Context *s = avctx->priv_data;
337  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
338 
339  s->bpp_index = bits;
340  s->s.h.bpp = 8 + bits * 2;
341  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
342  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
343  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
344  static const enum AVPixelFormat pix_fmt_rgb[3] = {
345  AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
346  };
347  s->ss_h = s->ss_v = 0;
348  avctx->color_range = AVCOL_RANGE_JPEG;
349  s->pix_fmt = pix_fmt_rgb[bits];
350  if (avctx->profile & 1) {
351  if (get_bits1(&s->gb)) {
352  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
353  return AVERROR_INVALIDDATA;
354  }
355  } else {
356  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
357  avctx->profile);
358  return AVERROR_INVALIDDATA;
359  }
360  } else {
361  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
362  { { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P },
363  { AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV420P } },
364  { { AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV422P10 },
365  { AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV420P10 } },
366  { { AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12 },
367  { AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12 } }
368  };
369  avctx->color_range = get_bits1(&s->gb) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
370  if (avctx->profile & 1) {
371  s->ss_h = get_bits1(&s->gb);
372  s->ss_v = get_bits1(&s->gb);
373  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
374  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
375  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
376  avctx->profile);
377  return AVERROR_INVALIDDATA;
378  } else if (get_bits1(&s->gb)) {
379  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
380  avctx->profile);
381  return AVERROR_INVALIDDATA;
382  }
383  } else {
384  s->ss_h = s->ss_v = 1;
385  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
386  }
387  }
388 
389  return 0;
390 }
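/* Illustrative sketch, not in the original source: how the fields read by
 * read_colorspace_details() combine, assuming the pix_fmt_for_ss[][][]
 * table reconstructed above. */
#if 0
static void colorspace_example(void)
{
    /* profile 0: 8 bit, implicit 4:2:0 subsampling -> AV_PIX_FMT_YUV420P   */
    /* profile 1: 8 bit, ss_h = ss_v = 0            -> AV_PIX_FMT_YUV444P   */
    /* profile 2, extra bit set: 12 bit, 4:2:0      -> AV_PIX_FMT_YUV420P12 */
    int bpp12         = 8 + 2 * 2;         /* bits index 2 -> 12 bpp        */
    int bytesperpixel = (7 + bpp12) >> 3;  /* -> 2 bytes per sample         */
}
#endif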
391 
392 static int decode_frame_header(AVCodecContext *avctx,
393  const uint8_t *data, int size, int *ref)
394 {
395  VP9Context *s = avctx->priv_data;
396  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
397  int last_invisible;
398  const uint8_t *data2;
399 
400  /* general header */
401  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
402  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
403  return ret;
404  }
405  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
406  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
407  return AVERROR_INVALIDDATA;
408  }
409  avctx->profile = get_bits1(&s->gb);
410  avctx->profile |= get_bits1(&s->gb) << 1;
411  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
412  if (avctx->profile > 3) {
413  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
414  return AVERROR_INVALIDDATA;
415  }
416  s->s.h.profile = avctx->profile;
417  if (get_bits1(&s->gb)) {
418  *ref = get_bits(&s->gb, 3);
419  return 0;
420  }
421 
422  s->last_keyframe = s->s.h.keyframe;
423  s->s.h.keyframe = !get_bits1(&s->gb);
424 
425  last_invisible = s->s.h.invisible;
426  s->s.h.invisible = !get_bits1(&s->gb);
427  s->s.h.errorres = get_bits1(&s->gb);
428  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
429 
430  if (s->s.h.keyframe) {
431  if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
432  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
433  return AVERROR_INVALIDDATA;
434  }
435  if ((ret = read_colorspace_details(avctx)) < 0)
436  return ret;
437  // for profile 1, here follows the subsampling bits
438  s->s.h.refreshrefmask = 0xff;
439  w = get_bits(&s->gb, 16) + 1;
440  h = get_bits(&s->gb, 16) + 1;
441  if (get_bits1(&s->gb)) // display size
442  skip_bits(&s->gb, 32);
443  } else {
444  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
445  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
446  if (s->s.h.intraonly) {
447  if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
448  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
449  return AVERROR_INVALIDDATA;
450  }
451  if (avctx->profile >= 1) {
452  if ((ret = read_colorspace_details(avctx)) < 0)
453  return ret;
454  } else {
455  s->ss_h = s->ss_v = 1;
456  s->s.h.bpp = 8;
457  s->bpp_index = 0;
458  s->bytesperpixel = 1;
459  s->pix_fmt = AV_PIX_FMT_YUV420P;
460  avctx->colorspace = AVCOL_SPC_BT470BG;
461  avctx->color_range = AVCOL_RANGE_JPEG;
462  }
463  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
464  w = get_bits(&s->gb, 16) + 1;
465  h = get_bits(&s->gb, 16) + 1;
466  if (get_bits1(&s->gb)) // display size
467  skip_bits(&s->gb, 32);
468  } else {
469  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
470  s->s.h.refidx[0] = get_bits(&s->gb, 3);
471  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
472  s->s.h.refidx[1] = get_bits(&s->gb, 3);
473  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
474  s->s.h.refidx[2] = get_bits(&s->gb, 3);
475  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
476  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
477  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
478  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
479  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
480  return AVERROR_INVALIDDATA;
481  }
482  if (get_bits1(&s->gb)) {
483  w = s->s.refs[s->s.h.refidx[0]].f->width;
484  h = s->s.refs[s->s.h.refidx[0]].f->height;
485  } else if (get_bits1(&s->gb)) {
486  w = s->s.refs[s->s.h.refidx[1]].f->width;
487  h = s->s.refs[s->s.h.refidx[1]].f->height;
488  } else if (get_bits1(&s->gb)) {
489  w = s->s.refs[s->s.h.refidx[2]].f->width;
490  h = s->s.refs[s->s.h.refidx[2]].f->height;
491  } else {
492  w = get_bits(&s->gb, 16) + 1;
493  h = get_bits(&s->gb, 16) + 1;
494  }
495  // Note that in this code, "CUR_FRAME" is actually before we
496  // have formally allocated a frame, and thus actually represents
497  // the _last_ frame
498  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
499  s->s.frames[CUR_FRAME].tf.f->height == h;
500  if (get_bits1(&s->gb)) // display size
501  skip_bits(&s->gb, 32);
502  s->s.h.highprecisionmvs = get_bits1(&s->gb);
503  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
504  get_bits(&s->gb, 2);
505  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
506  s->s.h.signbias[0] != s->s.h.signbias[2];
507  if (s->s.h.allowcompinter) {
508  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
509  s->s.h.fixcompref = 2;
510  s->s.h.varcompref[0] = 0;
511  s->s.h.varcompref[1] = 1;
512  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
513  s->s.h.fixcompref = 1;
514  s->s.h.varcompref[0] = 0;
515  s->s.h.varcompref[1] = 2;
516  } else {
517  s->s.h.fixcompref = 0;
518  s->s.h.varcompref[0] = 1;
519  s->s.h.varcompref[1] = 2;
520  }
521  }
522  }
523  }
524  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
525  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
526  s->s.h.framectxid = c = get_bits(&s->gb, 2);
527  if (s->s.h.keyframe || s->s.h.intraonly)
528  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
529 
530  /* loopfilter header data */
531  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
532  // reset loopfilter defaults
533  s->s.h.lf_delta.ref[0] = 1;
534  s->s.h.lf_delta.ref[1] = 0;
535  s->s.h.lf_delta.ref[2] = -1;
536  s->s.h.lf_delta.ref[3] = -1;
537  s->s.h.lf_delta.mode[0] = 0;
538  s->s.h.lf_delta.mode[1] = 0;
539  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
540  }
541  s->s.h.filter.level = get_bits(&s->gb, 6);
542  sharp = get_bits(&s->gb, 3);
543  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
544  // the old cache values since they are still valid
545  if (s->s.h.filter.sharpness != sharp)
546  memset(s->filter_lut.lim_lut, 0, sizeof(s->filter_lut.lim_lut));
547  s->s.h.filter.sharpness = sharp;
548  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
549  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
550  for (i = 0; i < 4; i++)
551  if (get_bits1(&s->gb))
552  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
553  for (i = 0; i < 2; i++)
554  if (get_bits1(&s->gb))
555  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
556  }
557  }
558 
559  /* quantization header data */
560  s->s.h.yac_qi = get_bits(&s->gb, 8);
561  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
562  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
563  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
564  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
565  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
566  if (s->s.h.lossless)
567  avctx->properties |= FF_CODEC_PROPERTY_LOSSLESS;
568 
569  /* segmentation header info */
570  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
571  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
572  for (i = 0; i < 7; i++)
573  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
574  get_bits(&s->gb, 8) : 255;
575  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
576  for (i = 0; i < 3; i++)
577  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
578  get_bits(&s->gb, 8) : 255;
579  }
580 
581  if (get_bits1(&s->gb)) {
582  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
583  for (i = 0; i < 8; i++) {
584  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
585  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
586  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
587  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
588  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
589  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
590  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
591  }
592  }
593  }
594 
595  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
596  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
597  int qyac, qydc, quvac, quvdc, lflvl, sh;
598 
599  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
600  if (s->s.h.segmentation.absolute_vals)
601  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
602  else
603  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
604  } else {
605  qyac = s->s.h.yac_qi;
606  }
607  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
608  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
609  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
610  qyac = av_clip_uintp2(qyac, 8);
611 
612  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
613  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
614  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
615  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
616 
617  sh = s->s.h.filter.level >= 32;
618  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
619  if (s->s.h.segmentation.absolute_vals)
620  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
621  else
622  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
623  } else {
624  lflvl = s->s.h.filter.level;
625  }
626  if (s->s.h.lf_delta.enabled) {
627  s->s.h.segmentation.feat[i].lflvl[0][0] =
628  s->s.h.segmentation.feat[i].lflvl[0][1] =
629  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
630  for (j = 1; j < 4; j++) {
631  s->s.h.segmentation.feat[i].lflvl[j][0] =
632  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
633  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
634  s->s.h.segmentation.feat[i].lflvl[j][1] =
635  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
636  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
637  }
638  } else {
639  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
640  sizeof(s->s.h.segmentation.feat[i].lflvl));
641  }
642  }
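 /* Worked example, added for illustration (not in the original source):
  * with yac_qi = 100, ydc_qdelta = -2, segmentation disabled and 8-bit
  * content (bpp_index 0), the loop above yields
  *   qmul[0][0] = ff_vp9_dc_qlookup[0][98]  (luma DC)
  *   qmul[0][1] = ff_vp9_ac_qlookup[0][100] (luma AC)
  * and, with filter.level = 36 (so sh = 1) and lf_delta.ref[0] = 1, a
  * loop-filter level of av_clip_uintp2(36 + (1 << 1), 6) = 38 for blocks
  * using the first reference delta. */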
643 
644  /* tiling info */
645  if ((ret = update_size(avctx, w, h)) < 0) {
646  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
647  w, h, s->pix_fmt);
648  return ret;
649  }
650  for (s->s.h.tiling.log2_tile_cols = 0;
651  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
652  s->s.h.tiling.log2_tile_cols++) ;
653  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
654  max = FFMAX(0, max - 1);
655  while (max > s->s.h.tiling.log2_tile_cols) {
656  if (get_bits1(&s->gb))
657  s->s.h.tiling.log2_tile_cols++;
658  else
659  break;
660  }
661  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
662  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
663  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
664  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
665  s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
666  sizeof(VP56RangeCoder) * s->s.h.tiling.tile_cols);
667  if (!s->c_b) {
668  av_log(avctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
669  return AVERROR(ENOMEM);
670  }
671  }
672 
673  /* check reference frames */
674  if (!s->s.h.keyframe && !s->s.h.intraonly) {
675  for (i = 0; i < 3; i++) {
676  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
677  int refw = ref->width, refh = ref->height;
678 
679  if (ref->format != avctx->pix_fmt) {
680  av_log(avctx, AV_LOG_ERROR,
681  "Ref pixfmt (%s) did not match current frame (%s)",
682  av_get_pix_fmt_name(ref->format),
683  av_get_pix_fmt_name(avctx->pix_fmt));
684  return AVERROR_INVALIDDATA;
685  } else if (refw == w && refh == h) {
686  s->mvscale[i][0] = s->mvscale[i][1] = 0;
687  } else {
688  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
689  av_log(avctx, AV_LOG_ERROR,
690  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
691  refw, refh, w, h);
692  return AVERROR_INVALIDDATA;
693  }
694  s->mvscale[i][0] = (refw << 14) / w;
695  s->mvscale[i][1] = (refh << 14) / h;
696  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
697  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
698  }
699  }
700  }
701 
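 /* Worked example, added for illustration (not in the original source):
  * mvscale[][] is a 14-bit fixed-point ratio. For a 1280x720 reference
  * used by a 1920x1080 frame, mvscale[i][0] = (1280 << 14) / 1920 = 10922
  * (about 0.667 in Q14) and mvstep[i][0] = 16 * 10922 >> 14 = 10, i.e. a
  * 16-pixel step in the current frame advances roughly 10 pixels in the
  * scaled reference. */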
702  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
703  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
704  s->prob_ctx[3].p = ff_vp9_default_probs;
705  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
706  sizeof(ff_vp9_default_coef_probs));
707  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
708  sizeof(ff_vp9_default_coef_probs));
709  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
710  sizeof(ff_vp9_default_coef_probs));
711  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
712  sizeof(ff_vp9_default_coef_probs));
713  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
714  s->prob_ctx[c].p = ff_vp9_default_probs;
715  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
716  sizeof(ff_vp9_default_coef_probs));
717  }
718 
719  // next 16 bits is size of the rest of the header (arith-coded)
720  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
721  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
722 
723  data2 = align_get_bits(&s->gb);
724  if (size2 > size - (data2 - data)) {
725  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
726  return AVERROR_INVALIDDATA;
727  }
728  ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
729  if (ret < 0)
730  return ret;
731 
732  if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
733  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
734  return AVERROR_INVALIDDATA;
735  }
736 
737  if (s->s.h.keyframe || s->s.h.intraonly) {
738  memset(s->counts.coef, 0, sizeof(s->counts.coef));
739  memset(s->counts.eob, 0, sizeof(s->counts.eob));
740  } else {
741  memset(&s->counts, 0, sizeof(s->counts));
742  }
743  /* FIXME is it faster to not copy here, but do it down in the fw updates
744  * as explicit copies if the fw update is missing (and skip the copy upon
745  * fw update)? */
746  s->prob.p = s->prob_ctx[c].p;
747 
748  // txfm updates
749  if (s->s.h.lossless) {
750  s->s.h.txfmmode = TX_4X4;
751  } else {
752  s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
753  if (s->s.h.txfmmode == 3)
754  s->s.h.txfmmode += vp8_rac_get(&s->c);
755 
756  if (s->s.h.txfmmode == TX_SWITCHABLE) {
757  for (i = 0; i < 2; i++)
758  if (vp56_rac_get_prob_branchy(&s->c, 252))
759  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
760  for (i = 0; i < 2; i++)
761  for (j = 0; j < 2; j++)
762  if (vp56_rac_get_prob_branchy(&s->c, 252))
763  s->prob.p.tx16p[i][j] =
764  update_prob(&s->c, s->prob.p.tx16p[i][j]);
765  for (i = 0; i < 2; i++)
766  for (j = 0; j < 3; j++)
767  if (vp56_rac_get_prob_branchy(&s->c, 252))
768  s->prob.p.tx32p[i][j] =
769  update_prob(&s->c, s->prob.p.tx32p[i][j]);
770  }
771  }
772 
773  // coef updates
774  for (i = 0; i < 4; i++) {
775  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
776  if (vp8_rac_get(&s->c)) {
777  for (j = 0; j < 2; j++)
778  for (k = 0; k < 2; k++)
779  for (l = 0; l < 6; l++)
780  for (m = 0; m < 6; m++) {
781  uint8_t *p = s->prob.coef[i][j][k][l][m];
782  uint8_t *r = ref[j][k][l][m];
783  if (m >= 3 && l == 0) // dc only has 3 pt
784  break;
785  for (n = 0; n < 3; n++) {
786  if (vp56_rac_get_prob_branchy(&s->c, 252))
787  p[n] = update_prob(&s->c, r[n]);
788  else
789  p[n] = r[n];
790  }
791  p[3] = 0;
792  }
793  } else {
794  for (j = 0; j < 2; j++)
795  for (k = 0; k < 2; k++)
796  for (l = 0; l < 6; l++)
797  for (m = 0; m < 6; m++) {
798  uint8_t *p = s->prob.coef[i][j][k][l][m];
799  uint8_t *r = ref[j][k][l][m];
800  if (m > 3 && l == 0) // dc only has 3 pt
801  break;
802  memcpy(p, r, 3);
803  p[3] = 0;
804  }
805  }
806  if (s->s.h.txfmmode == i)
807  break;
808  }
809 
810  // mode updates
811  for (i = 0; i < 3; i++)
812  if (vp56_rac_get_prob_branchy(&s->c, 252))
813  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
814  if (!s->s.h.keyframe && !s->s.h.intraonly) {
815  for (i = 0; i < 7; i++)
816  for (j = 0; j < 3; j++)
817  if (vp56_rac_get_prob_branchy(&s->c, 252))
818  s->prob.p.mv_mode[i][j] =
819  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
820 
821  if (s->s.h.filtermode == FILTER_SWITCHABLE)
822  for (i = 0; i < 4; i++)
823  for (j = 0; j < 2; j++)
824  if (vp56_rac_get_prob_branchy(&s->c, 252))
825  s->prob.p.filter[i][j] =
826  update_prob(&s->c, s->prob.p.filter[i][j]);
827 
828  for (i = 0; i < 4; i++)
829  if (vp56_rac_get_prob_branchy(&s->c, 252))
830  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
831 
832  if (s->s.h.allowcompinter) {
833  s->s.h.comppredmode = vp8_rac_get(&s->c);
834  if (s->s.h.comppredmode)
835  s->s.h.comppredmode += vp8_rac_get(&s->c);
836  if (s->s.h.comppredmode == PRED_SWITCHABLE)
837  for (i = 0; i < 5; i++)
838  if (vp56_rac_get_prob_branchy(&s->c, 252))
839  s->prob.p.comp[i] =
840  update_prob(&s->c, s->prob.p.comp[i]);
841  } else {
842  s->s.h.comppredmode = PRED_SINGLEREF;
843  }
844 
845  if (s->s.h.comppredmode != PRED_COMPREF) {
846  for (i = 0; i < 5; i++) {
847  if (vp56_rac_get_prob_branchy(&s->c, 252))
848  s->prob.p.single_ref[i][0] =
849  update_prob(&s->c, s->prob.p.single_ref[i][0]);
850  if (vp56_rac_get_prob_branchy(&s->c, 252))
851  s->prob.p.single_ref[i][1] =
852  update_prob(&s->c, s->prob.p.single_ref[i][1]);
853  }
854  }
855 
856  if (s->s.h.comppredmode != PRED_SINGLEREF) {
857  for (i = 0; i < 5; i++)
858  if (vp56_rac_get_prob_branchy(&s->c, 252))
859  s->prob.p.comp_ref[i] =
860  update_prob(&s->c, s->prob.p.comp_ref[i]);
861  }
862 
863  for (i = 0; i < 4; i++)
864  for (j = 0; j < 9; j++)
865  if (vp56_rac_get_prob_branchy(&s->c, 252))
866  s->prob.p.y_mode[i][j] =
867  update_prob(&s->c, s->prob.p.y_mode[i][j]);
868 
869  for (i = 0; i < 4; i++)
870  for (j = 0; j < 4; j++)
871  for (k = 0; k < 3; k++)
872  if (vp56_rac_get_prob_branchy(&s->c, 252))
873  s->prob.p.partition[3 - i][j][k] =
874  update_prob(&s->c,
875  s->prob.p.partition[3 - i][j][k]);
876 
877  // mv fields don't use the update_prob subexp model for some reason
878  for (i = 0; i < 3; i++)
879  if (vp56_rac_get_prob_branchy(&s->c, 252))
880  s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
881 
882  for (i = 0; i < 2; i++) {
883  if (vp56_rac_get_prob_branchy(&s->c, 252))
884  s->prob.p.mv_comp[i].sign =
885  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
886 
887  for (j = 0; j < 10; j++)
888  if (vp56_rac_get_prob_branchy(&s->c, 252))
889  s->prob.p.mv_comp[i].classes[j] =
890  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
891 
892  if (vp56_rac_get_prob_branchy(&s->c, 252))
893  s->prob.p.mv_comp[i].class0 =
894  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
895 
896  for (j = 0; j < 10; j++)
897  if (vp56_rac_get_prob_branchy(&s->c, 252))
898  s->prob.p.mv_comp[i].bits[j] =
899  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
900  }
901 
902  for (i = 0; i < 2; i++) {
903  for (j = 0; j < 2; j++)
904  for (k = 0; k < 3; k++)
905  if (vp56_rac_get_prob_branchy(&s->c, 252))
906  s->prob.p.mv_comp[i].class0_fp[j][k] =
907  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
908 
909  for (j = 0; j < 3; j++)
910  if (vp56_rac_get_prob_branchy(&s->c, 252))
911  s->prob.p.mv_comp[i].fp[j] =
912  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
913  }
914 
915  if (s->s.h.highprecisionmvs) {
916  for (i = 0; i < 2; i++) {
917  if (vp56_rac_get_prob_branchy(&s->c, 252))
918  s->prob.p.mv_comp[i].class0_hp =
919  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
920 
921  if (vp56_rac_get_prob_branchy(&s->c, 252))
922  s->prob.p.mv_comp[i].hp =
923  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
924  }
925  }
926  }
927 
928  return (data2 - data) + size2;
929 }
930 
931 static void decode_sb(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl,
932  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
933 {
934  VP9Context *s = avctx->priv_data;
935  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
936  (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
937  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
938  s->prob.p.partition[bl][c];
939  enum BlockPartition bp;
940  ptrdiff_t hbs = 4 >> bl;
941  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
942  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
943  int bytesperpixel = s->bytesperpixel;
944 
945  if (bl == BL_8X8) {
946  bp = vp8_rac_get_tree(&s->c, ff_vp9_partition_tree, p);
947  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
948  } else if (col + hbs < s->cols) { // FIXME why not <=?
949  if (row + hbs < s->rows) { // FIXME why not <=?
950  bp = vp8_rac_get_tree(&s->c, ff_vp9_partition_tree, p);
951  switch (bp) {
952  case PARTITION_NONE:
953  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
954  break;
955  case PARTITION_H:
956  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
957  yoff += hbs * 8 * y_stride;
958  uvoff += hbs * 8 * uv_stride >> s->ss_v;
959  ff_vp9_decode_block(avctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
960  break;
961  case PARTITION_V:
962  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
963  yoff += hbs * 8 * bytesperpixel;
964  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
965  ff_vp9_decode_block(avctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
966  break;
967  case PARTITION_SPLIT:
968  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
969  decode_sb(avctx, row, col + hbs, lflvl,
970  yoff + 8 * hbs * bytesperpixel,
971  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
972  yoff += hbs * 8 * y_stride;
973  uvoff += hbs * 8 * uv_stride >> s->ss_v;
974  decode_sb(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
975  decode_sb(avctx, row + hbs, col + hbs, lflvl,
976  yoff + 8 * hbs * bytesperpixel,
977  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
978  break;
979  default:
980  av_assert0(0);
981  }
982  } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
983  bp = PARTITION_SPLIT;
984  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
985  decode_sb(avctx, row, col + hbs, lflvl,
986  yoff + 8 * hbs * bytesperpixel,
987  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
988  } else {
989  bp = PARTITION_H;
990  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
991  }
992  } else if (row + hbs < s->rows) { // FIXME why not <=?
993  if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
994  bp = PARTITION_SPLIT;
995  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
996  yoff += hbs * 8 * y_stride;
997  uvoff += hbs * 8 * uv_stride >> s->ss_v;
998  decode_sb(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
999  } else {
1000  bp = PARTITION_V;
1001  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, bl, bp);
1002  }
1003  } else {
1004  bp = PARTITION_SPLIT;
1005  decode_sb(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
1006  }
1007  s->counts.partition[bl][c][bp]++;
1008 }
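/* Illustration, not in the original source: decode_sb() recurses from
 * BL_64X64 down to BL_8X8, with hbs = 4 >> bl the half-block size in 8x8
 * units. At BL_64X64 hbs is 4, so a PARTITION_H split advances yoff by
 * 4 * 8 * y_stride = 32 rows; at BL_32X32 hbs is 2, and so on. At BL_8X8
 * the partition choice is handled inside ff_vp9_decode_block() rather
 * than by further recursion. */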
1009 
1010 static void decode_sb_mem(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl,
1011  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1012 {
1013  VP9Context *s = avctx->priv_data;
1014  VP9Block *b = s->b;
1015  ptrdiff_t hbs = 4 >> bl;
1016  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1017  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1018  int bytesperpixel = s->bytesperpixel;
1019 
1020  if (bl == BL_8X8) {
1021  av_assert2(b->bl == BL_8X8);
1022  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1023  } else if (s->b->bl == bl) {
1024  ff_vp9_decode_block(avctx, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1025  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1026  yoff += hbs * 8 * y_stride;
1027  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1028  ff_vp9_decode_block(avctx, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1029  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1030  yoff += hbs * 8 * bytesperpixel;
1031  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1032  ff_vp9_decode_block(avctx, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1033  }
1034  } else {
1035  decode_sb_mem(avctx, row, col, lflvl, yoff, uvoff, bl + 1);
1036  if (col + hbs < s->cols) { // FIXME why not <=?
1037  if (row + hbs < s->rows) {
1038  decode_sb_mem(avctx, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1039  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1040  yoff += hbs * 8 * y_stride;
1041  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1042  decode_sb_mem(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1043  decode_sb_mem(avctx, row + hbs, col + hbs, lflvl,
1044  yoff + 8 * hbs * bytesperpixel,
1045  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1046  } else {
1047  yoff += hbs * 8 * bytesperpixel;
1048  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1049  decode_sb_mem(avctx, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1050  }
1051  } else if (row + hbs < s->rows) {
1052  yoff += hbs * 8 * y_stride;
1053  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1054  decode_sb_mem(avctx, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1055  }
1056  }
1057 }
1058 
1059 static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
1060 {
1061  int sb_start = ( idx * n) >> log2_n;
1062  int sb_end = ((idx + 1) * n) >> log2_n;
1063  *start = FFMIN(sb_start, n) << 3;
1064  *end = FFMIN(sb_end, n) << 3;
1065 }
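/* Worked example, added for illustration (not in the original source):
 * with sb_cols = 30 (a 1920-pixel-wide frame) and log2_tile_cols = 2,
 * set_tile_offset() splits the 30 superblock columns into tiles of
 * 7, 8, 7 and 8 superblocks: tile 0 covers sb columns [0, 7), i.e.
 * 8x8-block columns [0, 56) after the << 3, tile 1 covers [7, 15) ->
 * [56, 120), and so on. */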
1066 
1067 static void free_buffers(VP9Context *s)
1068 {
1069  av_freep(&s->intra_pred_data[0]);
1070  av_freep(&s->b_base);
1071  av_freep(&s->block_base);
1072 }
1073 
1074 static av_cold int vp9_decode_free(AVCodecContext *avctx)
1075 {
1076  VP9Context *s = avctx->priv_data;
1077  int i;
1078 
1079  for (i = 0; i < 3; i++) {
1080  if (s->s.frames[i].tf.f->buf[0])
1081  vp9_frame_unref(avctx, &s->s.frames[i]);
1082  av_frame_free(&s->s.frames[i].tf.f);
1083  }
1084  for (i = 0; i < 8; i++) {
1085  if (s->s.refs[i].f->buf[0])
1086  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1087  av_frame_free(&s->s.refs[i].f);
1088  if (s->next_refs[i].f->buf[0])
1089  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1090  av_frame_free(&s->next_refs[i].f);
1091  }
1092  free_buffers(s);
1093  av_freep(&s->c_b);
1094  s->c_b_size = 0;
1095 
1096  return 0;
1097 }
1098 
1099 
1100 static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
1101  int *got_frame, AVPacket *pkt)
1102 {
1103  const uint8_t *data = pkt->data;
1104  int size = pkt->size;
1105  VP9Context *s = avctx->priv_data;
1106  int ret, tile_row, tile_col, i, ref, row, col;
1107  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1108  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1109  ptrdiff_t yoff, uvoff, ls_y, ls_uv;
1110  AVFrame *f;
1111  int bytesperpixel;
1112 
1113  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1114  return ret;
1115  } else if (ret == 0) {
1116  if (!s->s.refs[ref].f->buf[0]) {
1117  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1118  return AVERROR_INVALIDDATA;
1119  }
1120  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1121  return ret;
1122  ((AVFrame *)frame)->pts = pkt->pts;
1123 #if FF_API_PKT_PTS
1124 FF_DISABLE_DEPRECATION_WARNINGS
1125  ((AVFrame *)frame)->pkt_pts = pkt->pts;
1126 FF_ENABLE_DEPRECATION_WARNINGS
1127 #endif
1128  ((AVFrame *)frame)->pkt_dts = pkt->dts;
1129  for (i = 0; i < 8; i++) {
1130  if (s->next_refs[i].f->buf[0])
1131  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1132  if (s->s.refs[i].f->buf[0] &&
1133  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1134  return ret;
1135  }
1136  *got_frame = 1;
1137  return pkt->size;
1138  }
1139  data += ret;
1140  size -= ret;
1141 
1142  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1143  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1144  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1145  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1146  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1147  return ret;
1148  }
1149  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1150  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1151  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1152  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1153  return ret;
1154  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1155  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1156  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1157  return ret;
1158  f = s->s.frames[CUR_FRAME].tf.f;
1159  f->key_frame = s->s.h.keyframe;
1160  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1161  ls_y = f->linesize[0];
1162  ls_uv = f->linesize[1];
1163 
1164  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1165  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1166  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1167  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1168  }
1169 
1170  // ref frame setup
1171  for (i = 0; i < 8; i++) {
1172  if (s->next_refs[i].f->buf[0])
1173  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1174  if (s->s.h.refreshrefmask & (1 << i)) {
1175  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1176  } else if (s->s.refs[i].f->buf[0]) {
1177  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1178  }
1179  if (ret < 0)
1180  return ret;
1181  }
1182 
1183  if (avctx->hwaccel) {
1184  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
1185  if (ret < 0)
1186  return ret;
1187  ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1188  if (ret < 0)
1189  return ret;
1190  ret = avctx->hwaccel->end_frame(avctx);
1191  if (ret < 0)
1192  return ret;
1193  goto finish;
1194  }
1195 
1196  // main tile decode loop
1197  bytesperpixel = s->bytesperpixel;
1198  memset(s->above_partition_ctx, 0, s->cols);
1199  memset(s->above_skip_ctx, 0, s->cols);
1200  if (s->s.h.keyframe || s->s.h.intraonly) {
1201  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1202  } else {
1203  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1204  }
1205  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1206  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1207  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1208  memset(s->above_segpred_ctx, 0, s->cols);
1209  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1210  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1211  if ((ret = update_block_buffers(avctx)) < 0) {
1212  av_log(avctx, AV_LOG_ERROR,
1213  "Failed to allocate block buffers\n");
1214  return ret;
1215  }
1216  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1217  int j, k, l, m;
1218 
1219  for (i = 0; i < 4; i++) {
1220  for (j = 0; j < 2; j++)
1221  for (k = 0; k < 2; k++)
1222  for (l = 0; l < 6; l++)
1223  for (m = 0; m < 6; m++)
1224  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1225  s->prob.coef[i][j][k][l][m], 3);
1226  if (s->s.h.txfmmode == i)
1227  break;
1228  }
1229  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1230  ff_thread_finish_setup(avctx);
1231  } else if (!s->s.h.refreshctx) {
1232  ff_thread_finish_setup(avctx);
1233  }
1234 
1235  do {
1236  yoff = uvoff = 0;
1237  s->b = s->b_base;
1238  s->block = s->block_base;
1239  s->uvblock[0] = s->uvblock_base[0];
1240  s->uvblock[1] = s->uvblock_base[1];
1241  s->eob = s->eob_base;
1242  s->uveob[0] = s->uveob_base[0];
1243  s->uveob[1] = s->uveob_base[1];
1244 
1245  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1246  set_tile_offset(&s->tile_row_start, &s->tile_row_end,
1247  tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);
1248  if (s->pass != 2) {
1249  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1250  int64_t tile_size;
1251 
1252  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1253  tile_row == s->s.h.tiling.tile_rows - 1) {
1254  tile_size = size;
1255  } else {
1256  tile_size = AV_RB32(data);
1257  data += 4;
1258  size -= 4;
1259  }
1260  if (tile_size > size) {
1261  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1262  return AVERROR_INVALIDDATA;
1263  }
1264  ret = ff_vp56_init_range_decoder(&s->c_b[tile_col], data, tile_size);
1265  if (ret < 0)
1266  return ret;
1267  if (vp56_rac_get_prob_branchy(&s->c_b[tile_col], 128)) { // marker bit
1268  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1269  return AVERROR_INVALIDDATA;
1270  }
1271  data += tile_size;
1272  size -= tile_size;
1273  }
1274  }
1275 
1276  for (row = s->tile_row_start; row < s->tile_row_end;
1277  row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
1278  VP9Filter *lflvl_ptr = s->lflvl;
1279  ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
1280 
1281  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1282  set_tile_offset(&s->tile_col_start, &s->tile_col_end,
1283  tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
1284 
1285  if (s->pass != 2) {
1286  memset(s->left_partition_ctx, 0, 8);
1287  memset(s->left_skip_ctx, 0, 8);
1288  if (s->s.h.keyframe || s->s.h.intraonly) {
1289  memset(s->left_mode_ctx, DC_PRED, 16);
1290  } else {
1291  memset(s->left_mode_ctx, NEARESTMV, 8);
1292  }
1293  memset(s->left_y_nnz_ctx, 0, 16);
1294  memset(s->left_uv_nnz_ctx, 0, 32);
1295  memset(s->left_segpred_ctx, 0, 8);
1296 
1297  memcpy(&s->c, &s->c_b[tile_col], sizeof(s->c));
1298  }
1299 
1300  for (col = s->tile_col_start;
1301  col < s->tile_col_end;
1302  col += 8, yoff2 += 64 * bytesperpixel,
1303  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1304  // FIXME integrate with lf code (i.e. zero after each
1305  // use, similar to invtxfm coefficients, or similar)
1306  if (s->pass != 1) {
1307  memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
1308  }
1309 
1310  if (s->pass == 2) {
1311  decode_sb_mem(avctx, row, col, lflvl_ptr,
1312  yoff2, uvoff2, BL_64X64);
1313  } else {
1314  decode_sb(avctx, row, col, lflvl_ptr,
1315  yoff2, uvoff2, BL_64X64);
1316  }
1317  }
1318  if (s->pass != 2)
1319  memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
1320  }
1321 
1322  if (s->pass == 1)
1323  continue;
1324 
1325  // backup pre-loopfilter reconstruction data for intra
1326  // prediction of next row of sb64s
1327  if (row + 8 < s->rows) {
1328  memcpy(s->intra_pred_data[0],
1329  f->data[0] + yoff + 63 * ls_y,
1330  8 * s->cols * bytesperpixel);
1331  memcpy(s->intra_pred_data[1],
1332  f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1333  8 * s->cols * bytesperpixel >> s->ss_h);
1334  memcpy(s->intra_pred_data[2],
1335  f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
1336  8 * s->cols * bytesperpixel >> s->ss_h);
1337  }
1338 
1339  // loopfilter one row
1340  if (s->s.h.filter.level) {
1341  yoff2 = yoff;
1342  uvoff2 = uvoff;
1343  lflvl_ptr = s->lflvl;
1344  for (col = 0; col < s->cols;
1345  col += 8, yoff2 += 64 * bytesperpixel,
1346  uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1347  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
1348  yoff2, uvoff2);
1349  }
1350  }
1351 
1352  // FIXME maybe we can make this more finegrained by running the
1353  // loopfilter per-block instead of after each sbrow
1354  // In fact that would also make intra pred left preparation easier?
1355  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
1356  }
1357  }
1358 
1359  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1360  ff_vp9_adapt_probs(s);
1361  ff_thread_finish_setup(avctx);
1362  }
1363  } while (s->pass++ == 1);
1364  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1365 
1366 finish:
1367  // ref frame setup
1368  for (i = 0; i < 8; i++) {
1369  if (s->s.refs[i].f->buf[0])
1370  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1371  if (s->next_refs[i].f->buf[0] &&
1372  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1373  return ret;
1374  }
1375 
1376  if (!s->s.h.invisible) {
1377  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1378  return ret;
1379  *got_frame = 1;
1380  }
1381 
1382  return pkt->size;
1383 }
1384 
1385 static void vp9_decode_flush(AVCodecContext *avctx)
1386 {
1387  VP9Context *s = avctx->priv_data;
1388  int i;
1389 
1390  for (i = 0; i < 3; i++)
1391  vp9_frame_unref(avctx, &s->s.frames[i]);
1392  for (i = 0; i < 8; i++)
1393  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1394 }
1395 
1396 static int init_frames(AVCodecContext *avctx)
1397 {
1398  VP9Context *s = avctx->priv_data;
1399  int i;
1400 
1401  for (i = 0; i < 3; i++) {
1402  s->s.frames[i].tf.f = av_frame_alloc();
1403  if (!s->s.frames[i].tf.f) {
1404  vp9_decode_free(avctx);
1405  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1406  return AVERROR(ENOMEM);
1407  }
1408  }
1409  for (i = 0; i < 8; i++) {
1410  s->s.refs[i].f = av_frame_alloc();
1411  s->next_refs[i].f = av_frame_alloc();
1412  if (!s->s.refs[i].f || !s->next_refs[i].f) {
1413  vp9_decode_free(avctx);
1414  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1415  return AVERROR(ENOMEM);
1416  }
1417  }
1418 
1419  return 0;
1420 }
1421 
1422 static av_cold int vp9_decode_init(AVCodecContext *avctx)
1423 {
1424  VP9Context *s = avctx->priv_data;
1425 
1426  avctx->internal->allocate_progress = 1;
1427  s->last_bpp = 0;
1428  s->s.h.filter.sharpness = -1;
1429 
1430  return init_frames(avctx);
1431 }
1432 
1433 #if HAVE_THREADS
1434 static av_cold int vp9_decode_init_thread_copy(AVCodecContext *avctx)
1435 {
1436  return init_frames(avctx);
1437 }
1438 
1439 static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1440 {
1441  int i, ret;
1442  VP9Context *s = dst->priv_data, *ssrc = src->priv_data;
1443 
1444  for (i = 0; i < 3; i++) {
1445  if (s->s.frames[i].tf.f->buf[0])
1446  vp9_frame_unref(dst, &s->s.frames[i]);
1447  if (ssrc->s.frames[i].tf.f->buf[0]) {
1448  if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
1449  return ret;
1450  }
1451  }
1452  for (i = 0; i < 8; i++) {
1453  if (s->s.refs[i].f->buf[0])
1454  ff_thread_release_buffer(dst, &s->s.refs[i]);
1455  if (ssrc->next_refs[i].f->buf[0]) {
1456  if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
1457  return ret;
1458  }
1459  }
1460 
1461  s->s.h.invisible = ssrc->s.h.invisible;
1462  s->s.h.keyframe = ssrc->s.h.keyframe;
1463  s->s.h.intraonly = ssrc->s.h.intraonly;
1464  s->ss_v = ssrc->ss_v;
1465  s->ss_h = ssrc->ss_h;
1466  s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
1467  s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
1468  s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
1469  s->bytesperpixel = ssrc->bytesperpixel;
1470  s->gf_fmt = ssrc->gf_fmt;
1471  s->w = ssrc->w;
1472  s->h = ssrc->h;
1473  s->s.h.bpp = ssrc->s.h.bpp;
1474  s->bpp_index = ssrc->bpp_index;
1475  s->pix_fmt = ssrc->pix_fmt;
1476  memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
1477  memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
1478  memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
1479  sizeof(s->s.h.segmentation.feat));
1480 
1481  return 0;
1482 }
1483 #endif
1484 
1485 AVCodec ff_vp9_decoder = {
1486  .name = "vp9",
1487  .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1488  .type = AVMEDIA_TYPE_VIDEO,
1489  .id = AV_CODEC_ID_VP9,
1490  .priv_data_size = sizeof(VP9Context),
1491  .init = vp9_decode_init,
1492  .close = vp9_decode_free,
1493  .decode = vp9_decode_frame,
1494  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1495  .flush = vp9_decode_flush,
1496  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp9_decode_init_thread_copy),
1497  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1498  .profiles = NULL_IF_CONFIG_SMALL(ff_vp9_profiles),
1499 };
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:453
ThreadFrame tf
Definition: vp9shared.h:60
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3864
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
uint8_t * segmentation_map
Definition: vp9shared.h:62
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:353
uint8_t parallelmode
Definition: vp9shared.h:108
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1385
This structure describes decoded (raw) audio or video data.
Definition: frame.h:187
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1059
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t allowcompinter
Definition: vp9shared.h:106
static void flush(AVCodecContext *avctx)
uint8_t left_segpred_ctx[8]
Definition: vp9dec.h:164
VP5 and VP6 compatible video decoder (common features)
uint8_t update_map
Definition: vp9shared.h:133
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:256
uint8_t * above_skip_ctx
Definition: vp9dec.h:174
uint8_t * eob_base
Definition: vp9dec.h:191
uint8_t mvstep[3][2]
Definition: vp9dec.h:196
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:261
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:380
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:210
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:370
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:457
VP9BitstreamHeader h
Definition: vp9shared.h:160
VideoDSPContext vdsp
Definition: vp9dec.h:91
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1422
ProbContext p
Definition: vp9dec.h:120
uint8_t last_keyframe
Definition: vp9dec.h:104
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2469
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:180
uint8_t ss_v
Definition: vp9dec.h:102
struct ProbContext::@145 mv_comp[2]
int size
Definition: avcodec.h:1658
const char * b
Definition: vf_curves.c:113
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:367
uint8_t prob[7]
Definition: vp9shared.h:134
uint8_t tx32p[2][3]
Definition: vp9dec.h:53
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:458
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1960
uint8_t left_uv_nnz_ctx[2][16]
Definition: vp9dec.h:160
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:355
uint8_t framectxid
Definition: vp9shared.h:109
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
VP9Filter * lflvl
Definition: vp9dec.h:185
static AVPacket pkt
unsigned cols
Definition: vp9dec.h:111
#define src
Definition: vp8dsp.c:254
unsigned tile_col_end
Definition: vp9dec.h:118
int profile
profile
Definition: avcodec.h:3235
AVCodec.
Definition: avcodec.h:3681
uint8_t comp_ref[5]
Definition: vp9dec.h:52
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:452
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
functionally identical to above
Definition: pixfmt.h:459
unsigned log2_tile_rows
Definition: vp9shared.h:151
uint8_t * intra_pred_data[3]
Definition: vp9dec.h:184
int uncompressed_header_size
Definition: vp9shared.h:155
enum FilterMode filtermode
Definition: vp9shared.h:105
struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:3052
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
uint8_t coef[4][2][2][6][6][3]
Definition: vp9dec.h:121
#define VP9_SYNCCODE
Definition: vp9.c:37
uint8_t bits
Definition: crc.c:296
uint8_t
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
uint8_t absolute_vals
Definition: vp9shared.h:132
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
uint8_t varcompref[2]
Definition: vp9shared.h:114
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:451
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:46
static void free_buffers(VP9Context *s)
Definition: vp9.c:1067
VP9Frame frames[3]
Definition: vp9shared.h:166
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Multithreading support functions.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:3551
uint8_t * uveob_base[2]
Definition: vp9dec.h:191
AV_RB32
Definition: bytestream.h:87
static AVFrame * frame
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:114
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:66
static void finish(void)
Definition: movenc.c:344
uint8_t * data
Definition: avcodec.h:1657
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:199
AVBufferRef * extradata
Definition: vp9shared.h:61
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3970
uint8_t skip[3]
Definition: vp9dec.h:56
bitstream reader API header.
uint8_t * above_uv_nnz_ctx[2]
Definition: vp9dec.h:173
VP9DSPContext dsp
Definition: vp9dec.h:90
uint8_t lim_lut[64]
Definition: vp9dec.h:115
ptrdiff_t size
Definition: opengl_enc.c:101
enum CompPredMode comppredmode
Definition: vp9shared.h:149
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:356
unsigned log2_tile_cols
Definition: vp9shared.h:151
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
uint16_t mvscale[3][2]
Definition: vp9dec.h:195
uint8_t refidx[3]
Definition: vp9shared.h:111
uint8_t * above_txfm_ctx
Definition: vp9dec.h:175
struct VP9BitstreamHeader::@154::@156 feat[MAX_SEGMENT]
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:84
int h
Definition: vp9dec.h:109
#define av_log(a,...)
int16_t * block
Definition: vp9dec.h:190
static void decode_sb_mem(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1010
uint8_t bytesperpixel
Definition: vp9dec.h:103
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:74
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1074
int16_t * uvblock[2]
Definition: vp9dec.h:190
Definition: vp9.h:28
uint8_t partition[4][4][3]
Definition: vp9dec.h:68
uint8_t hp
Definition: vp9dec.h:66
int width
width and height of the video frame
Definition: frame.h:239
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
uint8_t sign
Definition: vp9dec.h:59
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:392
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
uint8_t left_partition_ctx[8]
Definition: vp9dec.h:161
unsigned tile_cols
Definition: vp9shared.h:152
#define AVERROR(e)
Definition: error.h:43
GetBitContext gb
Definition: vp9dec.h:92
uint8_t fp[3]
Definition: vp9dec.h:64
uint8_t signbias[3]
Definition: vp9shared.h:112
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
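As a usage note for NULL_IF_CONFIG_SMALL() above: it is typically wrapped around user-facing strings so that size-optimized builds drop them. A sketch with a hypothetical codec entry (ff_example_decoder is not a real codec):
    #include "avcodec.h"
    #include "internal.h"

    AVCodec ff_example_decoder = {
        .name      = "example",
        .long_name = NULL_IF_CONFIG_SMALL("Example decoder (long descriptive name)"),
        /* remaining fields omitted in this sketch */
    };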
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3180
uint8_t refreshctx
Definition: vp9shared.h:107
const char * r
Definition: vf_curves.c:111
unsigned tile_row_start
Definition: vp9dec.h:118
uint8_t bpp_index
Definition: vp9dec.h:103
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1827
void * hwaccel_picture_private
Definition: vp9shared.h:67
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3688
uint8_t intra[4]
Definition: vp9dec.h:49
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:354
int16_t * uvblock_base[2]
Definition: vp9dec.h:190
#define FFMAX(a, b)
Definition: common.h:94
struct VP9BitstreamHeader::@152 filter
#define fail()
Definition: checkasm.h:89
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1057
uint8_t * above_filter_ctx
Definition: vp9dec.h:180
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:164
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:218
uint8_t comp[5]
Definition: vp9dec.h:50
unsigned c_b_size
Definition: vp9dec.h:95
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
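As a usage note for ff_vp56_init_range_decoder() above and the vp8_rac_*/vp56_rac_* helpers listed elsewhere on this page: a hedged sketch of how a compressed-header buffer would be opened and read. The field layout and the probability value 252 are made up for illustration.
    #include "vp56.h"

    static int read_example_fields(const uint8_t *buf, int buf_size)
    {
        VP56RangeCoder c;
        int ret, flag, literal;

        ret = ff_vp56_init_range_decoder(&c, buf, buf_size);
        if (ret < 0)
            return ret;

        flag    = vp8_rac_get(&c);         /* one bool, probability 128 */
        literal = vp8_rac_get_uint(&c, 4); /* 4-bit unsigned literal    */

        if (vp56_rac_get_prob_branchy(&c, 252)) { /* biased bool        */
            /* ... */
        }

        return (flag << 4) | literal;
    }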
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:261
int uses_2pass
Definition: vp9shared.h:64
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:921
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:3172
#define FFMIN(a, b)
Definition: common.h:96
uint8_t left_y_nnz_ctx[16]
Definition: vp9dec.h:157
enum TxfmMode txfmmode
Definition: vp9shared.h:148
uint8_t keyframe
Definition: vp9shared.h:98
unsigned tile_rows
Definition: vp9shared.h:152
uint8_t left_mode_ctx[16]
Definition: vp9dec.h:158
unsigned eob[4][2][2][6][6][2]
Definition: vp9dec.h:153
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:462
uint8_t tx16p[2][2]
Definition: vp9dec.h:54
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
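As a usage note for ff_thread_report_progress() above: a hedged sketch of the progress-reporting pattern in frame-threaded decoders. Reporting per row and passing field 0 (progressive) are illustrative choices; INT_MAX is the conventional "frame complete" value.
    #include <limits.h>
    #include "thread.h"

    /* Tell later decoding threads how many rows of this reference are ready. */
    static void report_rows_done(ThreadFrame *tf, int rows_done)
    {
        ff_thread_report_progress(tf, rows_done, 0);
    }

    /* Signal that the whole frame is available. */
    static void report_frame_done(ThreadFrame *tf)
    {
        ff_thread_report_progress(tf, INT_MAX, 0);
    }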
uint8_t class0_hp
Definition: vp9dec.h:65
VP9SharedContext s
Definition: vp9dec.h:88
void * av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
Reallocate the given buffer if it is not large enough, otherwise do nothing.
Definition: mem.c:450
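As a usage note for av_fast_realloc() above: a sketch of the grow-only buffer pattern it is meant for. The struct and helper names are hypothetical; on failure the original buffer is left untouched and still has to be freed by the caller.
    #include <stddef.h>
    #include <stdint.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    typedef struct GrowBuf {
        uint8_t *data;
        unsigned size; /* allocated size, updated by av_fast_realloc() */
    } GrowBuf;

    static int ensure_size(GrowBuf *b, size_t min_size)
    {
        void *tmp = av_fast_realloc(b->data, &b->size, min_size);
        if (!tmp)
            return AVERROR(ENOMEM); /* b->data is unchanged */
        b->data = tmp;
        return 0;
    }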
uint8_t * above_partition_ctx
Definition: vp9dec.h:169
int n
Definition: avisynth_c.h:684
uint8_t mv_mode[7][3]
Definition: vp9dec.h:48
uint8_t fixcompref
Definition: vp9shared.h:113
struct VP9BitstreamHeader::@155 tiling
uint8_t * above_segpred_ctx
Definition: vp9dec.h:176
#define FF_ARRAY_ELEMS(a)
unsigned tile_col_start
Definition: vp9dec.h:118
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:476
unsigned rows
Definition: vp9dec.h:111
unsigned sb_cols
Definition: vp9dec.h:111
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:251
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:271
static int init_frames(AVCodecContext *avctx)
Definition: vp9.c:1396
VP56mv(* above_mv_ctx)[2]
Definition: vp9dec.h:181
Libavcodec external API header.
int16_t * block_base
Definition: vp9dec.h:190
BlockLevel
Definition: vp9shared.h:70
uint8_t filter[4][2]
Definition: vp9dec.h:47
uint8_t class0_fp[2][3]
Definition: vp9dec.h:63
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:218
int pass
Definition: vp9dec.h:97
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:456
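As a usage note for init_get_bits8() above, together with get_bits1(), get_bits_long(), skip_bits() and get_bits_count() listed elsewhere on this page: a hedged sketch of reading from a GetBitContext. The field layout is invented; the comparison against VP9_SYNCCODE only illustrates how a 24-bit literal would be checked and is not the decoder's actual header parser.
    #include "get_bits.h"

    static int parse_example_header(const uint8_t *data, int size)
    {
        GetBitContext gb;
        int ret;

        ret = init_get_bits8(&gb, data, size);
        if (ret < 0)
            return ret;

        if (get_bits_long(&gb, 24) != VP9_SYNCCODE) /* 24-bit literal          */
            return AVERROR_INVALIDDATA;
        if (get_bits1(&gb))                         /* single flag bit         */
            skip_bits(&gb, 2);                      /* two (invented) reserved bits */

        return get_bits_count(&gb);                 /* bits consumed so far    */
    }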
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
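As a usage note for ff_thread_get_buffer() above and its counterpart ff_thread_release_buffer(): a hedged sketch of the acquire/release pairing for a reference-counted output frame; the helper name is hypothetical.
    #include "thread.h"

    static int get_and_drop_frame(AVCodecContext *avctx, ThreadFrame *tf)
    {
        int ret = ff_thread_get_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
        if (ret < 0)
            return ret;

        /* ... decode into tf->f ... */

        ff_thread_release_buffer(avctx, tf);
        return 0;
    }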
uint8_t left_skip_ctx[8]
Definition: vp9dec.h:162
main external API structure.
Definition: avcodec.h:1732
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:165
int8_t mode[2]
Definition: vp9shared.h:122
#define CUR_FRAME
Definition: vp9shared.h:163
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:213
VP56RangeCoder * c_b
Definition: vp9dec.h:94
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:84
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
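As a usage note for av_buffer_allocz() above (and av_buffer_ref() listed further down): a sketch of AVBuffer reference counting; av_buffer_unref() is the standard companion, and the size is arbitrary.
    #include "libavutil/buffer.h"
    #include "libavutil/error.h"

    static int buffer_refcount_example(void)
    {
        AVBufferRef *a, *b;

        a = av_buffer_allocz(4096); /* zero-filled, refcount == 1 */
        if (!a)
            return AVERROR(ENOMEM);

        b = av_buffer_ref(a);       /* second reference to the same data */
        if (!b) {
            av_buffer_unref(&a);
            return AVERROR(ENOMEM);
        }

        av_buffer_unref(&a);        /* data stays alive through b */
        av_buffer_unref(&b);        /* last reference: data is freed */
        return 0;
    }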
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:313
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:351
unsigned partition[4][4][4]
Definition: vp9dec.h:151
uint8_t * above_y_nnz_ctx
Definition: vp9dec.h:172
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:306
struct VP9Context::@147 prob_ctx[4]
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2462
uint8_t tx8p[2]
Definition: vp9dec.h:55
uint8_t ss_h
Definition: vp9dec.h:102
uint8_t y_mode[4][9]
Definition: vp9dec.h:45
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:357
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
uint8_t last_bpp
Definition: vp9dec.h:103
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:330
uint8_t * above_intra_ctx
Definition: vp9dec.h:177
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:126
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:346
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer. ...
Definition: pixfmt.h:148
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
enum BlockPartition bp
Definition: vp9dec.h:84
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:324
VP9Block * b
Definition: vp9dec.h:96
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:368
uint8_t * uveob[2]
Definition: vp9dec.h:191
uint8_t * above_mode_ctx
Definition: vp9dec.h:170
uint8_t single_ref[5][2]
Definition: vp9dec.h:51
Definition: vp56.h:66
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:352
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:39
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1100
uint8_t bits[10]
Definition: vp9dec.h:62
ThreadFrame next_refs[8]
Definition: vp9dec.h:112
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:358
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:201
Definition: vp9.h:48
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:475
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:262
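The tooltip above gives only the prototype; roughly, inv_recenter_nonneg() is the inverse-recentering step the VP9 spec uses when applying coded probability deltas around a reference value m. A sketch of that mapping (the in-tree implementation may be written differently):
    static int inv_recenter_nonneg_sketch(int v, int m)
    {
        if (v > 2 * m)
            return v;
        return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
    }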
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
common internal api header.
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define assign(var, type, n)
int w
Definition: vp9dec.h:109
static double c[64]
enum AVPixelFormat pix_fmt, last_fmt, gf_fmt
Definition: vp9dec.h:110
AVCodec ff_vp9_decoder
Definition: vp9.c:1485
static void decode_sb(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:931
unsigned sb_rows
Definition: vp9dec.h:111
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
unsigned properties
Definition: avcodec.h:3550
struct VP9BitstreamHeader::@153 lf_delta
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:308
struct VP9Context::@148 prob
Core video DSP helper functions.
struct VP9Context::@149 counts
uint8_t mv_joint[3]
Definition: vp9dec.h:57
enum BlockLevel bl
Definition: vp9dec.h:83
void * priv_data
Definition: avcodec.h:1774
#define HWACCEL_MAX
struct VP9Context::@146 filter_lut
uint8_t class0
Definition: vp9dec.h:61
#define av_free(p)
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:48
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
unsigned tile_row_end
Definition: vp9dec.h:118
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3898
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1782
static int decode012(GetBitContext *gb)
Definition: get_bits.h:569
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:256
VP9mvrefPair * mv
Definition: vp9shared.h:63
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:464
uint8_t invisible
Definition: vp9shared.h:99
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1656
uint8_t use_last_frame_mvs
Definition: vp9shared.h:110
int height
Definition: frame.h:239
ThreadFrame refs[8]
Definition: vp9shared.h:162
uint8_t pred_prob[3]
Definition: vp9shared.h:135
#define av_freep(p)
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:100
VP9Block * b_base
Definition: vp9dec.h:96
void INT64 start
Definition: avisynth_c.h:690
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2257
#define av_always_inline
Definition: attributes.h:39
HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer...
Definition: pixfmt.h:243
static int update_prob(VP56RangeCoder *c, int p)
Definition: vp9.c:272
#define av_malloc_array(a, b)
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2249
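As a usage note for av_get_pix_fmt_name() above: it may return NULL for unknown formats, so guard before printing. The logging context and helper name are illustrative.
    #include "libavutil/log.h"
    #include "libavutil/pixdesc.h"

    static void log_pix_fmt(void *log_ctx, enum AVPixelFormat fmt)
    {
        const char *name = av_get_pix_fmt_name(fmt);
        av_log(log_ctx, AV_LOG_INFO, "pixel format: %s\n", name ? name : "unknown");
    }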
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:135
uint8_t * above_ref_ctx
Definition: vp9dec.h:179
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3878
struct VP9BitstreamHeader::@154 segmentation
BlockPartition
Definition: vp9shared.h:34
uint8_t classes[10]
Definition: vp9dec.h:60
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3889
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540
uint8_t highprecisionmvs
Definition: vp9shared.h:104
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1634
void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1267
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1389
uint8_t * above_comp_ctx
Definition: vp9dec.h:178
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:994
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1650
int block_alloc_using_2pass
Definition: vp9dec.h:189
Predicted.
Definition: avutil.h:275
int compressed_header_size
Definition: vp9shared.h:156
uint8_t refreshrefmask
Definition: vp9shared.h:103
VP56RangeCoder c
Definition: vp9dec.h:93