FFmpeg
vp9.c
Go to the documentation of this file.
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "avcodec.h"
25 #include "get_bits.h"
26 #include "hwconfig.h"
27 #include "internal.h"
28 #include "profiles.h"
29 #include "thread.h"
30 #include "videodsp.h"
31 #include "vp56.h"
32 #include "vp9.h"
33 #include "vp9data.h"
34 #include "vp9dec.h"
35 #include "libavutil/avassert.h"
36 #include "libavutil/pixdesc.h"
38 
39 #define VP9_SYNCCODE 0x498342
40 
41 #if HAVE_THREADS
42 static void vp9_free_entries(AVCodecContext *avctx) {
43  VP9Context *s = avctx->priv_data;
44 
45  if (avctx->active_thread_type & FF_THREAD_SLICE) {
46  pthread_mutex_destroy(&s->progress_mutex);
47  pthread_cond_destroy(&s->progress_cond);
48  av_freep(&s->entries);
49  }
50 }
51 
52 static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
53  VP9Context *s = avctx->priv_data;
54  int i;
55 
56  if (avctx->active_thread_type & FF_THREAD_SLICE) {
57  if (s->entries)
58  av_freep(&s->entries);
59 
60  s->entries = av_malloc_array(n, sizeof(atomic_int));
61 
62  if (!s->entries) {
63  av_freep(&s->entries);
64  return AVERROR(ENOMEM);
65  }
66 
67  for (i = 0; i < n; i++)
68  atomic_init(&s->entries[i], 0);
69 
70  pthread_mutex_init(&s->progress_mutex, NULL);
71  pthread_cond_init(&s->progress_cond, NULL);
72  }
73  return 0;
74 }
75 
/* Advance progress counter `field` by n and wake any thread blocked in
 * vp9_await_tile_progress().  The add uses release ordering so that data
 * written before the report is visible to a consumer's acquire load of
 * the counter; the mutex is held around the signal so a waiter cannot
 * miss a wakeup between its re-check and pthread_cond_wait(). */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}
82 
83 static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
84  if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
85  return;
86 
87  pthread_mutex_lock(&s->progress_mutex);
88  while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
89  pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
90  pthread_mutex_unlock(&s->progress_mutex);
91 }
#else
/* Builds without thread support need no progress tracking: provide no-op
 * stand-ins with the same signatures so callers compile unchanged. */
static void vp9_free_entries(AVCodecContext *avctx) {}
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif
96 
98 {
99  av_freep(&td->b_base);
100  av_freep(&td->block_base);
101  av_freep(&td->block_structure);
102 }
103 
105 {
106  ff_thread_release_buffer(avctx, &f->tf);
107  av_buffer_unref(&f->extradata);
108  av_buffer_unref(&f->hwaccel_priv_buf);
109  f->segmentation_map = NULL;
110  f->hwaccel_picture_private = NULL;
111 }
112 
114 {
115  VP9Context *s = avctx->priv_data;
116  int ret, sz;
117 
119  if (ret < 0)
120  return ret;
121 
122  sz = 64 * s->sb_cols * s->sb_rows;
123  if (sz != s->frame_extradata_pool_size) {
124  av_buffer_pool_uninit(&s->frame_extradata_pool);
125  s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
126  if (!s->frame_extradata_pool) {
127  s->frame_extradata_pool_size = 0;
128  goto fail;
129  }
130  s->frame_extradata_pool_size = sz;
131  }
132  f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
133  if (!f->extradata) {
134  goto fail;
135  }
136  memset(f->extradata->data, 0, f->extradata->size);
137 
138  f->segmentation_map = f->extradata->data;
139  f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
140 
141  if (avctx->hwaccel) {
142  const AVHWAccel *hwaccel = avctx->hwaccel;
143  av_assert0(!f->hwaccel_picture_private);
144  if (hwaccel->frame_priv_data_size) {
145  f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
146  if (!f->hwaccel_priv_buf)
147  goto fail;
148  f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
149  }
150  }
151 
152  return 0;
153 
154 fail:
155  vp9_frame_unref(avctx, f);
156  return AVERROR(ENOMEM);
157 }
158 
160 {
161  int ret;
162 
163  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
164  if (ret < 0)
165  return ret;
166 
167  dst->extradata = av_buffer_ref(src->extradata);
168  if (!dst->extradata)
169  goto fail;
170 
171  dst->segmentation_map = src->segmentation_map;
172  dst->mv = src->mv;
173  dst->uses_2pass = src->uses_2pass;
174 
175  if (src->hwaccel_picture_private) {
176  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
177  if (!dst->hwaccel_priv_buf)
178  goto fail;
180  }
181 
182  return 0;
183 
184 fail:
185  vp9_frame_unref(avctx, dst);
186  return AVERROR(ENOMEM);
187 }
188 
/* (Re)configure the decoder for a frame of w x h pixels: negotiate the
 * output pixel format (offering hwaccel formats where compiled in),
 * recompute block/superblock geometry, and reallocate the per-column
 * "above" context scratch buffers.  Returns 0 or a negative AVERROR. */
static int update_size(AVCodecContext *avctx, int w, int h)
{
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
                     CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
                     CONFIG_VP9_NVDEC_HWACCEL + \
                     CONFIG_VP9_VAAPI_HWACCEL + \
                     CONFIG_VP9_VDPAU_HWACCEL)
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
    VP9Context *s = avctx->priv_data;
    uint8_t *p;
    int bytesperpixel = s->bytesperpixel, ret, cols, rows;
    int lflvl_len, i;

    av_assert0(w > 0 && h > 0);

    /* Re-run format negotiation only when pixel format or dimensions
     * actually changed since the last negotiated frame. */
    if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
        if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
            return ret;

        /* Build the candidate list, hardware formats first, in priority
         * order; the software format is appended last as the fallback. */
        switch (s->pix_fmt) {
        case AV_PIX_FMT_YUV420P:
#if CONFIG_VP9_VDPAU_HWACCEL
            *fmtp++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_VP9_DXVA2_HWACCEL
            *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_VP9_D3D11VA_HWACCEL
            *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
            *fmtp++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        /* NOTE(review): the case label(s) for the higher-bit-depth formats
         * handled by the branch below appear to be missing from this
         * excerpt — verify against the upstream file. */
#if CONFIG_VP9_NVDEC_HWACCEL
            *fmtp++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_VP9_VAAPI_HWACCEL
            *fmtp++ = AV_PIX_FMT_VAAPI;
#endif
            break;
        }

        *fmtp++ = s->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        /* NOTE(review): the format-negotiation call that assigns `ret`
         * from pix_fmts[] appears to be missing from this excerpt —
         * verify against the upstream file. */
        if (ret < 0)
            return ret;

        avctx->pix_fmt = ret;
        s->gf_fmt = s->pix_fmt;
        s->w = w;
        s->h = h;
    }

    /* Geometry in 8x8 blocks. */
    cols = (w + 7) >> 3;
    rows = (h + 7) >> 3;

    /* Scratch buffers still valid for this geometry/format: nothing to do. */
    if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
        return 0;

    s->last_fmt = s->pix_fmt;
    s->sb_cols = (w + 63) >> 6;    // 64x64 superblock columns
    s->sb_rows = (h + 63) >> 6;    // 64x64 superblock rows
    s->cols = (w + 7) >> 3;
    s->rows = (h + 7) >> 3;
    /* One loop-filter level struct per superblock row when slice threading,
     * otherwise a single shared one. */
    lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;

    /* Carve the "above" context arrays out of one allocation; each slice is
     * s->sb_cols * n elements wide. */
#define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
    av_freep(&s->intra_pred_data[0]);
    // FIXME we slightly over-allocate here for subsampled chroma, but a little
    // bit of padding shouldn't affect performance...
    p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
                  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
    assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
    assign(s->above_y_nnz_ctx, uint8_t *, 16);
    assign(s->above_mode_ctx, uint8_t *, 16);
    assign(s->above_mv_ctx, VP56mv(*)[2], 16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
    assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
    assign(s->above_partition_ctx, uint8_t *, 8);
    assign(s->above_skip_ctx, uint8_t *, 8);
    assign(s->above_txfm_ctx, uint8_t *, 8);
    assign(s->above_segpred_ctx, uint8_t *, 8);
    assign(s->above_intra_ctx, uint8_t *, 8);
    assign(s->above_comp_ctx, uint8_t *, 8);
    assign(s->above_ref_ctx, uint8_t *, 8);
    assign(s->above_filter_ctx, uint8_t *, 8);
    assign(s->lflvl, VP9Filter *, lflvl_len);
#undef assign

    /* Geometry changed, so per-tile block buffers are stale; free them and
     * let them be reallocated on demand. */
    if (s->td) {
        for (i = 0; i < s->active_tile_cols; i++)
            vp9_tile_data_free(&s->td[i]);
    }

    /* Reinitialize the DSP function tables when the bit depth changed. */
    if (s->s.h.bpp != s->last_bpp) {
        ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
        ff_videodsp_init(&s->vdsp, s->s.h.bpp);
        s->last_bpp = s->s.h.bpp;
    }

    return 0;
}
304 
306 {
307  int i;
308  VP9Context *s = avctx->priv_data;
309  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
310  VP9TileData *td = &s->td[0];
311 
312  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
313  return 0;
314 
316  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
317  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
318  if (s->s.frames[CUR_FRAME].uses_2pass) {
319  int sbs = s->sb_cols * s->sb_rows;
320 
321  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
322  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
323  16 * 16 + 2 * chroma_eobs) * sbs);
324  if (!td->b_base || !td->block_base)
325  return AVERROR(ENOMEM);
326  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
327  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
328  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
329  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
330  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
331 
333  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
334  if (!td->block_structure)
335  return AVERROR(ENOMEM);
336  }
337  } else {
338  for (i = 1; i < s->active_tile_cols; i++)
339  vp9_tile_data_free(&s->td[i]);
340 
341  for (i = 0; i < s->active_tile_cols; i++) {
342  s->td[i].b_base = av_malloc(sizeof(VP9Block));
343  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
344  16 * 16 + 2 * chroma_eobs);
345  if (!s->td[i].b_base || !s->td[i].block_base)
346  return AVERROR(ENOMEM);
347  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
348  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
349  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
350  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
351  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
352 
354  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
355  if (!s->td[i].block_structure)
356  return AVERROR(ENOMEM);
357  }
358  }
359  }
360  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
361 
362  return 0;
363 }
364 
365 // The sign bit is at the end, not the start, of a bit sequence
367 {
368  int v = get_bits(gb, n);
369  return get_bits1(gb) ? -v : v;
370 }
371 
/* Undo the "recenter" mapping around m: coded offsets enumerate values at
 * alternating distances from m (0 -> m, 1 -> m-1, 2 -> m+1, 3 -> m-2, ...)
 * until the downward side is exhausted at v == 2*m; beyond that the offset
 * maps to itself. */
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    if (v > 2 * m)
        return v;
    return (v & 1) ? m - ((v + 1) >> 1)
                   : m + (v >> 1);
}
380 
381 // differential forward probability updates
// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    /* Maps the VLC-decoded delta index to the absolute probability
     * difference.  The first 20 entries (7, 20, ..., 254, step 13) are the
     * coarse update steps; the remaining entries enumerate all other
     * values of [1, 253] in increasing order. */
    static const uint8_t inv_map_table[255] = {
        7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254, 1, 2, 3, 4, 5, 6, 8, 9,
        10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 24,
        25, 26, 27, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39,
        40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 52, 53, 54,
        55, 56, 57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
        70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
        86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    /* Decode the VLC-coded delta index d; each branch covers successively
     * longer (less probable) index ranges. */
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;   // indices 0..15
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;  // indices 16..31
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;  // indices 32..63
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* Apply the absolute difference on whichever side of 128 leaves the
     * result inside [1, 255]. */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
439 
441 {
442  static const enum AVColorSpace colorspaces[8] = {
445  };
446  VP9Context *s = avctx->priv_data;
447  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
448 
449  s->bpp_index = bits;
450  s->s.h.bpp = 8 + bits * 2;
451  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
452  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
453  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
454  static const enum AVPixelFormat pix_fmt_rgb[3] = {
456  };
457  s->ss_h = s->ss_v = 0;
458  avctx->color_range = AVCOL_RANGE_JPEG;
459  s->pix_fmt = pix_fmt_rgb[bits];
460  if (avctx->profile & 1) {
461  if (get_bits1(&s->gb)) {
462  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
463  return AVERROR_INVALIDDATA;
464  }
465  } else {
466  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
467  avctx->profile);
468  return AVERROR_INVALIDDATA;
469  }
470  } else {
471  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
478  };
480  if (avctx->profile & 1) {
481  s->ss_h = get_bits1(&s->gb);
482  s->ss_v = get_bits1(&s->gb);
483  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
484  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
485  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
486  avctx->profile);
487  return AVERROR_INVALIDDATA;
488  } else if (get_bits1(&s->gb)) {
489  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
490  avctx->profile);
491  return AVERROR_INVALIDDATA;
492  }
493  } else {
494  s->ss_h = s->ss_v = 1;
495  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
496  }
497  }
498 
499  return 0;
500 }
501 
503  const uint8_t *data, int size, int *ref)
504 {
505  VP9Context *s = avctx->priv_data;
506  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
507  int last_invisible;
508  const uint8_t *data2;
509 
510  /* general header */
511  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
512  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
513  return ret;
514  }
515  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
516  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
517  return AVERROR_INVALIDDATA;
518  }
519  avctx->profile = get_bits1(&s->gb);
520  avctx->profile |= get_bits1(&s->gb) << 1;
521  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
522  if (avctx->profile > 3) {
523  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
524  return AVERROR_INVALIDDATA;
525  }
526  s->s.h.profile = avctx->profile;
527  if (get_bits1(&s->gb)) {
528  *ref = get_bits(&s->gb, 3);
529  return 0;
530  }
531 
532  s->last_keyframe = s->s.h.keyframe;
533  s->s.h.keyframe = !get_bits1(&s->gb);
534 
535  last_invisible = s->s.h.invisible;
536  s->s.h.invisible = !get_bits1(&s->gb);
537  s->s.h.errorres = get_bits1(&s->gb);
538  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
539 
540  if (s->s.h.keyframe) {
541  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
542  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
543  return AVERROR_INVALIDDATA;
544  }
545  if ((ret = read_colorspace_details(avctx)) < 0)
546  return ret;
547  // for profile 1, here follows the subsampling bits
548  s->s.h.refreshrefmask = 0xff;
549  w = get_bits(&s->gb, 16) + 1;
550  h = get_bits(&s->gb, 16) + 1;
551  if (get_bits1(&s->gb)) // display size
552  skip_bits(&s->gb, 32);
553  } else {
554  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
555  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
556  if (s->s.h.intraonly) {
557  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
558  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
559  return AVERROR_INVALIDDATA;
560  }
561  if (avctx->profile >= 1) {
562  if ((ret = read_colorspace_details(avctx)) < 0)
563  return ret;
564  } else {
565  s->ss_h = s->ss_v = 1;
566  s->s.h.bpp = 8;
567  s->bpp_index = 0;
568  s->bytesperpixel = 1;
569  s->pix_fmt = AV_PIX_FMT_YUV420P;
570  avctx->colorspace = AVCOL_SPC_BT470BG;
571  avctx->color_range = AVCOL_RANGE_MPEG;
572  }
573  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
574  w = get_bits(&s->gb, 16) + 1;
575  h = get_bits(&s->gb, 16) + 1;
576  if (get_bits1(&s->gb)) // display size
577  skip_bits(&s->gb, 32);
578  } else {
579  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
580  s->s.h.refidx[0] = get_bits(&s->gb, 3);
581  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
582  s->s.h.refidx[1] = get_bits(&s->gb, 3);
583  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
584  s->s.h.refidx[2] = get_bits(&s->gb, 3);
585  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
586  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
587  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
588  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
589  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
590  return AVERROR_INVALIDDATA;
591  }
592  if (get_bits1(&s->gb)) {
593  w = s->s.refs[s->s.h.refidx[0]].f->width;
594  h = s->s.refs[s->s.h.refidx[0]].f->height;
595  } else if (get_bits1(&s->gb)) {
596  w = s->s.refs[s->s.h.refidx[1]].f->width;
597  h = s->s.refs[s->s.h.refidx[1]].f->height;
598  } else if (get_bits1(&s->gb)) {
599  w = s->s.refs[s->s.h.refidx[2]].f->width;
600  h = s->s.refs[s->s.h.refidx[2]].f->height;
601  } else {
602  w = get_bits(&s->gb, 16) + 1;
603  h = get_bits(&s->gb, 16) + 1;
604  }
605  // Note that in this code, "CUR_FRAME" is actually before we
606  // have formally allocated a frame, and thus actually represents
607  // the _last_ frame
608  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
609  s->s.frames[CUR_FRAME].tf.f->height == h;
610  if (get_bits1(&s->gb)) // display size
611  skip_bits(&s->gb, 32);
612  s->s.h.highprecisionmvs = get_bits1(&s->gb);
613  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
614  get_bits(&s->gb, 2);
615  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
616  s->s.h.signbias[0] != s->s.h.signbias[2];
617  if (s->s.h.allowcompinter) {
618  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
619  s->s.h.fixcompref = 2;
620  s->s.h.varcompref[0] = 0;
621  s->s.h.varcompref[1] = 1;
622  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
623  s->s.h.fixcompref = 1;
624  s->s.h.varcompref[0] = 0;
625  s->s.h.varcompref[1] = 2;
626  } else {
627  s->s.h.fixcompref = 0;
628  s->s.h.varcompref[0] = 1;
629  s->s.h.varcompref[1] = 2;
630  }
631  }
632  }
633  }
634  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
635  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
636  s->s.h.framectxid = c = get_bits(&s->gb, 2);
637  if (s->s.h.keyframe || s->s.h.intraonly)
638  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
639 
640  /* loopfilter header data */
641  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
642  // reset loopfilter defaults
643  s->s.h.lf_delta.ref[0] = 1;
644  s->s.h.lf_delta.ref[1] = 0;
645  s->s.h.lf_delta.ref[2] = -1;
646  s->s.h.lf_delta.ref[3] = -1;
647  s->s.h.lf_delta.mode[0] = 0;
648  s->s.h.lf_delta.mode[1] = 0;
649  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
650  }
651  s->s.h.filter.level = get_bits(&s->gb, 6);
652  sharp = get_bits(&s->gb, 3);
653  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
654  // the old cache values since they are still valid
655  if (s->s.h.filter.sharpness != sharp) {
656  for (i = 1; i <= 63; i++) {
657  int limit = i;
658 
659  if (sharp > 0) {
660  limit >>= (sharp + 3) >> 2;
661  limit = FFMIN(limit, 9 - sharp);
662  }
663  limit = FFMAX(limit, 1);
664 
665  s->filter_lut.lim_lut[i] = limit;
666  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
667  }
668  }
669  s->s.h.filter.sharpness = sharp;
670  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
671  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
672  for (i = 0; i < 4; i++)
673  if (get_bits1(&s->gb))
674  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
675  for (i = 0; i < 2; i++)
676  if (get_bits1(&s->gb))
677  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
678  }
679  }
680 
681  /* quantization header data */
682  s->s.h.yac_qi = get_bits(&s->gb, 8);
683  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
684  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
685  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
686  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
687  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
688  if (s->s.h.lossless)
690 
691  /* segmentation header info */
692  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
693  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
694  for (i = 0; i < 7; i++)
695  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
696  get_bits(&s->gb, 8) : 255;
697  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
698  for (i = 0; i < 3; i++)
699  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
700  get_bits(&s->gb, 8) : 255;
701  }
702 
703  if (get_bits1(&s->gb)) {
704  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
705  for (i = 0; i < 8; i++) {
706  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
707  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
708  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
709  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
710  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
711  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
712  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
713  }
714  }
715  }
716 
717  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
718  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
719  int qyac, qydc, quvac, quvdc, lflvl, sh;
720 
721  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
722  if (s->s.h.segmentation.absolute_vals)
723  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
724  else
725  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
726  } else {
727  qyac = s->s.h.yac_qi;
728  }
729  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
730  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
731  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
732  qyac = av_clip_uintp2(qyac, 8);
733 
734  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
735  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
736  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
737  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
738 
739  sh = s->s.h.filter.level >= 32;
740  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
741  if (s->s.h.segmentation.absolute_vals)
742  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
743  else
744  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
745  } else {
746  lflvl = s->s.h.filter.level;
747  }
748  if (s->s.h.lf_delta.enabled) {
749  s->s.h.segmentation.feat[i].lflvl[0][0] =
750  s->s.h.segmentation.feat[i].lflvl[0][1] =
751  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
752  for (j = 1; j < 4; j++) {
753  s->s.h.segmentation.feat[i].lflvl[j][0] =
754  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
755  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
756  s->s.h.segmentation.feat[i].lflvl[j][1] =
757  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
758  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
759  }
760  } else {
761  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
762  sizeof(s->s.h.segmentation.feat[i].lflvl));
763  }
764  }
765 
766  /* tiling info */
767  if ((ret = update_size(avctx, w, h)) < 0) {
768  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
769  w, h, s->pix_fmt);
770  return ret;
771  }
772  for (s->s.h.tiling.log2_tile_cols = 0;
773  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
774  s->s.h.tiling.log2_tile_cols++) ;
775  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
776  max = FFMAX(0, max - 1);
777  while (max > s->s.h.tiling.log2_tile_cols) {
778  if (get_bits1(&s->gb))
779  s->s.h.tiling.log2_tile_cols++;
780  else
781  break;
782  }
783  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
784  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
785  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
786  int n_range_coders;
787  VP56RangeCoder *rc;
788 
789  if (s->td) {
790  for (i = 0; i < s->active_tile_cols; i++)
791  vp9_tile_data_free(&s->td[i]);
792  av_free(s->td);
793  }
794 
795  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
796  vp9_free_entries(avctx);
797  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
798  s->s.h.tiling.tile_cols : 1;
799  vp9_alloc_entries(avctx, s->sb_rows);
800  if (avctx->active_thread_type == FF_THREAD_SLICE) {
801  n_range_coders = 4; // max_tile_rows
802  } else {
803  n_range_coders = s->s.h.tiling.tile_cols;
804  }
805  s->td = av_mallocz_array(s->active_tile_cols, sizeof(VP9TileData) +
806  n_range_coders * sizeof(VP56RangeCoder));
807  if (!s->td)
808  return AVERROR(ENOMEM);
809  rc = (VP56RangeCoder *) &s->td[s->active_tile_cols];
810  for (i = 0; i < s->active_tile_cols; i++) {
811  s->td[i].s = s;
812  s->td[i].c_b = rc;
813  rc += n_range_coders;
814  }
815  }
816 
817  /* check reference frames */
818  if (!s->s.h.keyframe && !s->s.h.intraonly) {
819  int valid_ref_frame = 0;
820  for (i = 0; i < 3; i++) {
821  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
822  int refw = ref->width, refh = ref->height;
823 
824  if (ref->format != avctx->pix_fmt) {
825  av_log(avctx, AV_LOG_ERROR,
826  "Ref pixfmt (%s) did not match current frame (%s)",
827  av_get_pix_fmt_name(ref->format),
828  av_get_pix_fmt_name(avctx->pix_fmt));
829  return AVERROR_INVALIDDATA;
830  } else if (refw == w && refh == h) {
831  s->mvscale[i][0] = s->mvscale[i][1] = 0;
832  } else {
833  /* Check to make sure at least one of frames that */
834  /* this frame references has valid dimensions */
835  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
836  av_log(avctx, AV_LOG_WARNING,
837  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
838  refw, refh, w, h);
839  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
840  continue;
841  }
842  s->mvscale[i][0] = (refw << 14) / w;
843  s->mvscale[i][1] = (refh << 14) / h;
844  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
845  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
846  }
847  valid_ref_frame++;
848  }
849  if (!valid_ref_frame) {
850  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
851  return AVERROR_INVALIDDATA;
852  }
853  }
854 
855  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
856  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
857  s->prob_ctx[3].p = ff_vp9_default_probs;
858  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
859  sizeof(ff_vp9_default_coef_probs));
860  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
861  sizeof(ff_vp9_default_coef_probs));
862  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
863  sizeof(ff_vp9_default_coef_probs));
864  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
865  sizeof(ff_vp9_default_coef_probs));
866  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
867  s->prob_ctx[c].p = ff_vp9_default_probs;
868  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
869  sizeof(ff_vp9_default_coef_probs));
870  }
871 
872  // next 16 bits is size of the rest of the header (arith-coded)
873  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
874  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
875 
876  data2 = align_get_bits(&s->gb);
877  if (size2 > size - (data2 - data)) {
878  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
879  return AVERROR_INVALIDDATA;
880  }
881  ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
882  if (ret < 0)
883  return ret;
884 
885  if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
886  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
887  return AVERROR_INVALIDDATA;
888  }
889 
890  for (i = 0; i < s->active_tile_cols; i++) {
891  if (s->s.h.keyframe || s->s.h.intraonly) {
892  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
893  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
894  } else {
895  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
896  }
897  s->td[i].nb_block_structure = 0;
898  }
899 
900  /* FIXME is it faster to not copy here, but do it down in the fw updates
901  * as explicit copies if the fw update is missing (and skip the copy upon
902  * fw update)? */
903  s->prob.p = s->prob_ctx[c].p;
904 
905  // txfm updates
906  if (s->s.h.lossless) {
907  s->s.h.txfmmode = TX_4X4;
908  } else {
909  s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
910  if (s->s.h.txfmmode == 3)
911  s->s.h.txfmmode += vp8_rac_get(&s->c);
912 
913  if (s->s.h.txfmmode == TX_SWITCHABLE) {
914  for (i = 0; i < 2; i++)
915  if (vp56_rac_get_prob_branchy(&s->c, 252))
916  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
917  for (i = 0; i < 2; i++)
918  for (j = 0; j < 2; j++)
919  if (vp56_rac_get_prob_branchy(&s->c, 252))
920  s->prob.p.tx16p[i][j] =
921  update_prob(&s->c, s->prob.p.tx16p[i][j]);
922  for (i = 0; i < 2; i++)
923  for (j = 0; j < 3; j++)
924  if (vp56_rac_get_prob_branchy(&s->c, 252))
925  s->prob.p.tx32p[i][j] =
926  update_prob(&s->c, s->prob.p.tx32p[i][j]);
927  }
928  }
929 
930  // coef updates
931  for (i = 0; i < 4; i++) {
932  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
933  if (vp8_rac_get(&s->c)) {
934  for (j = 0; j < 2; j++)
935  for (k = 0; k < 2; k++)
936  for (l = 0; l < 6; l++)
937  for (m = 0; m < 6; m++) {
938  uint8_t *p = s->prob.coef[i][j][k][l][m];
939  uint8_t *r = ref[j][k][l][m];
940  if (m >= 3 && l == 0) // dc only has 3 pt
941  break;
942  for (n = 0; n < 3; n++) {
943  if (vp56_rac_get_prob_branchy(&s->c, 252))
944  p[n] = update_prob(&s->c, r[n]);
945  else
946  p[n] = r[n];
947  }
948  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
949  }
950  } else {
951  for (j = 0; j < 2; j++)
952  for (k = 0; k < 2; k++)
953  for (l = 0; l < 6; l++)
954  for (m = 0; m < 6; m++) {
955  uint8_t *p = s->prob.coef[i][j][k][l][m];
956  uint8_t *r = ref[j][k][l][m];
957  if (m > 3 && l == 0) // dc only has 3 pt
958  break;
959  memcpy(p, r, 3);
960  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
961  }
962  }
963  if (s->s.h.txfmmode == i)
964  break;
965  }
966 
967  // mode updates
968  for (i = 0; i < 3; i++)
969  if (vp56_rac_get_prob_branchy(&s->c, 252))
970  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
971  if (!s->s.h.keyframe && !s->s.h.intraonly) {
972  for (i = 0; i < 7; i++)
973  for (j = 0; j < 3; j++)
974  if (vp56_rac_get_prob_branchy(&s->c, 252))
975  s->prob.p.mv_mode[i][j] =
976  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
977 
978  if (s->s.h.filtermode == FILTER_SWITCHABLE)
979  for (i = 0; i < 4; i++)
980  for (j = 0; j < 2; j++)
981  if (vp56_rac_get_prob_branchy(&s->c, 252))
982  s->prob.p.filter[i][j] =
983  update_prob(&s->c, s->prob.p.filter[i][j]);
984 
985  for (i = 0; i < 4; i++)
986  if (vp56_rac_get_prob_branchy(&s->c, 252))
987  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
988 
989  if (s->s.h.allowcompinter) {
990  s->s.h.comppredmode = vp8_rac_get(&s->c);
991  if (s->s.h.comppredmode)
992  s->s.h.comppredmode += vp8_rac_get(&s->c);
993  if (s->s.h.comppredmode == PRED_SWITCHABLE)
994  for (i = 0; i < 5; i++)
995  if (vp56_rac_get_prob_branchy(&s->c, 252))
996  s->prob.p.comp[i] =
997  update_prob(&s->c, s->prob.p.comp[i]);
998  } else {
999  s->s.h.comppredmode = PRED_SINGLEREF;
1000  }
1001 
1002  if (s->s.h.comppredmode != PRED_COMPREF) {
1003  for (i = 0; i < 5; i++) {
1004  if (vp56_rac_get_prob_branchy(&s->c, 252))
1005  s->prob.p.single_ref[i][0] =
1006  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1007  if (vp56_rac_get_prob_branchy(&s->c, 252))
1008  s->prob.p.single_ref[i][1] =
1009  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1010  }
1011  }
1012 
1013  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1014  for (i = 0; i < 5; i++)
1015  if (vp56_rac_get_prob_branchy(&s->c, 252))
1016  s->prob.p.comp_ref[i] =
1017  update_prob(&s->c, s->prob.p.comp_ref[i]);
1018  }
1019 
1020  for (i = 0; i < 4; i++)
1021  for (j = 0; j < 9; j++)
1022  if (vp56_rac_get_prob_branchy(&s->c, 252))
1023  s->prob.p.y_mode[i][j] =
1024  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1025 
1026  for (i = 0; i < 4; i++)
1027  for (j = 0; j < 4; j++)
1028  for (k = 0; k < 3; k++)
1029  if (vp56_rac_get_prob_branchy(&s->c, 252))
1030  s->prob.p.partition[3 - i][j][k] =
1031  update_prob(&s->c,
1032  s->prob.p.partition[3 - i][j][k]);
1033 
1034  // mv fields don't use the update_prob subexp model for some reason
1035  for (i = 0; i < 3; i++)
1036  if (vp56_rac_get_prob_branchy(&s->c, 252))
1037  s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1038 
1039  for (i = 0; i < 2; i++) {
1040  if (vp56_rac_get_prob_branchy(&s->c, 252))
1041  s->prob.p.mv_comp[i].sign =
1042  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1043 
1044  for (j = 0; j < 10; j++)
1045  if (vp56_rac_get_prob_branchy(&s->c, 252))
1046  s->prob.p.mv_comp[i].classes[j] =
1047  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1048 
1049  if (vp56_rac_get_prob_branchy(&s->c, 252))
1050  s->prob.p.mv_comp[i].class0 =
1051  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1052 
1053  for (j = 0; j < 10; j++)
1054  if (vp56_rac_get_prob_branchy(&s->c, 252))
1055  s->prob.p.mv_comp[i].bits[j] =
1056  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1057  }
1058 
1059  for (i = 0; i < 2; i++) {
1060  for (j = 0; j < 2; j++)
1061  for (k = 0; k < 3; k++)
1062  if (vp56_rac_get_prob_branchy(&s->c, 252))
1063  s->prob.p.mv_comp[i].class0_fp[j][k] =
1064  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1065 
1066  for (j = 0; j < 3; j++)
1067  if (vp56_rac_get_prob_branchy(&s->c, 252))
1068  s->prob.p.mv_comp[i].fp[j] =
1069  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1070  }
1071 
1072  if (s->s.h.highprecisionmvs) {
1073  for (i = 0; i < 2; i++) {
1074  if (vp56_rac_get_prob_branchy(&s->c, 252))
1075  s->prob.p.mv_comp[i].class0_hp =
1076  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1077 
1078  if (vp56_rac_get_prob_branchy(&s->c, 252))
1079  s->prob.p.mv_comp[i].hp =
1080  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1081  }
1082  }
1083  }
1084 
1085  return (data2 - data) + size2;
1086 }
1087 
1088 static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1089  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1090 {
1091  const VP9Context *s = td->s;
1092  int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
1093  (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
1094  const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
1095  s->prob.p.partition[bl][c];
1096  enum BlockPartition bp;
1097  ptrdiff_t hbs = 4 >> bl;
1098  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1099  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1100  int bytesperpixel = s->bytesperpixel;
1101 
1102  if (bl == BL_8X8) {
1104  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1105  } else if (col + hbs < s->cols) { // FIXME why not <=?
1106  if (row + hbs < s->rows) { // FIXME why not <=?
1108  switch (bp) {
1109  case PARTITION_NONE:
1110  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1111  break;
1112  case PARTITION_H:
1113  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1114  yoff += hbs * 8 * y_stride;
1115  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1116  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
1117  break;
1118  case PARTITION_V:
1119  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1120  yoff += hbs * 8 * bytesperpixel;
1121  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1122  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
1123  break;
1124  case PARTITION_SPLIT:
1125  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1126  decode_sb(td, row, col + hbs, lflvl,
1127  yoff + 8 * hbs * bytesperpixel,
1128  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1129  yoff += hbs * 8 * y_stride;
1130  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1131  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1132  decode_sb(td, row + hbs, col + hbs, lflvl,
1133  yoff + 8 * hbs * bytesperpixel,
1134  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1135  break;
1136  default:
1137  av_assert0(0);
1138  }
1139  } else if (vp56_rac_get_prob_branchy(td->c, p[1])) {
1140  bp = PARTITION_SPLIT;
1141  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1142  decode_sb(td, row, col + hbs, lflvl,
1143  yoff + 8 * hbs * bytesperpixel,
1144  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1145  } else {
1146  bp = PARTITION_H;
1147  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1148  }
1149  } else if (row + hbs < s->rows) { // FIXME why not <=?
1150  if (vp56_rac_get_prob_branchy(td->c, p[2])) {
1151  bp = PARTITION_SPLIT;
1152  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1153  yoff += hbs * 8 * y_stride;
1154  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1155  decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1156  } else {
1157  bp = PARTITION_V;
1158  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
1159  }
1160  } else {
1161  bp = PARTITION_SPLIT;
1162  decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
1163  }
1164  td->counts.partition[bl][c][bp]++;
1165 }
1166 
/**
 * Replay the block layout recorded during pass 1 of a two-pass decode.
 *
 * Unlike decode_sb(), no partition syntax is read from the bitstream: the
 * per-block level (b->bl) and partition (b->bp) stored in td->b during the
 * first pass drive the recursion, so pass 2 reconstruction visits blocks in
 * exactly the same order and layout as pass 1.
 */
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                          ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    VP9Block *b = td->b;
    ptrdiff_t hbs = 4 >> bl; // half-block size at this level, in 8-pixel units
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        av_assert2(b->bl == BL_8X8);
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
    } else if (td->b->bl == bl) {
        // the stored block was coded at exactly this level
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
        if (b->bp == PARTITION_H && row + hbs < s->rows) {
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
        } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
            yoff += hbs * 8 * bytesperpixel;
            uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
            ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
        }
    } else {
        // split: recurse into the quadrants that lie inside the frame
        decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
        if (col + hbs < s->cols) { // FIXME why not <=?
            if (row + hbs < s->rows) {
                decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb_mem(td, row + hbs, col + hbs, lflvl,
                              yoff + 8 * hbs * bytesperpixel,
                              uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
            } else {
                // bottom edge: only the right neighbour fits
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
            }
        } else if (row + hbs < s->rows) {
            // right edge: only the bottom neighbour fits
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        }
    }
}
1215 
/**
 * Compute the pixel range [*start, *end) covered by tile number idx when
 * n superblocks are divided into 1 << log2_n tiles. Superblock indices are
 * clamped to n and converted to pixels (8 pixels per superblock unit here).
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    int first_sb = ( idx      * n) >> log2_n;
    int last_sb  = ((idx + 1) * n) >> log2_n;

    if (first_sb > n)
        first_sb = n;
    if (last_sb > n)
        last_sb = n;

    *start = first_sb << 3;
    *end   = last_sb  << 3;
}
1223 
1225 {
1226  int i;
1227 
1228  av_freep(&s->intra_pred_data[0]);
1229  for (i = 0; i < s->active_tile_cols; i++)
1230  vp9_tile_data_free(&s->td[i]);
1231 }
1232 
1234 {
1235  VP9Context *s = avctx->priv_data;
1236  int i;
1237 
1238  for (i = 0; i < 3; i++) {
1239  vp9_frame_unref(avctx, &s->s.frames[i]);
1240  av_frame_free(&s->s.frames[i].tf.f);
1241  }
1242  av_buffer_pool_uninit(&s->frame_extradata_pool);
1243  for (i = 0; i < 8; i++) {
1244  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1245  av_frame_free(&s->s.refs[i].f);
1246  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1247  av_frame_free(&s->next_refs[i].f);
1248  }
1249 
1250  free_buffers(s);
1251  vp9_free_entries(avctx);
1252  av_freep(&s->td);
1253  return 0;
1254 }
1255 
/**
 * Single-threaded tile decode loop.
 *
 * For each tile row: initialize one range decoder per tile column (tile
 * sizes are 32-bit big-endian prefixes, except for the last tile which uses
 * the remaining data), then decode superblock rows across all tile columns.
 * After each finished sb row (except in pass 1) the pre-loopfilter pixels
 * are saved for next-row intra prediction, the loopfilter is run, and frame
 * progress is reported for frame-threading consumers.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on a malformed tile.
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // set up one range decoder per tile column of this tile row
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                // the last tile has no explicit size prefix
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                // unblock any frame-threading consumers before bailing out
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vp56_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vp56_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // reset the left-edge contexts at each tile boundary
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // pass 2: replay block structure recorded in pass 1
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        if (vpX_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
1388 
1389 #if HAVE_THREADS
/**
 * Slice-threaded tile worker: decodes one tile column (jobnr) of the frame.
 *
 * The range decoders in td->c_b were already initialized by the caller
 * (one per tile row). The worker decodes its column's superblocks and,
 * after each finished sb row, saves the pre-loopfilter pixels needed for
 * next-row intra prediction and reports per-row tile progress so the
 * loopfilter thread (loopfilter_proc) can proceed.
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    // this worker handles tile column 'jobnr' only
    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start  = tile_col_start;
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // reset left-edge contexts at the tile's left boundary
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            // signal this tile column has finished sb row (row >> 3)
            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1466 
1467 static av_always_inline
1468 int loopfilter_proc(AVCodecContext *avctx)
1469 {
1470  VP9Context *s = avctx->priv_data;
1471  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1472  VP9Filter *lflvl_ptr;
1473  int bytesperpixel = s->bytesperpixel, col, i;
1474  AVFrame *f;
1475 
1476  f = s->s.frames[CUR_FRAME].tf.f;
1477  ls_y = f->linesize[0];
1478  ls_uv =f->linesize[1];
1479 
1480  for (i = 0; i < s->sb_rows; i++) {
1481  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1482 
1483  if (s->s.h.filter.level) {
1484  yoff = (ls_y * 64)*i;
1485  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1486  lflvl_ptr = s->lflvl+s->sb_cols*i;
1487  for (col = 0; col < s->cols;
1488  col += 8, yoff += 64 * bytesperpixel,
1489  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1490  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1491  yoff, uvoff);
1492  }
1493  }
1494  }
1495  return 0;
1496 }
1497 #endif
1498 
1500 {
1501  AVVideoEncParams *par;
1502  unsigned int tile, nb_blocks = 0;
1503 
1504  if (s->s.h.segmentation.enabled) {
1505  for (tile = 0; tile < s->active_tile_cols; tile++)
1506  nb_blocks += s->td[tile].nb_block_structure;
1507  }
1508 
1510  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
1511  if (!par)
1512  return AVERROR(ENOMEM);
1513 
1514  par->qp = s->s.h.yac_qi;
1515  par->delta_qp[0][0] = s->s.h.ydc_qdelta;
1516  par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
1517  par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
1518  par->delta_qp[1][1] = s->s.h.uvac_qdelta;
1519  par->delta_qp[2][1] = s->s.h.uvac_qdelta;
1520 
1521  if (nb_blocks) {
1522  unsigned int block = 0;
1523  unsigned int tile, block_tile;
1524 
1525  for (tile = 0; tile < s->active_tile_cols; tile++) {
1526  VP9TileData *td = &s->td[tile];
1527 
1528  for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
1530  unsigned int row = td->block_structure[block_tile].row;
1531  unsigned int col = td->block_structure[block_tile].col;
1532  uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];
1533 
1534  b->src_x = col * 8;
1535  b->src_y = row * 8;
1536  b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
1537  b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);
1538 
1539  if (s->s.h.segmentation.feat[seg_id].q_enabled) {
1540  b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
1541  if (s->s.h.segmentation.absolute_vals)
1542  b->delta_qp -= par->qp;
1543  }
1544  }
1545  }
1546  }
1547 
1548  return 0;
1549 }
1550 
1551 static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
1552  int *got_frame, AVPacket *pkt)
1553 {
1554  const uint8_t *data = pkt->data;
1555  int size = pkt->size;
1556  VP9Context *s = avctx->priv_data;
1557  int ret, i, j, ref;
1558  int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
1559  (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
1560  AVFrame *f;
1561 
1562  if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
1563  return ret;
1564  } else if (ret == 0) {
1565  if (!s->s.refs[ref].f->buf[0]) {
1566  av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
1567  return AVERROR_INVALIDDATA;
1568  }
1569  if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
1570  return ret;
1571  ((AVFrame *)frame)->pts = pkt->pts;
1572 #if FF_API_PKT_PTS
1574  ((AVFrame *)frame)->pkt_pts = pkt->pts;
1576 #endif
1577  ((AVFrame *)frame)->pkt_dts = pkt->dts;
1578  for (i = 0; i < 8; i++) {
1579  if (s->next_refs[i].f->buf[0])
1580  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1581  if (s->s.refs[i].f->buf[0] &&
1582  (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
1583  return ret;
1584  }
1585  *got_frame = 1;
1586  return pkt->size;
1587  }
1588  data += ret;
1589  size -= ret;
1590 
1591  if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
1592  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
1593  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1594  if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1595  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
1596  return ret;
1597  }
1598  if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
1599  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
1600  if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
1601  (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
1602  return ret;
1603  if (s->s.frames[CUR_FRAME].tf.f->buf[0])
1604  vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
1605  if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
1606  return ret;
1607  f = s->s.frames[CUR_FRAME].tf.f;
1608  f->key_frame = s->s.h.keyframe;
1609  f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
1610 
1611  if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
1612  (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
1613  s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
1614  vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
1615  }
1616 
1617  // ref frame setup
1618  for (i = 0; i < 8; i++) {
1619  if (s->next_refs[i].f->buf[0])
1620  ff_thread_release_buffer(avctx, &s->next_refs[i]);
1621  if (s->s.h.refreshrefmask & (1 << i)) {
1622  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
1623  } else if (s->s.refs[i].f->buf[0]) {
1624  ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
1625  }
1626  if (ret < 0)
1627  return ret;
1628  }
1629 
1630  if (avctx->hwaccel) {
1631  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
1632  if (ret < 0)
1633  return ret;
1634  ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
1635  if (ret < 0)
1636  return ret;
1637  ret = avctx->hwaccel->end_frame(avctx);
1638  if (ret < 0)
1639  return ret;
1640  goto finish;
1641  }
1642 
1643  // main tile decode loop
1644  memset(s->above_partition_ctx, 0, s->cols);
1645  memset(s->above_skip_ctx, 0, s->cols);
1646  if (s->s.h.keyframe || s->s.h.intraonly) {
1647  memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
1648  } else {
1649  memset(s->above_mode_ctx, NEARESTMV, s->cols);
1650  }
1651  memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
1652  memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
1653  memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
1654  memset(s->above_segpred_ctx, 0, s->cols);
1655  s->pass = s->s.frames[CUR_FRAME].uses_2pass =
1656  avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
1657  if ((ret = update_block_buffers(avctx)) < 0) {
1658  av_log(avctx, AV_LOG_ERROR,
1659  "Failed to allocate block buffers\n");
1660  return ret;
1661  }
1662  if (s->s.h.refreshctx && s->s.h.parallelmode) {
1663  int j, k, l, m;
1664 
1665  for (i = 0; i < 4; i++) {
1666  for (j = 0; j < 2; j++)
1667  for (k = 0; k < 2; k++)
1668  for (l = 0; l < 6; l++)
1669  for (m = 0; m < 6; m++)
1670  memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
1671  s->prob.coef[i][j][k][l][m], 3);
1672  if (s->s.h.txfmmode == i)
1673  break;
1674  }
1675  s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
1676  ff_thread_finish_setup(avctx);
1677  } else if (!s->s.h.refreshctx) {
1678  ff_thread_finish_setup(avctx);
1679  }
1680 
1681 #if HAVE_THREADS
1682  if (avctx->active_thread_type & FF_THREAD_SLICE) {
1683  for (i = 0; i < s->sb_rows; i++)
1684  atomic_store(&s->entries[i], 0);
1685  }
1686 #endif
1687 
1688  do {
1689  for (i = 0; i < s->active_tile_cols; i++) {
1690  s->td[i].b = s->td[i].b_base;
1691  s->td[i].block = s->td[i].block_base;
1692  s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
1693  s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
1694  s->td[i].eob = s->td[i].eob_base;
1695  s->td[i].uveob[0] = s->td[i].uveob_base[0];
1696  s->td[i].uveob[1] = s->td[i].uveob_base[1];
1697  s->td[i].error_info = 0;
1698  }
1699 
1700 #if HAVE_THREADS
1701  if (avctx->active_thread_type == FF_THREAD_SLICE) {
1702  int tile_row, tile_col;
1703 
1704  av_assert1(!s->pass);
1705 
1706  for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
1707  for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
1708  int64_t tile_size;
1709 
1710  if (tile_col == s->s.h.tiling.tile_cols - 1 &&
1711  tile_row == s->s.h.tiling.tile_rows - 1) {
1712  tile_size = size;
1713  } else {
1714  tile_size = AV_RB32(data);
1715  data += 4;
1716  size -= 4;
1717  }
1718  if (tile_size > size)
1719  return AVERROR_INVALIDDATA;
1720  ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
1721  if (ret < 0)
1722  return ret;
1723  if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
1724  return AVERROR_INVALIDDATA;
1725  data += tile_size;
1726  size -= tile_size;
1727  }
1728  }
1729 
1730  ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
1731  } else
1732 #endif
1733  {
1734  ret = decode_tiles(avctx, data, size);
1735  if (ret < 0) {
1736  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1737  return ret;
1738  }
1739  }
1740 
1741  // Sum all counts fields into td[0].counts for tile threading
1742  if (avctx->active_thread_type == FF_THREAD_SLICE)
1743  for (i = 1; i < s->s.h.tiling.tile_cols; i++)
1744  for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
1745  ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];
1746 
1747  if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
1749  ff_thread_finish_setup(avctx);
1750  }
1751  } while (s->pass++ == 1);
1752  ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
1753 
1754  if (s->td->error_info < 0) {
1755  av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
1756  s->td->error_info = 0;
1757  return AVERROR_INVALIDDATA;
1758  }
1760  ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
1761  if (ret < 0)
1762  return ret;
1763  }
1764 
1765 finish:
1766  // ref frame setup
1767  for (i = 0; i < 8; i++) {
1768  if (s->s.refs[i].f->buf[0])
1769  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1770  if (s->next_refs[i].f->buf[0] &&
1771  (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
1772  return ret;
1773  }
1774 
1775  if (!s->s.h.invisible) {
1776  if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
1777  return ret;
1778  *got_frame = 1;
1779  }
1780 
1781  return pkt->size;
1782 }
1783 
1785 {
1786  VP9Context *s = avctx->priv_data;
1787  int i;
1788 
1789  for (i = 0; i < 3; i++)
1790  vp9_frame_unref(avctx, &s->s.frames[i]);
1791  for (i = 0; i < 8; i++)
1792  ff_thread_release_buffer(avctx, &s->s.refs[i]);
1793 }
1794 
1795 static int init_frames(AVCodecContext *avctx)
1796 {
1797  VP9Context *s = avctx->priv_data;
1798  int i;
1799 
1800  for (i = 0; i < 3; i++) {
1801  s->s.frames[i].tf.f = av_frame_alloc();
1802  if (!s->s.frames[i].tf.f) {
1803  vp9_decode_free(avctx);
1804  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1805  return AVERROR(ENOMEM);
1806  }
1807  }
1808  for (i = 0; i < 8; i++) {
1809  s->s.refs[i].f = av_frame_alloc();
1810  s->next_refs[i].f = av_frame_alloc();
1811  if (!s->s.refs[i].f || !s->next_refs[i].f) {
1812  vp9_decode_free(avctx);
1813  av_log(avctx, AV_LOG_ERROR, "Failed to allocate frame buffer %d\n", i);
1814  return AVERROR(ENOMEM);
1815  }
1816  }
1817 
1818  return 0;
1819 }
1820 
1822 {
1823  VP9Context *s = avctx->priv_data;
1824 
1825  s->last_bpp = 0;
1826  s->s.h.filter.sharpness = -1;
1827 
1828  return init_frames(avctx);
1829 }
1830 
1831 #if HAVE_THREADS
/**
 * Frame-threading state transfer: copy from the source thread's context
 * (ssrc) into the destination thread (s) everything the next frame needs —
 * internal frame references, reference slots, header fields that persist
 * across frames, and the probability contexts.
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    // re-reference the 3 internal frames (current, mvpair ref, segmap ref)
    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    // take the source's *next* refs: they are the refs valid after its frame
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    // scalar header/format state that carries over between frames
    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    // probability contexts and per-frame deltas copied wholesale
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1876 #endif
1877 
1879  .name = "vp9",
1880  .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1881  .type = AVMEDIA_TYPE_VIDEO,
1882  .id = AV_CODEC_ID_VP9,
1883  .priv_data_size = sizeof(VP9Context),
1884  .init = vp9_decode_init,
1885  .close = vp9_decode_free,
1888  .caps_internal = FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
1891  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1893  .bsfs = "vp9_superframe_split",
1894  .hw_configs = (const AVCodecHWConfigInternal*[]) {
1895 #if CONFIG_VP9_DXVA2_HWACCEL
1896  HWACCEL_DXVA2(vp9),
1897 #endif
1898 #if CONFIG_VP9_D3D11VA_HWACCEL
1899  HWACCEL_D3D11VA(vp9),
1900 #endif
1901 #if CONFIG_VP9_D3D11VA2_HWACCEL
1902  HWACCEL_D3D11VA2(vp9),
1903 #endif
1904 #if CONFIG_VP9_NVDEC_HWACCEL
1905  HWACCEL_NVDEC(vp9),
1906 #endif
1907 #if CONFIG_VP9_VAAPI_HWACCEL
1908  HWACCEL_VAAPI(vp9),
1909 #endif
1910 #if CONFIG_VP9_VDPAU_HWACCEL
1911  HWACCEL_VDPAU(vp9),
1912 #endif
1913  NULL
1914  },
1915 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:95
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1690
AVCodec
AVCodec.
Definition: codec.h:190
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:235
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1256
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:113
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
r
const char * r
Definition: vf_curves.c:114
AVERROR
AVERROR(e): macro that converts a positive POSIX error number e into the corresponding negative FFmpeg error code.
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:51
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:49
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
vpX_rac_is_end
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
Returns 1 if the end of the stream has been reached, 0 otherwise (shared by the VP5/6/8/9 range coders).
Definition: vp56.h:237
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:62
VP9Frame
Definition: vp9shared.h:59
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
pthread_mutex_init
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1088
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:46
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1784
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1551
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:50
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
BlockPartition
BlockPartition
Definition: vp9shared.h:34
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
b
#define b
Definition: input.c:41
data
const char data[16]
Definition: mxf.c:91
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:189
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1167
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:165
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:502
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:397
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:229
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
Definition: pixfmt.h:510
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:39
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:66
VP9Filter
Definition: vp9dec.h:75
vp9_free_entries
static void vp9_free_entries(AVCodecContext *avctx)
Definition: vp9.c:93
thread.h
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
VP9Block
Definition: vp9dec.h:81
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:60
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
vp56_rac_get_prob_branchy
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:285
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:239
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
Definition: pixfmt.h:513
AVHWAccel
Definition: avcodec.h:2410
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:61
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
finish
static void finish(void)
Definition: movenc.c:345
vp8_rac_get
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:322
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:101
fail
#define fail()
Definition: checkasm.h:123
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:413
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
Wrapper around get_buffer() for frame-threaded decoders: use it to allocate frames whose decoding progress is tracked across threads, and release them with ff_thread_release_buffer(). Codecs that allocate progress themselves set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec.caps_internal.
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:35
init_frames
static int init_frames(AVCodecContext *avctx)
Definition: vp9.c:1795
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:67
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:65
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1233
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:400
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:2192
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:568
BL_8X8
@ BL_8X8
Definition: vp9shared.h:74
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:337
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:37
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:159
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:137
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:516
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:217
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:202
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
get_bits.h
VP56mv
Definition: vp56.h:66
field
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this field.
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:84
decode012
static int decode012(GetBitContext *gb)
Definition: get_bits.h:831
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
f
#define f(width, name)
Definition: cbs_vp9.c:255
vp56.h
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
VP9Context
Definition: vp9dec.h:93
vp8_rac_get_uint
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:338
vp8_rac_get_tree
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:394
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:164
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2511
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
profiles.h
src
#define src
Definition: vp8dsp.c:254
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:276
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:399
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:398
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:38
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
In the C language some operations — such as signed integer overflow, dereferencing freed memory, or accessing outside allocated bounds — are undefined behavior. Undefined behavior must not occur in a C program; it is not safe even if the output of the undefined operation is unused. Optimizing compilers assume no undefined behavior occurs and have, in some cases, produced effects beyond the output of the computation when that assumption was violated.
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:66
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread_frame.c:1006
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:57
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:64
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
AVPacket::size
int size
Definition: packet.h:356
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:54
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:39
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:440
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:402
size
int size
Definition: twinvq_data.h:11134
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:94
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1224
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:92
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:404
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
AVCodecHWConfigInternal
Definition: hwconfig.h:29
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:305
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:313
ff_vp56_init_range_decoder
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:372
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Definition: pixfmt.h:122
pthread_cond_destroy
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:144
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: internal.h:70
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
pthread_mutex_destroy
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:197
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:121
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:517
assign
#define assign(var, type, n)
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:2191
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:520
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2500
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:414
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:509
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
uint8_t
uint8_t
Definition: audio_convert.c:194
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:112
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:366
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
update_thread_context
Called for frame-threaded decoders to copy the decoding state from the previous thread's context into the next thread's context before it starts decoding; required when there are inter-frame dependencies (see AV_CODEC_CAP_FRAME_THREADS).
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:512
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:534
avcodec.h
BL_64X64
@ BL_64X64
Definition: vp9shared.h:71
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1821
ff_vp9_decoder
AVCodec ff_vp9_decoder
Definition: vp9.c:1878
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:693
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:97
ff_thread_finish_setup
Called by a frame-threaded decoder once all header parsing and inter-thread state setup for the current frame is complete, signalling that the next thread may begin decoding.
VP9mvrefPair
Definition: vp9shared.h:54
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:401
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:412
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
VP9TileData
Definition: vp9dec.h:163
VP56RangeCoder
Definition: vp56.h:85
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:78
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2520
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:104
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:63
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1859
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1263
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:42
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
BlockLevel
BlockLevel
Definition: vp9shared.h:70
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:2354
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1885
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2472
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:168
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:75
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:163
ff_thread_get_format
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:972
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1499
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:36
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:135
pthread_cond_init
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:133
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:403
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:511
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:133
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1216
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
update_prob
static int update_prob(VP56RangeCoder *c, int p)
Definition: vp9.c:382
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:62
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540