/* FFmpeg — libavcodec/vp9.c (text recovered from a Doxygen listing page) */
1 /*
2  * VP9 compatible video decoder
3  *
4  * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
5  * Copyright (C) 2013 Clément Bœsch <u pkh me>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "avcodec.h"
25 #include "get_bits.h"
26 #include "hwconfig.h"
27 #include "internal.h"
28 #include "profiles.h"
29 #include "thread.h"
30 #include "pthread_internal.h"
31 
32 #include "videodsp.h"
33 #include "vp56.h"
34 #include "vp9.h"
35 #include "vp9data.h"
36 #include "vp9dec.h"
37 #include "libavutil/avassert.h"
38 #include "libavutil/pixdesc.h"
40 
41 #define VP9_SYNCCODE 0x498342
42 
#if HAVE_THREADS
/* Offsets of the pthread members inside VP9Context, consumed by the generic
 * ff_pthread_init()/ff_pthread_free() helpers. */
DEFINE_OFFSET_ARRAY(VP9Context, vp9_context, pthread_init_cnt,
                    (offsetof(VP9Context, progress_mutex)),
                    (offsetof(VP9Context, progress_cond)));

/* (Re)allocate the n atomic progress counters used for inter-tile
 * synchronization during slice-threaded decoding. No-op unless slice
 * threading is active. Returns 0 on success or AVERROR(ENOMEM). */
static int vp9_alloc_entries(AVCodecContext *avctx, int n) {
    VP9Context *s = avctx->priv_data;
    int i;

    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        if (s->entries)
            av_freep(&s->entries);

        s->entries = av_malloc_array(n, sizeof(atomic_int));
        if (!s->entries)
            return AVERROR(ENOMEM);

        /* atomic_int must be initialized with atomic_init(), not memset */
        for (i = 0; i < n; i++)
            atomic_init(&s->entries[i], 0);
    }
    return 0;
}

/* Advance the progress counter entries[field] by n and wake any threads
 * waiting on it. The release order pairs with the acquire load in
 * vp9_await_tile_progress(). */
static void vp9_report_tile_progress(VP9Context *s, int field, int n) {
    pthread_mutex_lock(&s->progress_mutex);
    atomic_fetch_add_explicit(&s->entries[field], n, memory_order_release);
    pthread_cond_signal(&s->progress_cond);
    pthread_mutex_unlock(&s->progress_mutex);
}

/* Block until the progress counter entries[field] reaches n.
 * Fast path: a single lock-free acquire load; slow path: condvar wait
 * under the progress mutex (relaxed loads suffice there, the mutex
 * provides the ordering). */
static void vp9_await_tile_progress(VP9Context *s, int field, int n) {
    if (atomic_load_explicit(&s->entries[field], memory_order_acquire) >= n)
        return;

    pthread_mutex_lock(&s->progress_mutex);
    while (atomic_load_explicit(&s->entries[field], memory_order_relaxed) != n)
        pthread_cond_wait(&s->progress_cond, &s->progress_mutex);
    pthread_mutex_unlock(&s->progress_mutex);
}
#else
/* Single-threaded build: nothing to allocate. */
static int vp9_alloc_entries(AVCodecContext *avctx, int n) { return 0; }
#endif
85 
87 {
88  av_freep(&td->b_base);
89  av_freep(&td->block_base);
90  av_freep(&td->block_structure);
91 }
92 
94 {
95  ff_thread_release_buffer(avctx, &f->tf);
96  av_buffer_unref(&f->extradata);
97  av_buffer_unref(&f->hwaccel_priv_buf);
98  f->segmentation_map = NULL;
99  f->hwaccel_picture_private = NULL;
100 }
101 
103 {
104  VP9Context *s = avctx->priv_data;
105  int ret, sz;
106 
108  if (ret < 0)
109  return ret;
110 
111  sz = 64 * s->sb_cols * s->sb_rows;
112  if (sz != s->frame_extradata_pool_size) {
113  av_buffer_pool_uninit(&s->frame_extradata_pool);
114  s->frame_extradata_pool = av_buffer_pool_init(sz * (1 + sizeof(VP9mvrefPair)), NULL);
115  if (!s->frame_extradata_pool) {
116  s->frame_extradata_pool_size = 0;
117  goto fail;
118  }
119  s->frame_extradata_pool_size = sz;
120  }
121  f->extradata = av_buffer_pool_get(s->frame_extradata_pool);
122  if (!f->extradata) {
123  goto fail;
124  }
125  memset(f->extradata->data, 0, f->extradata->size);
126 
127  f->segmentation_map = f->extradata->data;
128  f->mv = (VP9mvrefPair *) (f->extradata->data + sz);
129 
130  if (avctx->hwaccel) {
131  const AVHWAccel *hwaccel = avctx->hwaccel;
132  av_assert0(!f->hwaccel_picture_private);
133  if (hwaccel->frame_priv_data_size) {
134  f->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
135  if (!f->hwaccel_priv_buf)
136  goto fail;
137  f->hwaccel_picture_private = f->hwaccel_priv_buf->data;
138  }
139  }
140 
141  return 0;
142 
143 fail:
144  vp9_frame_unref(avctx, f);
145  return AVERROR(ENOMEM);
146 }
147 
149 {
150  int ret;
151 
152  ret = ff_thread_ref_frame(&dst->tf, &src->tf);
153  if (ret < 0)
154  return ret;
155 
156  dst->extradata = av_buffer_ref(src->extradata);
157  if (!dst->extradata)
158  goto fail;
159 
160  dst->segmentation_map = src->segmentation_map;
161  dst->mv = src->mv;
162  dst->uses_2pass = src->uses_2pass;
163 
164  if (src->hwaccel_picture_private) {
165  dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
166  if (!dst->hwaccel_priv_buf)
167  goto fail;
169  }
170 
171  return 0;
172 
173 fail:
174  vp9_frame_unref(avctx, dst);
175  return AVERROR(ENOMEM);
176 }
177 
178 static int update_size(AVCodecContext *avctx, int w, int h)
179 {
180 #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
181  CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
182  CONFIG_VP9_NVDEC_HWACCEL + \
183  CONFIG_VP9_VAAPI_HWACCEL + \
184  CONFIG_VP9_VDPAU_HWACCEL + \
185  CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
186  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
187  VP9Context *s = avctx->priv_data;
188  uint8_t *p;
189  int bytesperpixel = s->bytesperpixel, ret, cols, rows;
190  int lflvl_len, i;
191 
192  av_assert0(w > 0 && h > 0);
193 
194  if (!(s->pix_fmt == s->gf_fmt && w == s->w && h == s->h)) {
195  if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
196  return ret;
197 
198  switch (s->pix_fmt) {
199  case AV_PIX_FMT_YUV420P:
201 #if CONFIG_VP9_DXVA2_HWACCEL
202  *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
203 #endif
204 #if CONFIG_VP9_D3D11VA_HWACCEL
205  *fmtp++ = AV_PIX_FMT_D3D11VA_VLD;
206  *fmtp++ = AV_PIX_FMT_D3D11;
207 #endif
208 #if CONFIG_VP9_NVDEC_HWACCEL
209  *fmtp++ = AV_PIX_FMT_CUDA;
210 #endif
211 #if CONFIG_VP9_VAAPI_HWACCEL
212  *fmtp++ = AV_PIX_FMT_VAAPI;
213 #endif
214 #if CONFIG_VP9_VDPAU_HWACCEL
215  *fmtp++ = AV_PIX_FMT_VDPAU;
216 #endif
217 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
218  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
219 #endif
220  break;
222 #if CONFIG_VP9_NVDEC_HWACCEL
223  *fmtp++ = AV_PIX_FMT_CUDA;
224 #endif
225 #if CONFIG_VP9_VAAPI_HWACCEL
226  *fmtp++ = AV_PIX_FMT_VAAPI;
227 #endif
228 #if CONFIG_VP9_VDPAU_HWACCEL
229  *fmtp++ = AV_PIX_FMT_VDPAU;
230 #endif
231  break;
232  }
233 
234  *fmtp++ = s->pix_fmt;
235  *fmtp = AV_PIX_FMT_NONE;
236 
238  if (ret < 0)
239  return ret;
240 
241  avctx->pix_fmt = ret;
242  s->gf_fmt = s->pix_fmt;
243  s->w = w;
244  s->h = h;
245  }
246 
247  cols = (w + 7) >> 3;
248  rows = (h + 7) >> 3;
249 
250  if (s->intra_pred_data[0] && cols == s->cols && rows == s->rows && s->pix_fmt == s->last_fmt)
251  return 0;
252 
253  s->last_fmt = s->pix_fmt;
254  s->sb_cols = (w + 63) >> 6;
255  s->sb_rows = (h + 63) >> 6;
256  s->cols = (w + 7) >> 3;
257  s->rows = (h + 7) >> 3;
258  lflvl_len = avctx->active_thread_type == FF_THREAD_SLICE ? s->sb_rows : 1;
259 
260 #define assign(var, type, n) var = (type) p; p += s->sb_cols * (n) * sizeof(*var)
261  av_freep(&s->intra_pred_data[0]);
262  // FIXME we slightly over-allocate here for subsampled chroma, but a little
263  // bit of padding shouldn't affect performance...
264  p = av_malloc(s->sb_cols * (128 + 192 * bytesperpixel +
265  lflvl_len * sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx)));
266  if (!p)
267  return AVERROR(ENOMEM);
268  assign(s->intra_pred_data[0], uint8_t *, 64 * bytesperpixel);
269  assign(s->intra_pred_data[1], uint8_t *, 64 * bytesperpixel);
270  assign(s->intra_pred_data[2], uint8_t *, 64 * bytesperpixel);
271  assign(s->above_y_nnz_ctx, uint8_t *, 16);
272  assign(s->above_mode_ctx, uint8_t *, 16);
273  assign(s->above_mv_ctx, VP56mv(*)[2], 16);
274  assign(s->above_uv_nnz_ctx[0], uint8_t *, 16);
275  assign(s->above_uv_nnz_ctx[1], uint8_t *, 16);
276  assign(s->above_partition_ctx, uint8_t *, 8);
277  assign(s->above_skip_ctx, uint8_t *, 8);
278  assign(s->above_txfm_ctx, uint8_t *, 8);
279  assign(s->above_segpred_ctx, uint8_t *, 8);
280  assign(s->above_intra_ctx, uint8_t *, 8);
281  assign(s->above_comp_ctx, uint8_t *, 8);
282  assign(s->above_ref_ctx, uint8_t *, 8);
283  assign(s->above_filter_ctx, uint8_t *, 8);
284  assign(s->lflvl, VP9Filter *, lflvl_len);
285 #undef assign
286 
287  if (s->td) {
288  for (i = 0; i < s->active_tile_cols; i++)
289  vp9_tile_data_free(&s->td[i]);
290  }
291 
292  if (s->s.h.bpp != s->last_bpp) {
293  ff_vp9dsp_init(&s->dsp, s->s.h.bpp, avctx->flags & AV_CODEC_FLAG_BITEXACT);
294  ff_videodsp_init(&s->vdsp, s->s.h.bpp);
295  s->last_bpp = s->s.h.bpp;
296  }
297 
298  return 0;
299 }
300 
302 {
303  int i;
304  VP9Context *s = avctx->priv_data;
305  int chroma_blocks, chroma_eobs, bytesperpixel = s->bytesperpixel;
306  VP9TileData *td = &s->td[0];
307 
308  if (td->b_base && td->block_base && s->block_alloc_using_2pass == s->s.frames[CUR_FRAME].uses_2pass)
309  return 0;
310 
312  chroma_blocks = 64 * 64 >> (s->ss_h + s->ss_v);
313  chroma_eobs = 16 * 16 >> (s->ss_h + s->ss_v);
314  if (s->s.frames[CUR_FRAME].uses_2pass) {
315  int sbs = s->sb_cols * s->sb_rows;
316 
317  td->b_base = av_malloc_array(s->cols * s->rows, sizeof(VP9Block));
318  td->block_base = av_mallocz(((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
319  16 * 16 + 2 * chroma_eobs) * sbs);
320  if (!td->b_base || !td->block_base)
321  return AVERROR(ENOMEM);
322  td->uvblock_base[0] = td->block_base + sbs * 64 * 64 * bytesperpixel;
323  td->uvblock_base[1] = td->uvblock_base[0] + sbs * chroma_blocks * bytesperpixel;
324  td->eob_base = (uint8_t *) (td->uvblock_base[1] + sbs * chroma_blocks * bytesperpixel);
325  td->uveob_base[0] = td->eob_base + 16 * 16 * sbs;
326  td->uveob_base[1] = td->uveob_base[0] + chroma_eobs * sbs;
327 
329  td->block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
330  if (!td->block_structure)
331  return AVERROR(ENOMEM);
332  }
333  } else {
334  for (i = 1; i < s->active_tile_cols; i++)
335  vp9_tile_data_free(&s->td[i]);
336 
337  for (i = 0; i < s->active_tile_cols; i++) {
338  s->td[i].b_base = av_malloc(sizeof(VP9Block));
339  s->td[i].block_base = av_mallocz((64 * 64 + 2 * chroma_blocks) * bytesperpixel * sizeof(int16_t) +
340  16 * 16 + 2 * chroma_eobs);
341  if (!s->td[i].b_base || !s->td[i].block_base)
342  return AVERROR(ENOMEM);
343  s->td[i].uvblock_base[0] = s->td[i].block_base + 64 * 64 * bytesperpixel;
344  s->td[i].uvblock_base[1] = s->td[i].uvblock_base[0] + chroma_blocks * bytesperpixel;
345  s->td[i].eob_base = (uint8_t *) (s->td[i].uvblock_base[1] + chroma_blocks * bytesperpixel);
346  s->td[i].uveob_base[0] = s->td[i].eob_base + 16 * 16;
347  s->td[i].uveob_base[1] = s->td[i].uveob_base[0] + chroma_eobs;
348 
350  s->td[i].block_structure = av_malloc_array(s->cols * s->rows, sizeof(*td->block_structure));
351  if (!s->td[i].block_structure)
352  return AVERROR(ENOMEM);
353  }
354  }
355  }
356  s->block_alloc_using_2pass = s->s.frames[CUR_FRAME].uses_2pass;
357 
358  return 0;
359 }
360 
361 // The sign bit is at the end, not the start, of a bit sequence
363 {
364  int v = get_bits(gb, n);
365  return get_bits1(gb) ? -v : v;
366 }
367 
368 static av_always_inline int inv_recenter_nonneg(int v, int m)
369 {
370  if (v > 2 * m)
371  return v;
372  if (v & 1)
373  return m - ((v + 1) >> 1);
374  return m + (v >> 1);
375 }
376 
// differential forward probability updates
/* Decode a differential update to probability p (in [1, 255]) from the
 * range coder and return the new probability. The coded delta d is read
 * with a 4-branch VLC (ranges 0-15, 16-31, 32-63, 64-254) and mapped
 * through inv_map_table[] / inv_recenter_nonneg() back to an absolute
 * probability. */
static int update_prob(VP56RangeCoder *c, int p)
{
    static const uint8_t inv_map_table[255] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253, 253,
    };
    int d;

    /* This code is trying to do a differential probability update. For a
     * current probability A in the range [1, 255], the difference to a new
     * probability of any value can be expressed differentially as 1-A, 255-A
     * where some part of this (absolute range) exists both in positive as
     * well as the negative part, whereas another part only exists in one
     * half. We're trying to code this shared part differentially, i.e.
     * times two where the value of the lowest bit specifies the sign, and
     * the single part is then coded on top of this. This absolute difference
     * then again has a value of [0, 254], but a bigger value in this range
     * indicates that we're further away from the original value A, so we
     * can code this as a VLC code, since higher values are increasingly
     * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
     * updates vs. the 'fine, exact' updates further down the range, which
     * adds one extra dimension to this differential update model. */

    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;          /* d in [0, 15] */
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;         /* d in [16, 31] */
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;         /* d in [32, 63] */
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);  /* extra bit extends the range */
        d += 64;                                  /* d in [64, 254] */
        av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
    }

    /* mirror around the midpoint so the update stays inside [1, 255] */
    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
435 
437 {
438  static const enum AVColorSpace colorspaces[8] = {
441  };
442  VP9Context *s = avctx->priv_data;
443  int bits = avctx->profile <= 1 ? 0 : 1 + get_bits1(&s->gb); // 0:8, 1:10, 2:12
444 
445  s->bpp_index = bits;
446  s->s.h.bpp = 8 + bits * 2;
447  s->bytesperpixel = (7 + s->s.h.bpp) >> 3;
448  avctx->colorspace = colorspaces[get_bits(&s->gb, 3)];
449  if (avctx->colorspace == AVCOL_SPC_RGB) { // RGB = profile 1
450  static const enum AVPixelFormat pix_fmt_rgb[3] = {
452  };
453  s->ss_h = s->ss_v = 0;
454  avctx->color_range = AVCOL_RANGE_JPEG;
455  s->pix_fmt = pix_fmt_rgb[bits];
456  if (avctx->profile & 1) {
457  if (get_bits1(&s->gb)) {
458  av_log(avctx, AV_LOG_ERROR, "Reserved bit set in RGB\n");
459  return AVERROR_INVALIDDATA;
460  }
461  } else {
462  av_log(avctx, AV_LOG_ERROR, "RGB not supported in profile %d\n",
463  avctx->profile);
464  return AVERROR_INVALIDDATA;
465  }
466  } else {
467  static const enum AVPixelFormat pix_fmt_for_ss[3][2 /* v */][2 /* h */] = {
474  };
476  if (avctx->profile & 1) {
477  s->ss_h = get_bits1(&s->gb);
478  s->ss_v = get_bits1(&s->gb);
479  s->pix_fmt = pix_fmt_for_ss[bits][s->ss_v][s->ss_h];
480  if (s->pix_fmt == AV_PIX_FMT_YUV420P) {
481  av_log(avctx, AV_LOG_ERROR, "YUV 4:2:0 not supported in profile %d\n",
482  avctx->profile);
483  return AVERROR_INVALIDDATA;
484  } else if (get_bits1(&s->gb)) {
485  av_log(avctx, AV_LOG_ERROR, "Profile %d color details reserved bit set\n",
486  avctx->profile);
487  return AVERROR_INVALIDDATA;
488  }
489  } else {
490  s->ss_h = s->ss_v = 1;
491  s->pix_fmt = pix_fmt_for_ss[bits][1][1];
492  }
493  }
494 
495  return 0;
496 }
497 
499  const uint8_t *data, int size, int *ref)
500 {
501  VP9Context *s = avctx->priv_data;
502  int c, i, j, k, l, m, n, w, h, max, size2, ret, sharp;
503  int last_invisible;
504  const uint8_t *data2;
505 
506  /* general header */
507  if ((ret = init_get_bits8(&s->gb, data, size)) < 0) {
508  av_log(avctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
509  return ret;
510  }
511  if (get_bits(&s->gb, 2) != 0x2) { // frame marker
512  av_log(avctx, AV_LOG_ERROR, "Invalid frame marker\n");
513  return AVERROR_INVALIDDATA;
514  }
515  avctx->profile = get_bits1(&s->gb);
516  avctx->profile |= get_bits1(&s->gb) << 1;
517  if (avctx->profile == 3) avctx->profile += get_bits1(&s->gb);
518  if (avctx->profile > 3) {
519  av_log(avctx, AV_LOG_ERROR, "Profile %d is not yet supported\n", avctx->profile);
520  return AVERROR_INVALIDDATA;
521  }
522  s->s.h.profile = avctx->profile;
523  if (get_bits1(&s->gb)) {
524  *ref = get_bits(&s->gb, 3);
525  return 0;
526  }
527 
528  s->last_keyframe = s->s.h.keyframe;
529  s->s.h.keyframe = !get_bits1(&s->gb);
530 
531  last_invisible = s->s.h.invisible;
532  s->s.h.invisible = !get_bits1(&s->gb);
533  s->s.h.errorres = get_bits1(&s->gb);
534  s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
535 
536  if (s->s.h.keyframe) {
537  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
538  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
539  return AVERROR_INVALIDDATA;
540  }
541  if ((ret = read_colorspace_details(avctx)) < 0)
542  return ret;
543  // for profile 1, here follows the subsampling bits
544  s->s.h.refreshrefmask = 0xff;
545  w = get_bits(&s->gb, 16) + 1;
546  h = get_bits(&s->gb, 16) + 1;
547  if (get_bits1(&s->gb)) // display size
548  skip_bits(&s->gb, 32);
549  } else {
550  s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
551  s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
552  if (s->s.h.intraonly) {
553  if (get_bits(&s->gb, 24) != VP9_SYNCCODE) { // synccode
554  av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
555  return AVERROR_INVALIDDATA;
556  }
557  if (avctx->profile >= 1) {
558  if ((ret = read_colorspace_details(avctx)) < 0)
559  return ret;
560  } else {
561  s->ss_h = s->ss_v = 1;
562  s->s.h.bpp = 8;
563  s->bpp_index = 0;
564  s->bytesperpixel = 1;
565  s->pix_fmt = AV_PIX_FMT_YUV420P;
566  avctx->colorspace = AVCOL_SPC_BT470BG;
567  avctx->color_range = AVCOL_RANGE_MPEG;
568  }
569  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
570  w = get_bits(&s->gb, 16) + 1;
571  h = get_bits(&s->gb, 16) + 1;
572  if (get_bits1(&s->gb)) // display size
573  skip_bits(&s->gb, 32);
574  } else {
575  s->s.h.refreshrefmask = get_bits(&s->gb, 8);
576  s->s.h.refidx[0] = get_bits(&s->gb, 3);
577  s->s.h.signbias[0] = get_bits1(&s->gb) && !s->s.h.errorres;
578  s->s.h.refidx[1] = get_bits(&s->gb, 3);
579  s->s.h.signbias[1] = get_bits1(&s->gb) && !s->s.h.errorres;
580  s->s.h.refidx[2] = get_bits(&s->gb, 3);
581  s->s.h.signbias[2] = get_bits1(&s->gb) && !s->s.h.errorres;
582  if (!s->s.refs[s->s.h.refidx[0]].f->buf[0] ||
583  !s->s.refs[s->s.h.refidx[1]].f->buf[0] ||
584  !s->s.refs[s->s.h.refidx[2]].f->buf[0]) {
585  av_log(avctx, AV_LOG_ERROR, "Not all references are available\n");
586  return AVERROR_INVALIDDATA;
587  }
588  if (get_bits1(&s->gb)) {
589  w = s->s.refs[s->s.h.refidx[0]].f->width;
590  h = s->s.refs[s->s.h.refidx[0]].f->height;
591  } else if (get_bits1(&s->gb)) {
592  w = s->s.refs[s->s.h.refidx[1]].f->width;
593  h = s->s.refs[s->s.h.refidx[1]].f->height;
594  } else if (get_bits1(&s->gb)) {
595  w = s->s.refs[s->s.h.refidx[2]].f->width;
596  h = s->s.refs[s->s.h.refidx[2]].f->height;
597  } else {
598  w = get_bits(&s->gb, 16) + 1;
599  h = get_bits(&s->gb, 16) + 1;
600  }
601  // Note that in this code, "CUR_FRAME" is actually before we
602  // have formally allocated a frame, and thus actually represents
603  // the _last_ frame
604  s->s.h.use_last_frame_mvs &= s->s.frames[CUR_FRAME].tf.f->width == w &&
605  s->s.frames[CUR_FRAME].tf.f->height == h;
606  if (get_bits1(&s->gb)) // display size
607  skip_bits(&s->gb, 32);
608  s->s.h.highprecisionmvs = get_bits1(&s->gb);
609  s->s.h.filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
610  get_bits(&s->gb, 2);
611  s->s.h.allowcompinter = s->s.h.signbias[0] != s->s.h.signbias[1] ||
612  s->s.h.signbias[0] != s->s.h.signbias[2];
613  if (s->s.h.allowcompinter) {
614  if (s->s.h.signbias[0] == s->s.h.signbias[1]) {
615  s->s.h.fixcompref = 2;
616  s->s.h.varcompref[0] = 0;
617  s->s.h.varcompref[1] = 1;
618  } else if (s->s.h.signbias[0] == s->s.h.signbias[2]) {
619  s->s.h.fixcompref = 1;
620  s->s.h.varcompref[0] = 0;
621  s->s.h.varcompref[1] = 2;
622  } else {
623  s->s.h.fixcompref = 0;
624  s->s.h.varcompref[0] = 1;
625  s->s.h.varcompref[1] = 2;
626  }
627  }
628  }
629  }
630  s->s.h.refreshctx = s->s.h.errorres ? 0 : get_bits1(&s->gb);
631  s->s.h.parallelmode = s->s.h.errorres ? 1 : get_bits1(&s->gb);
632  s->s.h.framectxid = c = get_bits(&s->gb, 2);
633  if (s->s.h.keyframe || s->s.h.intraonly)
634  s->s.h.framectxid = 0; // BUG: libvpx ignores this field in keyframes
635 
636  /* loopfilter header data */
637  if (s->s.h.keyframe || s->s.h.errorres || s->s.h.intraonly) {
638  // reset loopfilter defaults
639  s->s.h.lf_delta.ref[0] = 1;
640  s->s.h.lf_delta.ref[1] = 0;
641  s->s.h.lf_delta.ref[2] = -1;
642  s->s.h.lf_delta.ref[3] = -1;
643  s->s.h.lf_delta.mode[0] = 0;
644  s->s.h.lf_delta.mode[1] = 0;
645  memset(s->s.h.segmentation.feat, 0, sizeof(s->s.h.segmentation.feat));
646  }
647  s->s.h.filter.level = get_bits(&s->gb, 6);
648  sharp = get_bits(&s->gb, 3);
649  // if sharpness changed, reinit lim/mblim LUTs. if it didn't change, keep
650  // the old cache values since they are still valid
651  if (s->s.h.filter.sharpness != sharp) {
652  for (i = 1; i <= 63; i++) {
653  int limit = i;
654 
655  if (sharp > 0) {
656  limit >>= (sharp + 3) >> 2;
657  limit = FFMIN(limit, 9 - sharp);
658  }
659  limit = FFMAX(limit, 1);
660 
661  s->filter_lut.lim_lut[i] = limit;
662  s->filter_lut.mblim_lut[i] = 2 * (i + 2) + limit;
663  }
664  }
665  s->s.h.filter.sharpness = sharp;
666  if ((s->s.h.lf_delta.enabled = get_bits1(&s->gb))) {
667  if ((s->s.h.lf_delta.updated = get_bits1(&s->gb))) {
668  for (i = 0; i < 4; i++)
669  if (get_bits1(&s->gb))
670  s->s.h.lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
671  for (i = 0; i < 2; i++)
672  if (get_bits1(&s->gb))
673  s->s.h.lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
674  }
675  }
676 
677  /* quantization header data */
678  s->s.h.yac_qi = get_bits(&s->gb, 8);
679  s->s.h.ydc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
680  s->s.h.uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
681  s->s.h.uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
682  s->s.h.lossless = s->s.h.yac_qi == 0 && s->s.h.ydc_qdelta == 0 &&
683  s->s.h.uvdc_qdelta == 0 && s->s.h.uvac_qdelta == 0;
684  if (s->s.h.lossless)
686 
687  /* segmentation header info */
688  if ((s->s.h.segmentation.enabled = get_bits1(&s->gb))) {
689  if ((s->s.h.segmentation.update_map = get_bits1(&s->gb))) {
690  for (i = 0; i < 7; i++)
691  s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
692  get_bits(&s->gb, 8) : 255;
693  if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
694  for (i = 0; i < 3; i++)
695  s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
696  get_bits(&s->gb, 8) : 255;
697  }
698 
699  if (get_bits1(&s->gb)) {
700  s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
701  for (i = 0; i < 8; i++) {
702  if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
703  s->s.h.segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
704  if ((s->s.h.segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
705  s->s.h.segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
706  if ((s->s.h.segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
707  s->s.h.segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
708  s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
709  }
710  }
711  }
712 
713  // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
714  for (i = 0; i < (s->s.h.segmentation.enabled ? 8 : 1); i++) {
715  int qyac, qydc, quvac, quvdc, lflvl, sh;
716 
717  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].q_enabled) {
718  if (s->s.h.segmentation.absolute_vals)
719  qyac = av_clip_uintp2(s->s.h.segmentation.feat[i].q_val, 8);
720  else
721  qyac = av_clip_uintp2(s->s.h.yac_qi + s->s.h.segmentation.feat[i].q_val, 8);
722  } else {
723  qyac = s->s.h.yac_qi;
724  }
725  qydc = av_clip_uintp2(qyac + s->s.h.ydc_qdelta, 8);
726  quvdc = av_clip_uintp2(qyac + s->s.h.uvdc_qdelta, 8);
727  quvac = av_clip_uintp2(qyac + s->s.h.uvac_qdelta, 8);
728  qyac = av_clip_uintp2(qyac, 8);
729 
730  s->s.h.segmentation.feat[i].qmul[0][0] = ff_vp9_dc_qlookup[s->bpp_index][qydc];
731  s->s.h.segmentation.feat[i].qmul[0][1] = ff_vp9_ac_qlookup[s->bpp_index][qyac];
732  s->s.h.segmentation.feat[i].qmul[1][0] = ff_vp9_dc_qlookup[s->bpp_index][quvdc];
733  s->s.h.segmentation.feat[i].qmul[1][1] = ff_vp9_ac_qlookup[s->bpp_index][quvac];
734 
735  sh = s->s.h.filter.level >= 32;
736  if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[i].lf_enabled) {
737  if (s->s.h.segmentation.absolute_vals)
738  lflvl = av_clip_uintp2(s->s.h.segmentation.feat[i].lf_val, 6);
739  else
740  lflvl = av_clip_uintp2(s->s.h.filter.level + s->s.h.segmentation.feat[i].lf_val, 6);
741  } else {
742  lflvl = s->s.h.filter.level;
743  }
744  if (s->s.h.lf_delta.enabled) {
745  s->s.h.segmentation.feat[i].lflvl[0][0] =
746  s->s.h.segmentation.feat[i].lflvl[0][1] =
747  av_clip_uintp2(lflvl + (s->s.h.lf_delta.ref[0] * (1 << sh)), 6);
748  for (j = 1; j < 4; j++) {
749  s->s.h.segmentation.feat[i].lflvl[j][0] =
750  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
751  s->s.h.lf_delta.mode[0]) * (1 << sh)), 6);
752  s->s.h.segmentation.feat[i].lflvl[j][1] =
753  av_clip_uintp2(lflvl + ((s->s.h.lf_delta.ref[j] +
754  s->s.h.lf_delta.mode[1]) * (1 << sh)), 6);
755  }
756  } else {
757  memset(s->s.h.segmentation.feat[i].lflvl, lflvl,
758  sizeof(s->s.h.segmentation.feat[i].lflvl));
759  }
760  }
761 
762  /* tiling info */
763  if ((ret = update_size(avctx, w, h)) < 0) {
764  av_log(avctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d @ %d\n",
765  w, h, s->pix_fmt);
766  return ret;
767  }
768  for (s->s.h.tiling.log2_tile_cols = 0;
769  s->sb_cols > (64 << s->s.h.tiling.log2_tile_cols);
770  s->s.h.tiling.log2_tile_cols++) ;
771  for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
772  max = FFMAX(0, max - 1);
773  while (max > s->s.h.tiling.log2_tile_cols) {
774  if (get_bits1(&s->gb))
775  s->s.h.tiling.log2_tile_cols++;
776  else
777  break;
778  }
779  s->s.h.tiling.log2_tile_rows = decode012(&s->gb);
780  s->s.h.tiling.tile_rows = 1 << s->s.h.tiling.log2_tile_rows;
781  if (s->s.h.tiling.tile_cols != (1 << s->s.h.tiling.log2_tile_cols)) {
782  int n_range_coders;
783  VP56RangeCoder *rc;
784 
785  if (s->td) {
786  for (i = 0; i < s->active_tile_cols; i++)
787  vp9_tile_data_free(&s->td[i]);
788  av_freep(&s->td);
789  }
790 
791  s->s.h.tiling.tile_cols = 1 << s->s.h.tiling.log2_tile_cols;
792  s->active_tile_cols = avctx->active_thread_type == FF_THREAD_SLICE ?
793  s->s.h.tiling.tile_cols : 1;
794  vp9_alloc_entries(avctx, s->sb_rows);
795  if (avctx->active_thread_type == FF_THREAD_SLICE) {
796  n_range_coders = 4; // max_tile_rows
797  } else {
798  n_range_coders = s->s.h.tiling.tile_cols;
799  }
800  s->td = av_calloc(s->active_tile_cols, sizeof(VP9TileData) +
801  n_range_coders * sizeof(VP56RangeCoder));
802  if (!s->td)
803  return AVERROR(ENOMEM);
804  rc = (VP56RangeCoder *) &s->td[s->active_tile_cols];
805  for (i = 0; i < s->active_tile_cols; i++) {
806  s->td[i].s = s;
807  s->td[i].c_b = rc;
808  rc += n_range_coders;
809  }
810  }
811 
812  /* check reference frames */
813  if (!s->s.h.keyframe && !s->s.h.intraonly) {
814  int valid_ref_frame = 0;
815  for (i = 0; i < 3; i++) {
816  AVFrame *ref = s->s.refs[s->s.h.refidx[i]].f;
817  int refw = ref->width, refh = ref->height;
818 
819  if (ref->format != avctx->pix_fmt) {
820  av_log(avctx, AV_LOG_ERROR,
821  "Ref pixfmt (%s) did not match current frame (%s)",
822  av_get_pix_fmt_name(ref->format),
823  av_get_pix_fmt_name(avctx->pix_fmt));
824  return AVERROR_INVALIDDATA;
825  } else if (refw == w && refh == h) {
826  s->mvscale[i][0] = s->mvscale[i][1] = 0;
827  } else {
828  /* Check to make sure at least one of frames that */
829  /* this frame references has valid dimensions */
830  if (w * 2 < refw || h * 2 < refh || w > 16 * refw || h > 16 * refh) {
831  av_log(avctx, AV_LOG_WARNING,
832  "Invalid ref frame dimensions %dx%d for frame size %dx%d\n",
833  refw, refh, w, h);
834  s->mvscale[i][0] = s->mvscale[i][1] = REF_INVALID_SCALE;
835  continue;
836  }
837  s->mvscale[i][0] = (refw << 14) / w;
838  s->mvscale[i][1] = (refh << 14) / h;
839  s->mvstep[i][0] = 16 * s->mvscale[i][0] >> 14;
840  s->mvstep[i][1] = 16 * s->mvscale[i][1] >> 14;
841  }
842  valid_ref_frame++;
843  }
844  if (!valid_ref_frame) {
845  av_log(avctx, AV_LOG_ERROR, "No valid reference frame is found, bitstream not supported\n");
846  return AVERROR_INVALIDDATA;
847  }
848  }
849 
850  if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) {
851  s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
852  s->prob_ctx[3].p = ff_vp9_default_probs;
853  memcpy(s->prob_ctx[0].coef, ff_vp9_default_coef_probs,
854  sizeof(ff_vp9_default_coef_probs));
855  memcpy(s->prob_ctx[1].coef, ff_vp9_default_coef_probs,
856  sizeof(ff_vp9_default_coef_probs));
857  memcpy(s->prob_ctx[2].coef, ff_vp9_default_coef_probs,
858  sizeof(ff_vp9_default_coef_probs));
859  memcpy(s->prob_ctx[3].coef, ff_vp9_default_coef_probs,
860  sizeof(ff_vp9_default_coef_probs));
861  } else if (s->s.h.intraonly && s->s.h.resetctx == 2) {
862  s->prob_ctx[c].p = ff_vp9_default_probs;
863  memcpy(s->prob_ctx[c].coef, ff_vp9_default_coef_probs,
864  sizeof(ff_vp9_default_coef_probs));
865  }
866 
867  // next 16 bits is size of the rest of the header (arith-coded)
868  s->s.h.compressed_header_size = size2 = get_bits(&s->gb, 16);
869  s->s.h.uncompressed_header_size = (get_bits_count(&s->gb) + 7) / 8;
870 
871  data2 = align_get_bits(&s->gb);
872  if (size2 > size - (data2 - data)) {
873  av_log(avctx, AV_LOG_ERROR, "Invalid compressed header size\n");
874  return AVERROR_INVALIDDATA;
875  }
876  ret = ff_vp56_init_range_decoder(&s->c, data2, size2);
877  if (ret < 0)
878  return ret;
879 
880  if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
881  av_log(avctx, AV_LOG_ERROR, "Marker bit was set\n");
882  return AVERROR_INVALIDDATA;
883  }
884 
885  for (i = 0; i < s->active_tile_cols; i++) {
886  if (s->s.h.keyframe || s->s.h.intraonly) {
887  memset(s->td[i].counts.coef, 0, sizeof(s->td[0].counts.coef));
888  memset(s->td[i].counts.eob, 0, sizeof(s->td[0].counts.eob));
889  } else {
890  memset(&s->td[i].counts, 0, sizeof(s->td[0].counts));
891  }
892  s->td[i].nb_block_structure = 0;
893  }
894 
895  /* FIXME is it faster to not copy here, but do it down in the fw updates
896  * as explicit copies if the fw update is missing (and skip the copy upon
897  * fw update)? */
898  s->prob.p = s->prob_ctx[c].p;
899 
900  // txfm updates
901  if (s->s.h.lossless) {
902  s->s.h.txfmmode = TX_4X4;
903  } else {
904  s->s.h.txfmmode = vp8_rac_get_uint(&s->c, 2);
905  if (s->s.h.txfmmode == 3)
906  s->s.h.txfmmode += vp8_rac_get(&s->c);
907 
908  if (s->s.h.txfmmode == TX_SWITCHABLE) {
909  for (i = 0; i < 2; i++)
910  if (vp56_rac_get_prob_branchy(&s->c, 252))
911  s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
912  for (i = 0; i < 2; i++)
913  for (j = 0; j < 2; j++)
914  if (vp56_rac_get_prob_branchy(&s->c, 252))
915  s->prob.p.tx16p[i][j] =
916  update_prob(&s->c, s->prob.p.tx16p[i][j]);
917  for (i = 0; i < 2; i++)
918  for (j = 0; j < 3; j++)
919  if (vp56_rac_get_prob_branchy(&s->c, 252))
920  s->prob.p.tx32p[i][j] =
921  update_prob(&s->c, s->prob.p.tx32p[i][j]);
922  }
923  }
924 
925  // coef updates
926  for (i = 0; i < 4; i++) {
927  uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
928  if (vp8_rac_get(&s->c)) {
929  for (j = 0; j < 2; j++)
930  for (k = 0; k < 2; k++)
931  for (l = 0; l < 6; l++)
932  for (m = 0; m < 6; m++) {
933  uint8_t *p = s->prob.coef[i][j][k][l][m];
934  uint8_t *r = ref[j][k][l][m];
935  if (m >= 3 && l == 0) // dc only has 3 pt
936  break;
937  for (n = 0; n < 3; n++) {
938  if (vp56_rac_get_prob_branchy(&s->c, 252))
939  p[n] = update_prob(&s->c, r[n]);
940  else
941  p[n] = r[n];
942  }
943  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
944  }
945  } else {
946  for (j = 0; j < 2; j++)
947  for (k = 0; k < 2; k++)
948  for (l = 0; l < 6; l++)
949  for (m = 0; m < 6; m++) {
950  uint8_t *p = s->prob.coef[i][j][k][l][m];
951  uint8_t *r = ref[j][k][l][m];
952  if (m > 3 && l == 0) // dc only has 3 pt
953  break;
954  memcpy(p, r, 3);
955  memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
956  }
957  }
958  if (s->s.h.txfmmode == i)
959  break;
960  }
961 
962  // mode updates
963  for (i = 0; i < 3; i++)
964  if (vp56_rac_get_prob_branchy(&s->c, 252))
965  s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
966  if (!s->s.h.keyframe && !s->s.h.intraonly) {
967  for (i = 0; i < 7; i++)
968  for (j = 0; j < 3; j++)
969  if (vp56_rac_get_prob_branchy(&s->c, 252))
970  s->prob.p.mv_mode[i][j] =
971  update_prob(&s->c, s->prob.p.mv_mode[i][j]);
972 
973  if (s->s.h.filtermode == FILTER_SWITCHABLE)
974  for (i = 0; i < 4; i++)
975  for (j = 0; j < 2; j++)
976  if (vp56_rac_get_prob_branchy(&s->c, 252))
977  s->prob.p.filter[i][j] =
978  update_prob(&s->c, s->prob.p.filter[i][j]);
979 
980  for (i = 0; i < 4; i++)
981  if (vp56_rac_get_prob_branchy(&s->c, 252))
982  s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
983 
984  if (s->s.h.allowcompinter) {
985  s->s.h.comppredmode = vp8_rac_get(&s->c);
986  if (s->s.h.comppredmode)
987  s->s.h.comppredmode += vp8_rac_get(&s->c);
988  if (s->s.h.comppredmode == PRED_SWITCHABLE)
989  for (i = 0; i < 5; i++)
990  if (vp56_rac_get_prob_branchy(&s->c, 252))
991  s->prob.p.comp[i] =
992  update_prob(&s->c, s->prob.p.comp[i]);
993  } else {
994  s->s.h.comppredmode = PRED_SINGLEREF;
995  }
996 
997  if (s->s.h.comppredmode != PRED_COMPREF) {
998  for (i = 0; i < 5; i++) {
999  if (vp56_rac_get_prob_branchy(&s->c, 252))
1000  s->prob.p.single_ref[i][0] =
1001  update_prob(&s->c, s->prob.p.single_ref[i][0]);
1002  if (vp56_rac_get_prob_branchy(&s->c, 252))
1003  s->prob.p.single_ref[i][1] =
1004  update_prob(&s->c, s->prob.p.single_ref[i][1]);
1005  }
1006  }
1007 
1008  if (s->s.h.comppredmode != PRED_SINGLEREF) {
1009  for (i = 0; i < 5; i++)
1010  if (vp56_rac_get_prob_branchy(&s->c, 252))
1011  s->prob.p.comp_ref[i] =
1012  update_prob(&s->c, s->prob.p.comp_ref[i]);
1013  }
1014 
1015  for (i = 0; i < 4; i++)
1016  for (j = 0; j < 9; j++)
1017  if (vp56_rac_get_prob_branchy(&s->c, 252))
1018  s->prob.p.y_mode[i][j] =
1019  update_prob(&s->c, s->prob.p.y_mode[i][j]);
1020 
1021  for (i = 0; i < 4; i++)
1022  for (j = 0; j < 4; j++)
1023  for (k = 0; k < 3; k++)
1024  if (vp56_rac_get_prob_branchy(&s->c, 252))
1025  s->prob.p.partition[3 - i][j][k] =
1026  update_prob(&s->c,
1027  s->prob.p.partition[3 - i][j][k]);
1028 
1029  // mv fields don't use the update_prob subexp model for some reason
1030  for (i = 0; i < 3; i++)
1031  if (vp56_rac_get_prob_branchy(&s->c, 252))
1032  s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1033 
1034  for (i = 0; i < 2; i++) {
1035  if (vp56_rac_get_prob_branchy(&s->c, 252))
1036  s->prob.p.mv_comp[i].sign =
1037  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1038 
1039  for (j = 0; j < 10; j++)
1040  if (vp56_rac_get_prob_branchy(&s->c, 252))
1041  s->prob.p.mv_comp[i].classes[j] =
1042  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1043 
1044  if (vp56_rac_get_prob_branchy(&s->c, 252))
1045  s->prob.p.mv_comp[i].class0 =
1046  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1047 
1048  for (j = 0; j < 10; j++)
1049  if (vp56_rac_get_prob_branchy(&s->c, 252))
1050  s->prob.p.mv_comp[i].bits[j] =
1051  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1052  }
1053 
1054  for (i = 0; i < 2; i++) {
1055  for (j = 0; j < 2; j++)
1056  for (k = 0; k < 3; k++)
1057  if (vp56_rac_get_prob_branchy(&s->c, 252))
1058  s->prob.p.mv_comp[i].class0_fp[j][k] =
1059  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1060 
1061  for (j = 0; j < 3; j++)
1062  if (vp56_rac_get_prob_branchy(&s->c, 252))
1063  s->prob.p.mv_comp[i].fp[j] =
1064  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1065  }
1066 
1067  if (s->s.h.highprecisionmvs) {
1068  for (i = 0; i < 2; i++) {
1069  if (vp56_rac_get_prob_branchy(&s->c, 252))
1070  s->prob.p.mv_comp[i].class0_hp =
1071  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1072 
1073  if (vp56_rac_get_prob_branchy(&s->c, 252))
1074  s->prob.p.mv_comp[i].hp =
1075  (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
1076  }
1077  }
1078  }
1079 
1080  return (data2 - data) + size2;
1081 }
1082 
/**
 * Recursively parse the partition tree and decode one superblock
 * (decoding-pass path; the stored-structure replay is decode_sb_mem()).
 */
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl,
                      ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
{
    const VP9Context *s = td->s;
    /* Partition context: bit 0 from the above row, bit 1 from the left
     * column (left context is per-superblock, hence row & 0x7). */
    int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
            (((td->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1);
    // Keyframes / intra-only frames use the fixed keyframe partition tables.
    const uint8_t *p = s->s.h.keyframe || s->s.h.intraonly ? ff_vp9_default_kf_partition_probs[bl][c] :
                       s->prob.p.partition[bl][c];
    enum BlockPartition bp;
    ptrdiff_t hbs = 4 >> bl;  // half block size at this level, in 8x8-block units
    AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
    ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
    int bytesperpixel = s->bytesperpixel;

    if (bl == BL_8X8) {
        /* NOTE(review): the range-coder read that sets `bp` appears to be
         * elided in this extracted view — confirm against the full file. */
        ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
    } else if (col + hbs < s->cols) { // FIXME why not <=?
        if (row + hbs < s->rows) { // FIXME why not <=?
            /* NOTE(review): the partition-tree read that sets `bp` appears to
             * be elided in this extracted view — confirm against the full file. */
            switch (bp) {
            case PARTITION_NONE:
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_H:
                // top half, then bottom half
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_V:
                // left half, then right half
                ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
                yoff += hbs * 8 * bytesperpixel;
                uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
                ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
                break;
            case PARTITION_SPLIT:
                // recurse into the four quadrants at the next block level
                decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                yoff += hbs * 8 * y_stride;
                uvoff += hbs * 8 * uv_stride >> s->ss_v;
                decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
                decode_sb(td, row + hbs, col + hbs, lflvl,
                          yoff + 8 * hbs * bytesperpixel,
                          uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
                break;
            default:
                av_assert0(0);
            }
        } else if (vp56_rac_get_prob_branchy(td->c, p[1])) {
            // bottom frame edge: only SPLIT (into the top halves) or H fit
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            decode_sb(td, row, col + hbs, lflvl,
                      yoff + 8 * hbs * bytesperpixel,
                      uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
        } else {
            bp = PARTITION_H;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else if (row + hbs < s->rows) { // FIXME why not <=?
        if (vp56_rac_get_prob_branchy(td->c, p[2])) {
            // right frame edge: only SPLIT (into the left halves) or V fit
            bp = PARTITION_SPLIT;
            decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
            yoff += hbs * 8 * y_stride;
            uvoff += hbs * 8 * uv_stride >> s->ss_v;
            decode_sb(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
        } else {
            bp = PARTITION_V;
            ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, bl, bp);
        }
    } else {
        // bottom-right corner: forced SPLIT, only the top-left quadrant exists
        bp = PARTITION_SPLIT;
        decode_sb(td, row, col, lflvl, yoff, uvoff, bl + 1);
    }
    // gather partition statistics for later probability adaptation
    td->counts.partition[bl][c][bp]++;
}
1161 
1162 static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl,
1163  ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
1164 {
1165  const VP9Context *s = td->s;
1166  VP9Block *b = td->b;
1167  ptrdiff_t hbs = 4 >> bl;
1168  AVFrame *f = s->s.frames[CUR_FRAME].tf.f;
1169  ptrdiff_t y_stride = f->linesize[0], uv_stride = f->linesize[1];
1170  int bytesperpixel = s->bytesperpixel;
1171 
1172  if (bl == BL_8X8) {
1173  av_assert2(b->bl == BL_8X8);
1174  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1175  } else if (td->b->bl == bl) {
1176  ff_vp9_decode_block(td, row, col, lflvl, yoff, uvoff, b->bl, b->bp);
1177  if (b->bp == PARTITION_H && row + hbs < s->rows) {
1178  yoff += hbs * 8 * y_stride;
1179  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1180  ff_vp9_decode_block(td, row + hbs, col, lflvl, yoff, uvoff, b->bl, b->bp);
1181  } else if (b->bp == PARTITION_V && col + hbs < s->cols) {
1182  yoff += hbs * 8 * bytesperpixel;
1183  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1184  ff_vp9_decode_block(td, row, col + hbs, lflvl, yoff, uvoff, b->bl, b->bp);
1185  }
1186  } else {
1187  decode_sb_mem(td, row, col, lflvl, yoff, uvoff, bl + 1);
1188  if (col + hbs < s->cols) { // FIXME why not <=?
1189  if (row + hbs < s->rows) {
1190  decode_sb_mem(td, row, col + hbs, lflvl, yoff + 8 * hbs * bytesperpixel,
1191  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1192  yoff += hbs * 8 * y_stride;
1193  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1194  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1195  decode_sb_mem(td, row + hbs, col + hbs, lflvl,
1196  yoff + 8 * hbs * bytesperpixel,
1197  uvoff + (8 * hbs * bytesperpixel >> s->ss_h), bl + 1);
1198  } else {
1199  yoff += hbs * 8 * bytesperpixel;
1200  uvoff += hbs * 8 * bytesperpixel >> s->ss_h;
1201  decode_sb_mem(td, row, col + hbs, lflvl, yoff, uvoff, bl + 1);
1202  }
1203  } else if (row + hbs < s->rows) {
1204  yoff += hbs * 8 * y_stride;
1205  uvoff += hbs * 8 * uv_stride >> s->ss_v;
1206  decode_sb_mem(td, row + hbs, col, lflvl, yoff, uvoff, bl + 1);
1207  }
1208  }
1209 }
1210 
/**
 * Compute the [start, end) extent of tile `idx` along one dimension.
 *
 * @param start   output: first 8x8-block index of the tile
 * @param end     output: one past the last 8x8-block index of the tile
 * @param idx     tile index along this dimension
 * @param log2_n  log2 of the tile count along this dimension
 * @param n       total number of superblocks along this dimension
 */
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
{
    // Superblock index where this tile begins and where the next one begins.
    int sb_lo = (idx * n) >> log2_n;
    int sb_hi = ((idx + 1) * n) >> log2_n;

    // Clamp to the frame edge, then convert superblocks to 8x8-block units.
    if (sb_lo > n)
        sb_lo = n;
    if (sb_hi > n)
        sb_hi = n;
    *start = sb_lo << 3;
    *end   = sb_hi << 3;
}
1218 
{
    /* NOTE(review): the signature line is elided in this extracted view —
     * presumably free_buffers(VP9Context *s), judging from the call in the
     * close function below; confirm against the full file. */
    int i;

    // Release the intra-prediction backup rows; presumably the three planes
    // share one allocation anchored at [0] — TODO confirm.
    av_freep(&s->intra_pred_data[0]);
    // Free per-tile scratch data for every active tile column.
    for (i = 0; i < s->active_tile_cols; i++)
        vp9_tile_data_free(&s->td[i]);
}
1227 
{
    /* NOTE(review): the signature line is elided in this extracted view —
     * this is the decoder close callback (vp9_decode_free, per the AVCodec
     * definition below); confirm against the full file. */
    VP9Context *s = avctx->priv_data;
    int i;

    // Drop the three internal frames (current, segmap ref, mvpair ref).
    for (i = 0; i < 3; i++) {
        vp9_frame_unref(avctx, &s->s.frames[i]);
        av_frame_free(&s->s.frames[i].tf.f);
    }
    av_buffer_pool_uninit(&s->frame_extradata_pool);
    // Release all 8 reference slots, both the active and the pending set.
    for (i = 0; i < 8; i++) {
        ff_thread_release_buffer(avctx, &s->s.refs[i]);
        av_frame_free(&s->s.refs[i].f);
        ff_thread_release_buffer(avctx, &s->next_refs[i]);
        av_frame_free(&s->next_refs[i].f);
    }

    free_buffers(s);
#if HAVE_THREADS
    av_freep(&s->entries);
    ff_pthread_free(s, vp9_context_offsets);
#endif
    av_freep(&s->td);
    return 0;
}
1253 
/**
 * Decode all tiles of the current frame on a single thread.
 *
 * @param data  start of the tile payload (after the compressed header)
 * @param size  bytes available at data
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_tiles(AVCodecContext *avctx,
                        const uint8_t *data, int size)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[0];
    int row, col, tile_row, tile_col, ret;
    int bytesperpixel;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    AVFrame *f;
    ptrdiff_t yoff, uvoff, ls_y, ls_uv;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];
    bytesperpixel = s->bytesperpixel;

    yoff = uvoff = 0;
    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // Set up one range decoder per tile column of this tile row.
        for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
            int64_t tile_size;

            // The final tile carries no explicit size; it spans the rest.
            if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                tile_row == s->s.h.tiling.tile_rows - 1) {
                tile_size = size;
            } else {
                tile_size = AV_RB32(data);
                data += 4;
                size -= 4;
            }
            if (tile_size > size) {
                // Unblock frame-threaded consumers before bailing out.
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            ret = ff_vp56_init_range_decoder(&td->c_b[tile_col], data, tile_size);
            if (ret < 0)
                return ret;
            if (vp56_rac_get_prob_branchy(&td->c_b[tile_col], 128)) { // marker bit
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return AVERROR_INVALIDDATA;
            }
            data += tile_size;
            size -= tile_size;
        }

        // Walk superblock rows; within a row, iterate over tile columns.
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            VP9Filter *lflvl_ptr = s->lflvl;
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;

            for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                set_tile_offset(&tile_col_start, &tile_col_end,
                                tile_col, s->s.h.tiling.log2_tile_cols, s->sb_cols);
                td->tile_col_start = tile_col_start;
                if (s->pass != 2) {
                    // Reset left-edge contexts at each tile boundary.
                    memset(td->left_partition_ctx, 0, 8);
                    memset(td->left_skip_ctx, 0, 8);
                    if (s->s.h.keyframe || s->s.h.intraonly) {
                        memset(td->left_mode_ctx, DC_PRED, 16);
                    } else {
                        memset(td->left_mode_ctx, NEARESTMV, 8);
                    }
                    memset(td->left_y_nnz_ctx, 0, 16);
                    memset(td->left_uv_nnz_ctx, 0, 32);
                    memset(td->left_segpred_ctx, 0, 8);

                    td->c = &td->c_b[tile_col];
                }

                for (col = tile_col_start;
                     col < tile_col_end;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    // FIXME integrate with lf code (i.e. zero after each
                    // use, similar to invtxfm coefficients, or similar)
                    if (s->pass != 1) {
                        memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                    }

                    if (s->pass == 2) {
                        // Pass 2 replays the structure recorded in pass 1.
                        decode_sb_mem(td, row, col, lflvl_ptr,
                                      yoff2, uvoff2, BL_64X64);
                    } else {
                        // Bail out if the range coder ran out of data.
                        if (vpX_rac_is_end(td->c)) {
                            return AVERROR_INVALIDDATA;
                        }
                        decode_sb(td, row, col, lflvl_ptr,
                                  yoff2, uvoff2, BL_64X64);
                    }
                }
            }

            if (s->pass == 1)
                continue;

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0],
                       f->data[0] + yoff + 63 * ls_y,
                       8 * s->cols * bytesperpixel);
                memcpy(s->intra_pred_data[1],
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2],
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * s->cols * bytesperpixel >> s->ss_h);
            }

            // loopfilter one row
            if (s->s.h.filter.level) {
                yoff2 = yoff;
                uvoff2 = uvoff;
                lflvl_ptr = s->lflvl;
                for (col = 0; col < s->cols;
                     col += 8, yoff2 += 64 * bytesperpixel,
                     uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                    ff_vp9_loopfilter_sb(avctx, lflvl_ptr, row, col,
                                         yoff2, uvoff2);
                }
            }

            // FIXME maybe we can make this more finegrained by running the
            // loopfilter per-block instead of after each sbrow
            // In fact that would also make intra pred left preparation easier?
            ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, row >> 3, 0);
        }
    }
    return 0;
}
1386 
1387 #if HAVE_THREADS
/**
 * Slice-threading worker: decode one tile column (selected by jobnr) across
 * all tile rows, publishing per-superblock-row progress for the loopfilter
 * thread via vp9_report_tile_progress().
 */
static av_always_inline
int decode_tiles_mt(AVCodecContext *avctx, void *tdata, int jobnr,
                    int threadnr)
{
    VP9Context *s = avctx->priv_data;
    VP9TileData *td = &s->td[jobnr];
    ptrdiff_t uvoff, yoff, ls_y, ls_uv;
    int bytesperpixel = s->bytesperpixel, row, col, tile_row;
    unsigned tile_cols_len;
    int tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    VP9Filter *lflvl_ptr_base;
    AVFrame *f;

    f = s->s.frames[CUR_FRAME].tf.f;
    ls_y = f->linesize[0];
    ls_uv =f->linesize[1];

    set_tile_offset(&tile_col_start, &tile_col_end,
                    jobnr, s->s.h.tiling.log2_tile_cols, s->sb_cols);
    td->tile_col_start = tile_col_start;
    // Plane offsets of this tile column's left edge.
    uvoff = (64 * bytesperpixel >> s->ss_h)*(tile_col_start >> 3);
    yoff = (64 * bytesperpixel)*(tile_col_start >> 3);
    lflvl_ptr_base = s->lflvl+(tile_col_start >> 3);

    for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
        set_tile_offset(&tile_row_start, &tile_row_end,
                        tile_row, s->s.h.tiling.log2_tile_rows, s->sb_rows);

        // Range decoders were pre-initialized by the caller, one per tile row.
        td->c = &td->c_b[tile_row];
        for (row = tile_row_start; row < tile_row_end;
             row += 8, yoff += ls_y * 64, uvoff += ls_uv * 64 >> s->ss_v) {
            ptrdiff_t yoff2 = yoff, uvoff2 = uvoff;
            VP9Filter *lflvl_ptr = lflvl_ptr_base+s->sb_cols*(row >> 3);

            // Reset left-edge contexts at the tile's left boundary.
            memset(td->left_partition_ctx, 0, 8);
            memset(td->left_skip_ctx, 0, 8);
            if (s->s.h.keyframe || s->s.h.intraonly) {
                memset(td->left_mode_ctx, DC_PRED, 16);
            } else {
                memset(td->left_mode_ctx, NEARESTMV, 8);
            }
            memset(td->left_y_nnz_ctx, 0, 16);
            memset(td->left_uv_nnz_ctx, 0, 32);
            memset(td->left_segpred_ctx, 0, 8);

            for (col = tile_col_start;
                 col < tile_col_end;
                 col += 8, yoff2 += 64 * bytesperpixel,
                 uvoff2 += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
                // FIXME integrate with lf code (i.e. zero after each
                // use, similar to invtxfm coefficients, or similar)
                memset(lflvl_ptr->mask, 0, sizeof(lflvl_ptr->mask));
                decode_sb(td, row, col, lflvl_ptr,
                          yoff2, uvoff2, BL_64X64);
            }

            // backup pre-loopfilter reconstruction data for intra
            // prediction of next row of sb64s
            tile_cols_len = tile_col_end - tile_col_start;
            if (row + 8 < s->rows) {
                memcpy(s->intra_pred_data[0] + (tile_col_start * 8 * bytesperpixel),
                       f->data[0] + yoff + 63 * ls_y,
                       8 * tile_cols_len * bytesperpixel);
                memcpy(s->intra_pred_data[1] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[1] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
                memcpy(s->intra_pred_data[2] + (tile_col_start * 8 * bytesperpixel >> s->ss_h),
                       f->data[2] + uvoff + ((64 >> s->ss_v) - 1) * ls_uv,
                       8 * tile_cols_len * bytesperpixel >> s->ss_h);
            }

            // Mark this superblock row done for this tile column.
            vp9_report_tile_progress(s, row >> 3, 1);
        }
    }
    return 0;
}
1464 
1465 static av_always_inline
1466 int loopfilter_proc(AVCodecContext *avctx)
1467 {
1468  VP9Context *s = avctx->priv_data;
1469  ptrdiff_t uvoff, yoff, ls_y, ls_uv;
1470  VP9Filter *lflvl_ptr;
1471  int bytesperpixel = s->bytesperpixel, col, i;
1472  AVFrame *f;
1473 
1474  f = s->s.frames[CUR_FRAME].tf.f;
1475  ls_y = f->linesize[0];
1476  ls_uv =f->linesize[1];
1477 
1478  for (i = 0; i < s->sb_rows; i++) {
1479  vp9_await_tile_progress(s, i, s->s.h.tiling.tile_cols);
1480 
1481  if (s->s.h.filter.level) {
1482  yoff = (ls_y * 64)*i;
1483  uvoff = (ls_uv * 64 >> s->ss_v)*i;
1484  lflvl_ptr = s->lflvl+s->sb_cols*i;
1485  for (col = 0; col < s->cols;
1486  col += 8, yoff += 64 * bytesperpixel,
1487  uvoff += 64 * bytesperpixel >> s->ss_h, lflvl_ptr++) {
1488  ff_vp9_loopfilter_sb(avctx, lflvl_ptr, i << 3, col,
1489  yoff, uvoff);
1490  }
1491  }
1492  }
1493  return 0;
1494 }
1495 #endif
1496 
{
    /* NOTE(review): the signature line is elided in this extracted view —
     * this is vp9_export_enc_params(VP9Context *s, VP9Frame *frame), judging
     * by the call site in vp9_decode_frame(); confirm against the full file. */
    AVVideoEncParams *par;
    unsigned int tile, nb_blocks = 0;

    // Per-block entries are only exported when segmentation is active.
    if (s->s.h.segmentation.enabled) {
        for (tile = 0; tile < s->active_tile_cols; tile++)
            nb_blocks += s->td[tile].nb_block_structure;
    }

    /* NOTE(review): the av_video_enc_params_create_side_data() call that
     * assigns `par` is elided in this extracted view — only its argument
     * continuation survives below; confirm against the full file. */
                                  AV_VIDEO_ENC_PARAMS_VP9, nb_blocks);
    if (!par)
        return AVERROR(ENOMEM);

    // Frame-level quantizer plus the per-plane DC/AC deltas.
    par->qp = s->s.h.yac_qi;
    par->delta_qp[0][0] = s->s.h.ydc_qdelta;
    par->delta_qp[1][0] = s->s.h.uvdc_qdelta;
    par->delta_qp[2][0] = s->s.h.uvdc_qdelta;
    par->delta_qp[1][1] = s->s.h.uvac_qdelta;
    par->delta_qp[2][1] = s->s.h.uvac_qdelta;

    if (nb_blocks) {
        unsigned int block = 0;
        unsigned int tile, block_tile;

        for (tile = 0; tile < s->active_tile_cols; tile++) {
            VP9TileData *td = &s->td[tile];

            for (block_tile = 0; block_tile < td->nb_block_structure; block_tile++) {
                /* NOTE(review): the av_video_enc_params_block() call that
                 * assigns `b` (using `block`) is elided in this extracted
                 * view; confirm against the full file. */
                unsigned int row = td->block_structure[block_tile].row;
                unsigned int col = td->block_structure[block_tile].col;
                uint8_t seg_id = frame->segmentation_map[row * 8 * s->sb_cols + col];

                // Block geometry in pixels: positions and sizes are stored
                // in 8x8-block units / log2 size indices.
                b->src_x = col * 8;
                b->src_y = row * 8;
                b->w = 1 << (3 + td->block_structure[block_tile].block_size_idx_x);
                b->h = 1 << (3 + td->block_structure[block_tile].block_size_idx_y);

                if (s->s.h.segmentation.feat[seg_id].q_enabled) {
                    b->delta_qp = s->s.h.segmentation.feat[seg_id].q_val;
                    // With absolute_vals, q_val replaces the base qp, so
                    // re-express it as a delta against par->qp.
                    if (s->s.h.segmentation.absolute_vals)
                        b->delta_qp -= par->qp;
                }
            }
        }
    }

    return 0;
}
1548 
/**
 * Decode one VP9 packet. Outputs a frame when the bitstream marks it
 * visible, or when a "show existing frame" header re-displays a reference.
 */
static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
                            int *got_frame, AVPacket *pkt)
{
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    VP9Context *s = avctx->priv_data;
    int ret, i, j, ref;
    // Keep the previous segmentation map if this frame does not update it.
    int retain_segmap_ref = s->s.frames[REF_FRAME_SEGMAP].segmentation_map &&
                            (!s->s.h.segmentation.enabled || !s->s.h.segmentation.update_map);
    AVFrame *f;

    if ((ret = decode_frame_header(avctx, data, size, &ref)) < 0) {
        return ret;
    } else if (ret == 0) {
        // ret == 0: "show existing frame" — re-output reference `ref`
        // without decoding anything.
        if (!s->s.refs[ref].f->buf[0]) {
            av_log(avctx, AV_LOG_ERROR, "Requested reference %d not available\n", ref);
            return AVERROR_INVALIDDATA;
        }
        if ((ret = av_frame_ref(frame, s->s.refs[ref].f)) < 0)
            return ret;
        ((AVFrame *)frame)->pts = pkt->pts;
        ((AVFrame *)frame)->pkt_dts = pkt->dts;
        // Carry the reference set forward unchanged.
        for (i = 0; i < 8; i++) {
            if (s->next_refs[i].f->buf[0])
                ff_thread_release_buffer(avctx, &s->next_refs[i]);
            if (s->s.refs[i].f->buf[0] &&
                (ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
                return ret;
        }
        *got_frame = 1;
        return pkt->size;
    }
    data += ret;
    size -= ret;

    // Rotate the previous CUR_FRAME into the segmentation-map and mv-pair
    // reference slots before allocating the new current frame.
    if (!retain_segmap_ref || s->s.h.keyframe || s->s.h.intraonly) {
        if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0])
            vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
        if (!s->s.h.keyframe && !s->s.h.intraonly && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
            (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_SEGMAP], &s->s.frames[CUR_FRAME])) < 0)
            return ret;
    }
    if (s->s.frames[REF_FRAME_MVPAIR].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_MVPAIR]);
    if (!s->s.h.intraonly && !s->s.h.keyframe && !s->s.h.errorres && s->s.frames[CUR_FRAME].tf.f->buf[0] &&
        (ret = vp9_frame_ref(avctx, &s->s.frames[REF_FRAME_MVPAIR], &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    if (s->s.frames[CUR_FRAME].tf.f->buf[0])
        vp9_frame_unref(avctx, &s->s.frames[CUR_FRAME]);
    if ((ret = vp9_frame_alloc(avctx, &s->s.frames[CUR_FRAME])) < 0)
        return ret;
    f = s->s.frames[CUR_FRAME].tf.f;
    f->key_frame = s->s.h.keyframe;
    f->pict_type = (s->s.h.keyframe || s->s.h.intraonly) ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    // A stale segmap reference of mismatched dimensions cannot be reused.
    if (s->s.frames[REF_FRAME_SEGMAP].tf.f->buf[0] &&
        (s->s.frames[REF_FRAME_MVPAIR].tf.f->width != s->s.frames[CUR_FRAME].tf.f->width ||
         s->s.frames[REF_FRAME_MVPAIR].tf.f->height != s->s.frames[CUR_FRAME].tf.f->height)) {
        vp9_frame_unref(avctx, &s->s.frames[REF_FRAME_SEGMAP]);
    }

    // ref frame setup
    for (i = 0; i < 8; i++) {
        if (s->next_refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->next_refs[i]);
        if (s->s.h.refreshrefmask & (1 << i)) {
            // Slot refreshed by this frame: point it at CUR_FRAME.
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
        } else if (s->s.refs[i].f->buf[0]) {
            ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i]);
        }
        if (ret < 0)
            return ret;
    }

    // Hardware path: hand the entire packet to the hwaccel and skip the
    // software tile loop.
    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->decode_slice(avctx, pkt->data, pkt->size);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
        goto finish;
    }

    // main tile decode loop
    // Reset above-row contexts for the new frame.
    memset(s->above_partition_ctx, 0, s->cols);
    memset(s->above_skip_ctx, 0, s->cols);
    if (s->s.h.keyframe || s->s.h.intraonly) {
        memset(s->above_mode_ctx, DC_PRED, s->cols * 2);
    } else {
        memset(s->above_mode_ctx, NEARESTMV, s->cols);
    }
    memset(s->above_y_nnz_ctx, 0, s->sb_cols * 16);
    memset(s->above_uv_nnz_ctx[0], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_uv_nnz_ctx[1], 0, s->sb_cols * 16 >> s->ss_h);
    memset(s->above_segpred_ctx, 0, s->cols);
    // Two-pass decoding is used under frame threading when this frame
    // adapts its probabilities (refreshctx set without parallelmode).
    s->pass = s->s.frames[CUR_FRAME].uses_2pass =
        avctx->active_thread_type == FF_THREAD_FRAME && s->s.h.refreshctx && !s->s.h.parallelmode;
    if ((ret = update_block_buffers(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate block buffers\n");
        return ret;
    }
    if (s->s.h.refreshctx && s->s.h.parallelmode) {
        // Parallel mode: commit the parsed probabilities to the frame
        // context immediately so dependent frame threads can proceed.
        int j, k, l, m;

        for (i = 0; i < 4; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++)
                            memcpy(s->prob_ctx[s->s.h.framectxid].coef[i][j][k][l][m],
                                   s->prob.coef[i][j][k][l][m], 3);
            if (s->s.h.txfmmode == i)
                break;
        }
        s->prob_ctx[s->s.h.framectxid].p = s->prob.p;
        ff_thread_finish_setup(avctx);
    } else if (!s->s.h.refreshctx) {
        ff_thread_finish_setup(avctx);
    }

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        // Reset the per-sb-row progress counters for this frame.
        for (i = 0; i < s->sb_rows; i++)
            atomic_store(&s->entries[i], 0);
    }
#endif

    do {
        // Rewind per-tile scratch pointers at the start of each pass.
        for (i = 0; i < s->active_tile_cols; i++) {
            s->td[i].b = s->td[i].b_base;
            s->td[i].block = s->td[i].block_base;
            s->td[i].uvblock[0] = s->td[i].uvblock_base[0];
            s->td[i].uvblock[1] = s->td[i].uvblock_base[1];
            s->td[i].eob = s->td[i].eob_base;
            s->td[i].uveob[0] = s->td[i].uveob_base[0];
            s->td[i].uveob[1] = s->td[i].uveob_base[1];
            s->td[i].error_info = 0;
        }

#if HAVE_THREADS
        if (avctx->active_thread_type == FF_THREAD_SLICE) {
            int tile_row, tile_col;

            av_assert1(!s->pass);

            // Pre-initialize every (row, col) tile range decoder so the
            // per-column workers can run independently.
            for (tile_row = 0; tile_row < s->s.h.tiling.tile_rows; tile_row++) {
                for (tile_col = 0; tile_col < s->s.h.tiling.tile_cols; tile_col++) {
                    int64_t tile_size;

                    if (tile_col == s->s.h.tiling.tile_cols - 1 &&
                        tile_row == s->s.h.tiling.tile_rows - 1) {
                        tile_size = size;
                    } else {
                        tile_size = AV_RB32(data);
                        data += 4;
                        size -= 4;
                    }
                    if (tile_size > size)
                        return AVERROR_INVALIDDATA;
                    ret = ff_vp56_init_range_decoder(&s->td[tile_col].c_b[tile_row], data, tile_size);
                    if (ret < 0)
                        return ret;
                    if (vp56_rac_get_prob_branchy(&s->td[tile_col].c_b[tile_row], 128)) // marker bit
                        return AVERROR_INVALIDDATA;
                    data += tile_size;
                    size -= tile_size;
                }
            }

            ff_slice_thread_execute_with_mainfunc(avctx, decode_tiles_mt, loopfilter_proc, s->td, NULL, s->s.h.tiling.tile_cols);
        } else
#endif
        {
            ret = decode_tiles(avctx, data, size);
            if (ret < 0) {
                ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);
                return ret;
            }
        }

        // Sum all counts fields into td[0].counts for tile threading
        if (avctx->active_thread_type == FF_THREAD_SLICE)
            for (i = 1; i < s->s.h.tiling.tile_cols; i++)
                for (j = 0; j < sizeof(s->td[i].counts) / sizeof(unsigned); j++)
                    ((unsigned *)&s->td[0].counts)[j] += ((unsigned *)&s->td[i].counts)[j];

        if (s->pass < 2 && s->s.h.refreshctx && !s->s.h.parallelmode) {
            /* NOTE(review): the probability-adaptation call that precedes
             * finish_setup here is elided in this extracted view — confirm
             * against the full file. */
            ff_thread_finish_setup(avctx);
        }
    } while (s->pass++ == 1);
    ff_thread_report_progress(&s->s.frames[CUR_FRAME].tf, INT_MAX, 0);

    if (s->td->error_info < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to decode tile data\n");
        s->td->error_info = 0;
        return AVERROR_INVALIDDATA;
    }
    /* NOTE(review): the enclosing `if (...)` guard for this export block is
     * elided in this extracted view; the closing brace below belongs to it.
     * Confirm against the full file. */
        ret = vp9_export_enc_params(s, &s->s.frames[CUR_FRAME]);
        if (ret < 0)
            return ret;
    }

finish:
    // ref frame setup: promote next_refs into the active reference set.
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(avctx, &s->s.refs[i]);
        if (s->next_refs[i].f->buf[0] &&
            (ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
            return ret;
    }

    if (!s->s.h.invisible) {
        if ((ret = av_frame_ref(frame, s->s.frames[CUR_FRAME].tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }

    return pkt->size;
}
1776 
{
    /* NOTE(review): the signature line is elided in this extracted view —
     * presumably the decoder flush callback (vp9_decode_flush); confirm
     * against the full file. */
    VP9Context *s = avctx->priv_data;
    int i;

    // Drop all internal frames and every reference slot.
    for (i = 0; i < 3; i++)
        vp9_frame_unref(avctx, &s->s.frames[i]);
    for (i = 0; i < 8; i++)
        ff_thread_release_buffer(avctx, &s->s.refs[i]);
}
1787 
{
    /* NOTE(review): the signature line is elided in this extracted view —
     * this is the decoder init callback (vp9_decode_init, per the AVCodec
     * definition below); confirm against the full file. */
    VP9Context *s = avctx->priv_data;
    int ret;

    s->last_bpp = 0;
    // -1 marks the sharpness as not yet seen; presumably forces the filter
    // limit tables to be (re)built on the first frame header — TODO confirm.
    s->s.h.filter.sharpness = -1;

#if HAVE_THREADS
    if (avctx->active_thread_type & FF_THREAD_SLICE) {
        // Initialize progress_mutex/progress_cond (see DEFINE_OFFSET_ARRAY
        // at the top of the file).
        ret = ff_pthread_init(s, vp9_context_offsets);
        if (ret < 0)
            return ret;
    }
#endif

    // Allocate AVFrame shells up front; pixel buffers are attached per frame.
    for (int i = 0; i < 3; i++) {
        s->s.frames[i].tf.f = av_frame_alloc();
        if (!s->s.frames[i].tf.f)
            return AVERROR(ENOMEM);
    }
    for (int i = 0; i < 8; i++) {
        s->s.refs[i].f = av_frame_alloc();
        s->next_refs[i].f = av_frame_alloc();
        if (!s->s.refs[i].f || !s->next_refs[i].f)
            return AVERROR(ENOMEM);
    }
    return 0;
}
1817 
1818 #if HAVE_THREADS
/**
 * Frame threading: copy the decoding state a future frame depends on from
 * the source (previous frame's) context into the destination context.
 */
static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    int i, ret;
    VP9Context *s = dst->priv_data, *ssrc = src->priv_data;

    // Re-reference the three internal frames from the source context.
    for (i = 0; i < 3; i++) {
        if (s->s.frames[i].tf.f->buf[0])
            vp9_frame_unref(dst, &s->s.frames[i]);
        if (ssrc->s.frames[i].tf.f->buf[0]) {
            if ((ret = vp9_frame_ref(dst, &s->s.frames[i], &ssrc->s.frames[i])) < 0)
                return ret;
        }
    }
    // The source's *next* reference set becomes this context's current set.
    for (i = 0; i < 8; i++) {
        if (s->s.refs[i].f->buf[0])
            ff_thread_release_buffer(dst, &s->s.refs[i]);
        if (ssrc->next_refs[i].f->buf[0]) {
            if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
                return ret;
        }
    }

    // Plain-value state needed to parse the next frame's header.
    s->s.h.invisible = ssrc->s.h.invisible;
    s->s.h.keyframe = ssrc->s.h.keyframe;
    s->s.h.intraonly = ssrc->s.h.intraonly;
    s->ss_v = ssrc->ss_v;
    s->ss_h = ssrc->ss_h;
    s->s.h.segmentation.enabled = ssrc->s.h.segmentation.enabled;
    s->s.h.segmentation.update_map = ssrc->s.h.segmentation.update_map;
    s->s.h.segmentation.absolute_vals = ssrc->s.h.segmentation.absolute_vals;
    s->bytesperpixel = ssrc->bytesperpixel;
    s->gf_fmt = ssrc->gf_fmt;
    s->w = ssrc->w;
    s->h = ssrc->h;
    s->s.h.bpp = ssrc->s.h.bpp;
    s->bpp_index = ssrc->bpp_index;
    s->pix_fmt = ssrc->pix_fmt;
    memcpy(&s->prob_ctx, &ssrc->prob_ctx, sizeof(s->prob_ctx));
    memcpy(&s->s.h.lf_delta, &ssrc->s.h.lf_delta, sizeof(s->s.h.lf_delta));
    memcpy(&s->s.h.segmentation.feat, &ssrc->s.h.segmentation.feat,
           sizeof(s->s.h.segmentation.feat));

    return 0;
}
1863 #endif
1864 
1866  .name = "vp9",
1867  .long_name = NULL_IF_CONFIG_SMALL("Google VP9"),
1868  .type = AVMEDIA_TYPE_VIDEO,
1869  .id = AV_CODEC_ID_VP9,
1870  .priv_data_size = sizeof(VP9Context),
1871  .init = vp9_decode_init,
1872  .close = vp9_decode_free,
1879  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp9_decode_update_thread_context),
1881  .bsfs = "vp9_superframe_split",
1882  .hw_configs = (const AVCodecHWConfigInternal *const []) {
1883 #if CONFIG_VP9_DXVA2_HWACCEL
1884  HWACCEL_DXVA2(vp9),
1885 #endif
1886 #if CONFIG_VP9_D3D11VA_HWACCEL
1887  HWACCEL_D3D11VA(vp9),
1888 #endif
1889 #if CONFIG_VP9_D3D11VA2_HWACCEL
1890  HWACCEL_D3D11VA2(vp9),
1891 #endif
1892 #if CONFIG_VP9_NVDEC_HWACCEL
1893  HWACCEL_NVDEC(vp9),
1894 #endif
1895 #if CONFIG_VP9_VAAPI_HWACCEL
1896  HWACCEL_VAAPI(vp9),
1897 #endif
1898 #if CONFIG_VP9_VDPAU_HWACCEL
1899  HWACCEL_VDPAU(vp9),
1900 #endif
1901 #if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
1902  HWACCEL_VIDEOTOOLBOX(vp9),
1903 #endif
1904  NULL
1905  },
1906 };
AVVideoEncParams::qp
int32_t qp
Base quantisation parameter for the frame.
Definition: video_enc_params.h:103
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
AVCodec
AVCodec.
Definition: codec.h:202
av_buffer_pool_init
AVBufferPool * av_buffer_pool_init(size_t size, AVBufferRef *(*alloc)(size_t size))
Allocate and initialize a buffer pool.
Definition: buffer.c:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:42
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
decode_tiles
static int decode_tiles(AVCodecContext *avctx, const uint8_t *data, int size)
Definition: vp9.c:1254
td
#define td
Definition: regdef.h:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
vp9_frame_alloc
static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:102
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
PRED_SWITCHABLE
@ PRED_SWITCHABLE
Definition: vp9shared.h:51
PRED_SINGLEREF
@ PRED_SINGLEREF
Definition: vp9shared.h:49
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
Definition: utils.c:866
vpX_rac_is_end
static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c)
vp5689 returns 1 if the end of the stream has been reached, 0 otherwise.
Definition: vp56.h:239
VP9Frame::segmentation_map
uint8_t * segmentation_map
Definition: vp9shared.h:62
VP9Frame
Definition: vp9shared.h:59
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
av_clip_uintp2
#define av_clip_uintp2
Definition: common.h:120
decode_sb
static void decode_sb(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1083
ff_vp9_decoder
const AVCodec ff_vp9_decoder
Definition: vp9.c:1865
ff_vp9_adapt_probs
void ff_vp9_adapt_probs(VP9Context *s)
Definition: vp9prob.c:46
vp9_decode_flush
static void vp9_decode_flush(AVCodecContext *avctx)
Definition: vp9.c:1777
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
vp9_decode_frame
static int vp9_decode_frame(AVCodecContext *avctx, void *frame, int *got_frame, AVPacket *pkt)
Definition: vp9.c:1549
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
PRED_COMPREF
@ PRED_COMPREF
Definition: vp9shared.h:50
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
pixdesc.h
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:597
BlockPartition
BlockPartition
Definition: vp9shared.h:34
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
DC_PRED
@ DC_PRED
Definition: vp9.h:48
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
b
#define b
Definition: input.c:40
data
const char data[16]
Definition: mxf.c:143
update_size
static int update_size(AVCodecContext *avctx, int w, int h)
Definition: vp9.c:178
decode_sb_mem
static void decode_sb_mem(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
Definition: vp9.c:1162
REF_FRAME_SEGMAP
#define REF_FRAME_SEGMAP
Definition: vp9shared.h:165
decode_frame_header
static int decode_frame_header(AVCodecContext *avctx, const uint8_t *data, int size, int *ref)
Definition: vp9.c:498
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:404
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
AVCOL_SPC_RGB
@ AVCOL_SPC_RGB
order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB), YZX and ST 428-1
Definition: pixfmt.h:524
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
VP9_SYNCCODE
#define VP9_SYNCCODE
Definition: vp9.c:41
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
VP9Frame::hwaccel_priv_buf
AVBufferRef * hwaccel_priv_buf
Definition: vp9shared.h:66
VP9Filter
Definition: vp9dec.h:76
thread.h
ff_pthread_free
av_cold void ff_pthread_free(void *obj, const unsigned offsets[])
Definition: pthread.c:94
FILTER_SWITCHABLE
@ FILTER_SWITCHABLE
Definition: vp9.h:70
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
VP9Block
Definition: vp9dec.h:82
init
static int init
Definition: av_tx.c:47
VP9Frame::tf
ThreadFrame tf
Definition: vp9shared.h:60
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
vp56_rac_get_prob_branchy
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:287
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:529
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
AVCOL_SPC_RESERVED
@ AVCOL_SPC_RESERVED
reserved for future use by ITU-T and ISO/IEC just like 15-255 are
Definition: pixfmt.h:527
AVHWAccel
Definition: avcodec.h:2039
VP9Frame::extradata
AVBufferRef * extradata
Definition: vp9shared.h:61
TX_SWITCHABLE
@ TX_SWITCHABLE
Definition: vp9.h:33
finish
static void finish(void)
Definition: movenc.c:342
vp8_rac_get
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:324
ff_vp9_ac_qlookup
const int16_t ff_vp9_ac_qlookup[3][256]
Definition: vp9data.c:334
AVVideoEncParams::delta_qp
int32_t delta_qp[4][2]
Quantisation parameter offset from the base (per-frame) qp for a given plane (first index) and AC/DC ...
Definition: video_enc_params.h:109
fail
#define fail()
Definition: checkasm.h:127
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:420
GetBitContext
Definition: get_bits.h:62
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
PARTITION_NONE
@ PARTITION_NONE
Definition: vp9shared.h:35
VP9Frame::hwaccel_picture_private
void * hwaccel_picture_private
Definition: vp9shared.h:67
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
vp9_decode_free
static av_cold int vp9_decode_free(AVCodecContext *avctx)
Definition: vp9.c:1228
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:407
avassert.h
ff_vp9_model_pareto8
const uint8_t ff_vp9_model_pareto8[256][8]
Definition: vp9data.c:1176
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FF_CODEC_PROPERTY_LOSSLESS
#define FF_CODEC_PROPERTY_LOSSLESS
Definition: avcodec.h:1823
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:588
BL_8X8
@ BL_8X8
Definition: vp9shared.h:74
av_buffer_pool_get
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:387
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
PARTITION_V
@ PARTITION_V
Definition: vp9shared.h:37
vp9_frame_ref
static int vp9_frame_ref(AVCodecContext *avctx, VP9Frame *dst, VP9Frame *src)
Definition: vp9.c:148
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:530
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:218
vp9data.h
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
get_bits.h
VP56mv
Definition: vp56.h:68
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
ff_vp9dsp_init
av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
Definition: vp9dsp.c:86
decode012
static int decode012(GetBitContext *gb)
Definition: get_bits.h:832
ff_vp9_partition_tree
const int8_t ff_vp9_partition_tree[3][2]
Definition: vp9data.c:35
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
f
#define f(width, name)
Definition: cbs_vp9.c:255
vp56.h
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:967
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
VP9Context
Definition: vp9dec.h:94
vp8_rac_get_uint
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:340
vp8_rac_get_tree
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:396
REF_FRAME_MVPAIR
#define REF_FRAME_MVPAIR
Definition: vp9shared.h:164
AVHWAccel::end_frame
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:2140
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
profiles.h
src
#define src
Definition: vp8dsp.c:255
av_buffer_pool_uninit
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:322
AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUV440P10
Definition: pixfmt.h:406
pthread_internal.h
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:156
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:405
PARTITION_SPLIT
@ PARTITION_SPLIT
Definition: vp9shared.h:38
atomic_load_explicit
#define atomic_load_explicit(object, order)
Definition: stdatomic.h:96
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:68
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread_frame.c:1056
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:60
vp9.h
VP9Frame::uses_2pass
int uses_2pass
Definition: vp9shared.h:64
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:325
pix_fmt_rgb
static enum AVPixelFormat pix_fmt_rgb[3]
Definition: libdav1d.c:64
REF_INVALID_SCALE
#define REF_INVALID_SCALE
Definition: vp9dec.h:40
read_colorspace_details
static int read_colorspace_details(AVCodecContext *avctx)
Definition: vp9.c:436
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:409
size
int size
Definition: twinvq_data.h:10344
vp9_alloc_entries
static int vp9_alloc_entries(AVCodecContext *avctx, int n)
Definition: vp9.c:83
atomic_fetch_add_explicit
#define atomic_fetch_add_explicit(object, operand, order)
Definition: stdatomic.h:149
free_buffers
static void free_buffers(VP9Context *s)
Definition: vp9.c:1219
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:411
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
AVCodecHWConfigInternal
Definition: hwconfig.h:29
TX_4X4
@ TX_4X4
Definition: vp9.h:28
update_block_buffers
static int update_block_buffers(AVCodecContext *avctx)
Definition: vp9.c:301
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:303
ff_vp56_init_range_decoder
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
inv_recenter_nonneg
static av_always_inline int inv_recenter_nonneg(int v, int m)
Definition: vp9.c:368
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
FF_CODEC_CAP_SLICE_THREAD_HAS_MF
#define FF_CODEC_CAP_SLICE_THREAD_HAS_MF
Codec initializes slice-based threading with a main function.
Definition: internal.h:72
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1451
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
ff_slice_thread_execute_with_mainfunc
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx, action_func2 *func2, main_func *mainfunc, void *arg, int *ret, int job_count)
Definition: pthread_slice.c:121
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
derived from 170M primaries and D65 white point, 170M is derived from BT470 System M's primaries
Definition: pixfmt.h:531
assign
#define assign(var, type, n)
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:272
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
DEFINE_OFFSET_ARRAY
#define DEFINE_OFFSET_ARRAY(type, name, cnt_variable, mutexes, conds)
Definition: pthread_internal.h:61
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1822
AVCOL_SPC_BT2020_NCL
@ AVCOL_SPC_BT2020_NCL
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:534
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2129
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:421
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:523
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
get_sbits_inv
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
Definition: vp9.c:362
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:526
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:580
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
BL_64X64
@ BL_64X64
Definition: vp9shared.h:71
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
vp9_decode_init
static av_cold int vp9_decode_init(AVCodecContext *avctx)
Definition: vp9.c:1788
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:694
vp9_tile_data_free
static void vp9_tile_data_free(VP9TileData *td)
Definition: vp9.c:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
VP9mvrefPair
Definition: vp9shared.h:54
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:408
pthread_cond_signal
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:152
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:351
VP9TileData
Definition: vp9dec.h:165
VP56RangeCoder
Definition: vp56.h:87
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1459
VP9Filter::mask
uint8_t mask[2][2][8][4]
Definition: vp9dec.h:79
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVHWAccel::frame_priv_data_size
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:2149
vp9_frame_unref
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
Definition: vp9.c:93
VP9Frame::mv
VP9mvrefPair * mv
Definition: vp9shared.h:63
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1525
ff_vp9_decode_block
void ff_vp9_decode_block(VP9TileData *td, int row, int col, VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl, enum BlockPartition bp)
Definition: vp9block.c:1260
NEARESTMV
@ NEARESTMV
Definition: vp9shared.h:42
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1020
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
BlockLevel
BlockLevel
Definition: vp9shared.h:70
AVCodecContext::export_side_data
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame,...
Definition: avcodec.h:1984
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
ff_pthread_init
av_cold int ff_pthread_init(void *obj, const unsigned offsets[])
Initialize/destroy a list of mutexes/conditions contained in a structure.
Definition: pthread.c:107
pthread_cond_wait
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:192
vp9dec.h
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2101
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
ff_vp9_default_kf_partition_probs
const uint8_t ff_vp9_default_kf_partition_probs[4][4][3]
Definition: vp9data.c:41
AV_VIDEO_ENC_PARAMS_VP9
@ AV_VIDEO_ENC_PARAMS_VP9
VP9 stores:
Definition: video_enc_params.h:44
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:272
ff_vp9_default_probs
const ProbContext ff_vp9_default_probs
Definition: vp9data.c:1435
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:77
CUR_FRAME
#define CUR_FRAME
Definition: vp9shared.h:163
vp9_export_enc_params
static int vp9_export_enc_params(VP9Context *s, VP9Frame *frame)
Definition: vp9.c:1497
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
PARTITION_H
@ PARTITION_H
Definition: vp9shared.h:36
ff_vp9_loopfilter_sb
void ff_vp9_loopfilter_sb(AVCodecContext *avctx, VP9Filter *lflvl, int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
Definition: vp9lpf.c:178
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
d
d
Definition: ffmpeg_filter.c:153
HWACCEL_MAX
#define HWACCEL_MAX
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV440P12
Definition: pixfmt.h:410
h
h
Definition: vp9dsp_template.c:2038
atomic_init
#define atomic_init(obj, value)
Definition: stdatomic.h:33
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:525
ff_vp9_profiles
const AVProfile ff_vp9_profiles[]
Definition: profiles.c:139
video_enc_params.h
set_tile_offset
static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
Definition: vp9.c:1211
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2580
update_prob
static int update_prob(VP56RangeCoder *c, int p)
Definition: vp9.c:378
ff_vp9_dc_qlookup
const int16_t ff_vp9_dc_qlookup[3][256]
Definition: vp9data.c:231
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:64
ff_vp9_default_coef_probs
const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3]
Definition: vp9data.c:1540