FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "hwaccel.h"
31 #include "internal.h"
32 #include "mathops.h"
33 #include "rectangle.h"
34 #include "thread.h"
35 #include "vp8.h"
36 #include "vp8data.h"
37 
38 #if ARCH_ARM
39 # include "arm/vp8.h"
40 #endif
41 
42 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
43 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
44 #elif CONFIG_VP7_DECODER
45 #define VPX(vp7, f) vp7_ ## f
46 #else // CONFIG_VP8_DECODER
47 #define VPX(vp7, f) vp8_ ## f
48 #endif
49 
/* Free all per-context heap state: per-thread data (including its thread
 * synchronization objects) and the top-row scratch buffers.
 * NOTE(review): this scrape is missing interior lines 57, 59 and 62-63 —
 * presumably the pthread_mutex_destroy, the per-thread filter_strength
 * free, and the macroblocks_base/intra4x4 frees; confirm against the
 * original file before relying on this listing. */
50 static void free_buffers(VP8Context *s)
51 {
52  int i;
53  if (s->thread_data)
54  for (i = 0; i < MAX_THREADS; i++) {
55 #if HAVE_THREADS
56  pthread_cond_destroy(&s->thread_data[i].cond);
58 #endif
60  }
61  av_freep(&s->thread_data);
64  av_freep(&s->top_nnz);
65  av_freep(&s->top_border);
66 
/* macroblocks points into macroblocks_base; clear the alias so stale
 * pointers are not reused after the buffers are gone */
67  s->macroblocks = NULL;
68 }
69 
71 {
72  int ret;
73  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
74  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
75  return ret;
76  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
77  goto fail;
78  if (s->avctx->hwaccel) {
79  const AVHWAccel *hwaccel = s->avctx->hwaccel;
80  if (hwaccel->frame_priv_data_size) {
82  if (!f->hwaccel_priv_buf)
83  goto fail;
85  }
86  }
87  return 0;
88 
89 fail:
92  return AVERROR(ENOMEM);
93 }
94 
96 {
101 }
102 
103 #if CONFIG_VP8_DECODER
/* Make dst a new reference to src: any frame previously held in dst is
 * released first, then the ThreadFrame and the segmentation-map buffer are
 * ref'd. Returns 0 on success or a negative AVERROR; on failure dst is left
 * in the released state.
 * NOTE(review): lines 118 and 121 are missing from this scrape — presumably
 * the av_buffer_ref() of src->hwaccel_priv_buf and the assignment of
 * dst->hwaccel_picture_private; confirm against the original file. */
104 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
105 {
106  int ret;
107 
108  vp8_release_frame(s, dst);
109 
110  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
111  return ret;
112  if (src->seg_map &&
113  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
114  vp8_release_frame(s, dst);
115  return AVERROR(ENOMEM);
116  }
117  if (src->hwaccel_picture_private) {
119  if (!dst->hwaccel_priv_buf)
120  return AVERROR(ENOMEM);
122  }
123 
124  return 0;
125 }
126 #endif /* CONFIG_VP8_DECODER */
127 
128 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
129 {
130  VP8Context *s = avctx->priv_data;
131  int i;
132 
133  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
134  vp8_release_frame(s, &s->frames[i]);
135  memset(s->framep, 0, sizeof(s->framep));
136 
137  if (free_mem)
138  free_buffers(s);
139 }
140 
/* AVCodec flush callback: release all reference frames but keep the
 * allocated buffers (free_mem = 0) so decoding can resume after a seek. */
141 static void vp8_decode_flush(AVCodecContext *avctx)
142 {
143  vp8_decode_flush_impl(avctx, 0);
144 }
145 
147 {
148  VP8Frame *frame = NULL;
149  int i;
150 
151  // find a free buffer
152  for (i = 0; i < 5; i++)
153  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
154  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
155  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
156  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
157  frame = &s->frames[i];
158  break;
159  }
160  if (i == 5) {
161  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
162  abort();
163  }
164  if (frame->tf.f->buf[0])
165  vp8_release_frame(s, frame);
166 
167  return frame;
168 }
169 
171 {
172  enum AVPixelFormat pix_fmts[] = {
173 #if CONFIG_VP8_VAAPI_HWACCEL
175 #endif
176 #if CONFIG_VP8_NVDEC_HWACCEL
178 #endif
181  };
182 
183  return ff_get_format(s->avctx, pix_fmts);
184 }
185 
186 static av_always_inline
187 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
188 {
189  AVCodecContext *avctx = s->avctx;
190  int i, ret;
191 
192  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
193  height != s->avctx->height) {
195 
196  ret = ff_set_dimensions(s->avctx, width, height);
197  if (ret < 0)
198  return ret;
199  }
200 
201  if (!s->actually_webp && !is_vp7) {
202  s->pix_fmt = get_pixel_format(s);
203  if (s->pix_fmt < 0)
204  return AVERROR(EINVAL);
205  avctx->pix_fmt = s->pix_fmt;
206  }
207 
208  s->mb_width = (s->avctx->coded_width + 15) / 16;
209  s->mb_height = (s->avctx->coded_height + 15) / 16;
210 
211  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
212  avctx->thread_count > 1;
213  if (!s->mb_layout) { // Frame threading and one thread
214  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
215  sizeof(*s->macroblocks));
217  } else // Sliced threading
218  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
219  sizeof(*s->macroblocks));
220  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
221  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
223 
224  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
225  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
226  free_buffers(s);
227  return AVERROR(ENOMEM);
228  }
229 
230  for (i = 0; i < MAX_THREADS; i++) {
232  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
233  if (!s->thread_data[i].filter_strength) {
234  free_buffers(s);
235  return AVERROR(ENOMEM);
236  }
237 #if HAVE_THREADS
238  pthread_mutex_init(&s->thread_data[i].lock, NULL);
239  pthread_cond_init(&s->thread_data[i].cond, NULL);
240 #endif
241  }
242 
243  s->macroblocks = s->macroblocks_base + 1;
244 
245  return 0;
246 }
247 
249 {
250  return update_dimensions(s, width, height, IS_VP7);
251 }
252 
254 {
255  return update_dimensions(s, width, height, IS_VP8);
256 }
257 
258 
260 {
261  VP56RangeCoder *c = &s->c;
262  int i;
263 
266 
269 
270  for (i = 0; i < 4; i++)
272 
273  for (i = 0; i < 4; i++)
275  }
276  if (s->segmentation.update_map)
277  for (i = 0; i < 3; i++)
278  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
279 }
280 
282 {
283  VP56RangeCoder *c = &s->c;
284  int i;
285 
286  for (i = 0; i < 4; i++) {
287  if (vp8_rac_get(c)) {
288  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
289 
290  if (vp8_rac_get(c))
291  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
292  }
293  }
294 
295  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
296  if (vp8_rac_get(c)) {
297  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
298 
299  if (vp8_rac_get(c))
300  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
301  }
302  }
303 }
304 
305 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
306 {
307  const uint8_t *sizes = buf;
308  int i;
309  int ret;
310 
311  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
312 
313  buf += 3 * (s->num_coeff_partitions - 1);
314  buf_size -= 3 * (s->num_coeff_partitions - 1);
315  if (buf_size < 0)
316  return -1;
317 
318  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
319  int size = AV_RL24(sizes + 3 * i);
320  if (buf_size - size < 0)
321  return -1;
322  s->coeff_partition_size[i] = size;
323 
324  ret = ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
325  if (ret < 0)
326  return ret;
327  buf += size;
328  buf_size -= size;
329  }
330 
331  s->coeff_partition_size[i] = buf_size;
332  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
333 
334  return 0;
335 }
336 
337 static void vp7_get_quants(VP8Context *s)
338 {
339  VP56RangeCoder *c = &s->c;
340 
341  int yac_qi = vp8_rac_get_uint(c, 7);
342  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
343  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
344  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
345  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
346  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
347 
348  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
349  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
350  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
351  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
352  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
353  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
354 }
355 
356 static void vp8_get_quants(VP8Context *s)
357 {
358  VP56RangeCoder *c = &s->c;
359  int i, base_qi;
360 
361  s->quant.yac_qi = vp8_rac_get_uint(c, 7);
362  s->quant.ydc_delta = vp8_rac_get_sint(c, 4);
363  s->quant.y2dc_delta = vp8_rac_get_sint(c, 4);
364  s->quant.y2ac_delta = vp8_rac_get_sint(c, 4);
365  s->quant.uvdc_delta = vp8_rac_get_sint(c, 4);
366  s->quant.uvac_delta = vp8_rac_get_sint(c, 4);
367 
368  for (i = 0; i < 4; i++) {
369  if (s->segmentation.enabled) {
370  base_qi = s->segmentation.base_quant[i];
371  if (!s->segmentation.absolute_vals)
372  base_qi += s->quant.yac_qi;
373  } else
374  base_qi = s->quant.yac_qi;
375 
376  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.ydc_delta, 7)];
377  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
378  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7)] * 2;
379  /* 101581>>16 is equivalent to 155/100 */
380  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7)] * 101581 >> 16;
381  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7)];
382  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + s->quant.uvac_delta, 7)];
383 
384  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
385  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
386  }
387 }
388 
389 /**
390  * Determine which buffers golden and altref should be updated with after this frame.
391  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
392  *
393  * Intra frames update all 3 references
394  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
395  * If the update (golden|altref) flag is set, it's updated with the current frame
396  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
397  * If the flag is not set, the number read means:
398  * 0: no update
399  * 1: VP56_FRAME_PREVIOUS
400  * 2: update golden with altref, or update altref with golden
401  */
403 {
404  VP56RangeCoder *c = &s->c;
405 
406  if (update)
407  return VP56_FRAME_CURRENT;
408 
409  switch (vp8_rac_get_uint(c, 2)) {
410  case 1:
411  return VP56_FRAME_PREVIOUS;
412  case 2:
414  }
415  return VP56_FRAME_NONE;
416 }
417 
419 {
420  int i, j;
421  for (i = 0; i < 4; i++)
422  for (j = 0; j < 16; j++)
423  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
424  sizeof(s->prob->token[i][j]));
425 }
426 
428 {
429  VP56RangeCoder *c = &s->c;
430  int i, j, k, l, m;
431 
432  for (i = 0; i < 4; i++)
433  for (j = 0; j < 8; j++)
434  for (k = 0; k < 3; k++)
435  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
437  int prob = vp8_rac_get_uint(c, 8);
438  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
439  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
440  }
441 }
442 
443 #define VP7_MVC_SIZE 17
444 #define VP8_MVC_SIZE 19
445 
447  int mvc_size)
448 {
449  VP56RangeCoder *c = &s->c;
450  int i, j;
451 
452  if (vp8_rac_get(c))
453  for (i = 0; i < 4; i++)
454  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
455  if (vp8_rac_get(c))
456  for (i = 0; i < 3; i++)
457  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
458 
459  // 17.2 MV probability update
460  for (i = 0; i < 2; i++)
461  for (j = 0; j < mvc_size; j++)
463  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
464 }
465 
466 static void update_refs(VP8Context *s)
467 {
468  VP56RangeCoder *c = &s->c;
469 
470  int update_golden = vp8_rac_get(c);
471  int update_altref = vp8_rac_get(c);
472 
473  s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
474  s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
475 }
476 
477 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
478 {
479  int i, j;
480 
481  for (j = 1; j < 3; j++) {
482  for (i = 0; i < height / 2; i++)
483  memcpy(dst->data[j] + i * dst->linesize[j],
484  src->data[j] + i * src->linesize[j], width / 2);
485  }
486 }
487 
/**
 * Apply a VP7 fade to a plane: out = clip_uint8(in + (in * beta >> 8) + alpha)
 * for every pixel. src and dst may use different linesizes.
 */
static void fade(uint8_t *dst, ptrdiff_t dst_linesize,
                 const uint8_t *src, ptrdiff_t src_linesize,
                 int width, int height,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *src_row = src + y * src_linesize;
        uint8_t       *dst_row = dst + y * dst_linesize;

        for (x = 0; x < width; x++) {
            int pix = src_row[x];
            dst_row[x] = av_clip_uint8(pix + ((pix * beta) >> 8) + alpha);
        }
    }
}
503 
505 {
506  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
507  int beta = (int8_t) vp8_rac_get_uint(c, 8);
508  int ret;
509 
510  if (c->end <= c->buffer && c->bits >= 0)
511  return AVERROR_INVALIDDATA;
512 
513  if (!s->keyframe && (alpha || beta)) {
514  int width = s->mb_width * 16;
515  int height = s->mb_height * 16;
516  AVFrame *src, *dst;
517 
518  if (!s->framep[VP56_FRAME_PREVIOUS] ||
519  !s->framep[VP56_FRAME_GOLDEN]) {
520  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
521  return AVERROR_INVALIDDATA;
522  }
523 
524  dst =
525  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
526 
527  /* preserve the golden frame, write a new previous frame */
530  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
531  return ret;
532 
533  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
534 
535  copy_chroma(dst, src, width, height);
536  }
537 
538  fade(dst->data[0], dst->linesize[0],
539  src->data[0], src->linesize[0],
540  width, height, alpha, beta);
541  }
542 
543  return 0;
544 }
545 
546 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
547 {
548  VP56RangeCoder *c = &s->c;
549  int part1_size, hscale, vscale, i, j, ret;
550  int width = s->avctx->width;
551  int height = s->avctx->height;
552 
553  if (buf_size < 4) {
554  return AVERROR_INVALIDDATA;
555  }
556 
557  s->profile = (buf[0] >> 1) & 7;
558  if (s->profile > 1) {
559  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
560  return AVERROR_INVALIDDATA;
561  }
562 
563  s->keyframe = !(buf[0] & 1);
564  s->invisible = 0;
565  part1_size = AV_RL24(buf) >> 4;
566 
567  if (buf_size < 4 - s->profile + part1_size) {
568  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
569  return AVERROR_INVALIDDATA;
570  }
571 
572  buf += 4 - s->profile;
573  buf_size -= 4 - s->profile;
574 
575  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
576 
577  ret = ff_vp56_init_range_decoder(c, buf, part1_size);
578  if (ret < 0)
579  return ret;
580  buf += part1_size;
581  buf_size -= part1_size;
582 
583  /* A. Dimension information (keyframes only) */
584  if (s->keyframe) {
585  width = vp8_rac_get_uint(c, 12);
586  height = vp8_rac_get_uint(c, 12);
587  hscale = vp8_rac_get_uint(c, 2);
588  vscale = vp8_rac_get_uint(c, 2);
589  if (hscale || vscale)
590  avpriv_request_sample(s->avctx, "Upscaling");
591 
595  sizeof(s->prob->pred16x16));
597  sizeof(s->prob->pred8x8c));
598  for (i = 0; i < 2; i++)
599  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
600  sizeof(vp7_mv_default_prob[i]));
601  memset(&s->segmentation, 0, sizeof(s->segmentation));
602  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
603  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
604  }
605 
606  if (s->keyframe || s->profile > 0)
607  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
608 
609  /* B. Decoding information for all four macroblock-level features */
610  for (i = 0; i < 4; i++) {
611  s->feature_enabled[i] = vp8_rac_get(c);
612  if (s->feature_enabled[i]) {
614 
615  for (j = 0; j < 3; j++)
616  s->feature_index_prob[i][j] =
617  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
618 
619  if (vp7_feature_value_size[s->profile][i])
620  for (j = 0; j < 4; j++)
621  s->feature_value[i][j] =
623  }
624  }
625 
626  s->segmentation.enabled = 0;
627  s->segmentation.update_map = 0;
628  s->lf_delta.enabled = 0;
629 
630  s->num_coeff_partitions = 1;
631  ret = ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
632  if (ret < 0)
633  return ret;
634 
635  if (!s->macroblocks_base || /* first frame */
636  width != s->avctx->width || height != s->avctx->height ||
637  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
638  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
639  return ret;
640  }
641 
642  /* C. Dequantization indices */
643  vp7_get_quants(s);
644 
645  /* D. Golden frame update flag (a Flag) for interframes only */
646  if (!s->keyframe) {
649  }
650 
651  s->update_last = 1;
652  s->update_probabilities = 1;
653  s->fade_present = 1;
654 
655  if (s->profile > 0) {
657  if (!s->update_probabilities)
658  s->prob[1] = s->prob[0];
659 
660  if (!s->keyframe)
661  s->fade_present = vp8_rac_get(c);
662  }
663 
664  if (c->end <= c->buffer && c->bits >= 0)
665  return AVERROR_INVALIDDATA;
666  /* E. Fading information for previous frame */
667  if (s->fade_present && vp8_rac_get(c)) {
668  if ((ret = vp7_fade_frame(s ,c)) < 0)
669  return ret;
670  }
671 
672  /* F. Loop filter type */
673  if (!s->profile)
674  s->filter.simple = vp8_rac_get(c);
675 
676  /* G. DCT coefficient ordering specification */
677  if (vp8_rac_get(c))
678  for (i = 1; i < 16; i++)
679  s->prob[0].scan[i] = ff_zigzag_scan[vp8_rac_get_uint(c, 4)];
680 
681  /* H. Loop filter levels */
682  if (s->profile > 0)
683  s->filter.simple = vp8_rac_get(c);
684  s->filter.level = vp8_rac_get_uint(c, 6);
685  s->filter.sharpness = vp8_rac_get_uint(c, 3);
686 
687  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
689 
690  s->mbskip_enabled = 0;
691 
692  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
693  if (!s->keyframe) {
694  s->prob->intra = vp8_rac_get_uint(c, 8);
695  s->prob->last = vp8_rac_get_uint(c, 8);
697  }
698 
699  return 0;
700 }
701 
702 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
703 {
704  VP56RangeCoder *c = &s->c;
705  int header_size, hscale, vscale, ret;
706  int width = s->avctx->width;
707  int height = s->avctx->height;
708 
709  if (buf_size < 3) {
710  av_log(s->avctx, AV_LOG_ERROR, "Insufficent data (%d) for header\n", buf_size);
711  return AVERROR_INVALIDDATA;
712  }
713 
714  s->keyframe = !(buf[0] & 1);
715  s->profile = (buf[0]>>1) & 7;
716  s->invisible = !(buf[0] & 0x10);
717  header_size = AV_RL24(buf) >> 5;
718  buf += 3;
719  buf_size -= 3;
720 
721  s->header_partition_size = header_size;
722 
723  if (s->profile > 3)
724  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
725 
726  if (!s->profile)
728  sizeof(s->put_pixels_tab));
729  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
731  sizeof(s->put_pixels_tab));
732 
733  if (header_size > buf_size - 7 * s->keyframe) {
734  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
735  return AVERROR_INVALIDDATA;
736  }
737 
738  if (s->keyframe) {
739  if (AV_RL24(buf) != 0x2a019d) {
741  "Invalid start code 0x%x\n", AV_RL24(buf));
742  return AVERROR_INVALIDDATA;
743  }
744  width = AV_RL16(buf + 3) & 0x3fff;
745  height = AV_RL16(buf + 5) & 0x3fff;
746  hscale = buf[4] >> 6;
747  vscale = buf[6] >> 6;
748  buf += 7;
749  buf_size -= 7;
750 
751  if (hscale || vscale)
752  avpriv_request_sample(s->avctx, "Upscaling");
753 
757  sizeof(s->prob->pred16x16));
759  sizeof(s->prob->pred8x8c));
760  memcpy(s->prob->mvc, vp8_mv_default_prob,
761  sizeof(s->prob->mvc));
762  memset(&s->segmentation, 0, sizeof(s->segmentation));
763  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
764  }
765 
766  ret = ff_vp56_init_range_decoder(c, buf, header_size);
767  if (ret < 0)
768  return ret;
769  buf += header_size;
770  buf_size -= header_size;
771 
772  if (s->keyframe) {
773  s->colorspace = vp8_rac_get(c);
774  if (s->colorspace)
775  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
776  s->fullrange = vp8_rac_get(c);
777  }
778 
779  if ((s->segmentation.enabled = vp8_rac_get(c)))
781  else
782  s->segmentation.update_map = 0; // FIXME: move this to some init function?
783 
784  s->filter.simple = vp8_rac_get(c);
785  s->filter.level = vp8_rac_get_uint(c, 6);
786  s->filter.sharpness = vp8_rac_get_uint(c, 3);
787 
788  if ((s->lf_delta.enabled = vp8_rac_get(c))) {
789  s->lf_delta.update = vp8_rac_get(c);
790  if (s->lf_delta.update)
791  update_lf_deltas(s);
792  }
793 
794  if (setup_partitions(s, buf, buf_size)) {
795  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
796  return AVERROR_INVALIDDATA;
797  }
798 
799  if (!s->macroblocks_base || /* first frame */
800  width != s->avctx->width || height != s->avctx->height ||
801  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
802  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
803  return ret;
804 
805  vp8_get_quants(s);
806 
807  if (!s->keyframe) {
808  update_refs(s);
810  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
811  }
812 
813  // if we aren't saving this frame's probabilities for future frames,
814  // make a copy of the current probabilities
815  if (!(s->update_probabilities = vp8_rac_get(c)))
816  s->prob[1] = s->prob[0];
817 
818  s->update_last = s->keyframe || vp8_rac_get(c);
819 
821 
822  if ((s->mbskip_enabled = vp8_rac_get(c)))
823  s->prob->mbskip = vp8_rac_get_uint(c, 8);
824 
825  if (!s->keyframe) {
826  s->prob->intra = vp8_rac_get_uint(c, 8);
827  s->prob->last = vp8_rac_get_uint(c, 8);
828  s->prob->golden = vp8_rac_get_uint(c, 8);
830  }
831 
832  // Record the entropy coder state here so that hwaccels can use it.
833  s->c.code_word = vp56_rac_renorm(&s->c);
834  s->coder_state_at_header_end.input = s->c.buffer - (-s->c.bits / 8);
838 
839  return 0;
840 }
841 
842 static av_always_inline
843 void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
844 {
845  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
846  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
847  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
848  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
849 }
850 
851 /**
852  * Motion vector coding, 17.1.
853  */
855 {
856  int bit, x = 0;
857 
858  if (vp56_rac_get_prob_branchy(c, p[0])) {
859  int i;
860 
861  for (i = 0; i < 3; i++)
862  x += vp56_rac_get_prob(c, p[9 + i]) << i;
863  for (i = (vp7 ? 7 : 9); i > 3; i--)
864  x += vp56_rac_get_prob(c, p[9 + i]) << i;
865  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
866  x += 8;
867  } else {
868  // small_mvtree
869  const uint8_t *ps = p + 2;
870  bit = vp56_rac_get_prob(c, *ps);
871  ps += 1 + 3 * bit;
872  x += 4 * bit;
873  bit = vp56_rac_get_prob(c, *ps);
874  ps += 1 + bit;
875  x += 2 * bit;
876  x += vp56_rac_get_prob(c, *ps);
877  }
878 
879  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
880 }
881 
883 {
884  return read_mv_component(c, p, 1);
885 }
886 
888 {
889  return read_mv_component(c, p, 0);
890 }
891 
892 static av_always_inline
893 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
894 {
895  if (is_vp7)
896  return vp7_submv_prob;
897 
898  if (left == top)
899  return vp8_submv_prob[4 - !!left];
900  if (!top)
901  return vp8_submv_prob[2];
902  return vp8_submv_prob[1 - !!left];
903 }
904 
905 /**
906  * Split motion vector prediction, 16.4.
907  * @returns the number of motion vectors parsed (2, 4 or 16)
908  */
909 static av_always_inline
911  int layout, int is_vp7)
912 {
913  int part_idx;
914  int n, num;
915  VP8Macroblock *top_mb;
916  VP8Macroblock *left_mb = &mb[-1];
917  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
918  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
919  VP56mv *top_mv;
920  VP56mv *left_mv = left_mb->bmv;
921  VP56mv *cur_mv = mb->bmv;
922 
923  if (!layout) // layout is inlined, s->mb_layout is not
924  top_mb = &mb[2];
925  else
926  top_mb = &mb[-s->mb_width - 1];
927  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
928  top_mv = top_mb->bmv;
929 
933  else
934  part_idx = VP8_SPLITMVMODE_8x8;
935  } else {
936  part_idx = VP8_SPLITMVMODE_4x4;
937  }
938 
939  num = vp8_mbsplit_count[part_idx];
940  mbsplits_cur = vp8_mbsplits[part_idx],
941  firstidx = vp8_mbfirstidx[part_idx];
942  mb->partitioning = part_idx;
943 
944  for (n = 0; n < num; n++) {
945  int k = firstidx[n];
946  uint32_t left, above;
947  const uint8_t *submv_prob;
948 
949  if (!(k & 3))
950  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
951  else
952  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
953  if (k <= 3)
954  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
955  else
956  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
957 
958  submv_prob = get_submv_prob(left, above, is_vp7);
959 
960  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
961  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
962  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
963  mb->bmv[n].y = mb->mv.y +
964  read_mv_component(c, s->prob->mvc[0], is_vp7);
965  mb->bmv[n].x = mb->mv.x +
966  read_mv_component(c, s->prob->mvc[1], is_vp7);
967  } else {
968  AV_ZERO32(&mb->bmv[n]);
969  }
970  } else {
971  AV_WN32A(&mb->bmv[n], above);
972  }
973  } else {
974  AV_WN32A(&mb->bmv[n], left);
975  }
976  }
977 
978  return num;
979 }
980 
/**
 * The vp7 reference decoder uses a padding macroblock column (added to the
 * right edge of the frame) to guard against illegal macroblock offsets. The
 * algorithm has bugs that permit offsets to straddle the padding column;
 * this function replicates those bugs bit-exactly.
 *
 * @param[out] edge_x macroblock x address (written only on success)
 * @param[out] edge_y macroblock y address (written only on success)
 *
 * @return 1 if the offset addresses a legal macroblock, 0 otherwise
 */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    const int vwidth = mb_width + 1;          /* includes the padding column */
    const int pos = (mb_y + yoffset) * vwidth + mb_x + xoffset;

    if (pos < boundary)                       /* above the allowed region */
        return 0;
    if (pos % vwidth == vwidth - 1)           /* lands on the padding column */
        return 0;
    *edge_y = pos / vwidth;
    *edge_x = pos % vwidth;
    return 1;
}
1004 
1005 static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
1006 {
1007  return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
1008 }
1009 
1010 static av_always_inline
1012  int mb_x, int mb_y, int layout)
1013 {
1014  VP8Macroblock *mb_edge[12];
1015  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
1016  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1017  int idx = CNT_ZERO;
1018  VP56mv near_mv[3];
1019  uint8_t cnt[3] = { 0 };
1020  VP56RangeCoder *c = &s->c;
1021  int i;
1022 
1023  AV_ZERO32(&near_mv[0]);
1024  AV_ZERO32(&near_mv[1]);
1025  AV_ZERO32(&near_mv[2]);
1026 
1027  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
1028  const VP7MVPred * pred = &vp7_mv_pred[i];
1029  int edge_x, edge_y;
1030 
1031  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
1032  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
1033  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
1034  ? s->macroblocks_base + 1 + edge_x +
1035  (s->mb_width + 1) * (edge_y + 1)
1036  : s->macroblocks + edge_x +
1037  (s->mb_height - edge_y - 1) * 2;
1038  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
1039  if (mv) {
1040  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
1041  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
1042  idx = CNT_NEAREST;
1043  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
1044  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
1045  continue;
1046  idx = CNT_NEAR;
1047  } else {
1048  AV_WN32A(&near_mv[CNT_NEAR], mv);
1049  idx = CNT_NEAR;
1050  }
1051  } else {
1052  AV_WN32A(&near_mv[CNT_NEAREST], mv);
1053  idx = CNT_NEAREST;
1054  }
1055  } else {
1056  idx = CNT_ZERO;
1057  }
1058  } else {
1059  idx = CNT_ZERO;
1060  }
1061  cnt[idx] += vp7_mv_pred[i].score;
1062  }
1063 
1065 
1066  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
1067  mb->mode = VP8_MVMODE_MV;
1068 
1069  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
1070 
1071  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
1072 
1073  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
1074  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
1075  else
1076  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
1077 
1078  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
1079  mb->mode = VP8_MVMODE_SPLIT;
1080  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
1081  } else {
1082  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
1083  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
1084  mb->bmv[0] = mb->mv;
1085  }
1086  } else {
1087  mb->mv = near_mv[CNT_NEAR];
1088  mb->bmv[0] = mb->mv;
1089  }
1090  } else {
1091  mb->mv = near_mv[CNT_NEAREST];
1092  mb->bmv[0] = mb->mv;
1093  }
1094  } else {
1095  mb->mode = VP8_MVMODE_ZERO;
1096  AV_ZERO32(&mb->mv);
1097  mb->bmv[0] = mb->mv;
1098  }
1099 }
1100 
1101 static av_always_inline
1103  int mb_x, int mb_y, int layout)
1104 {
1105  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1106  mb - 1 /* left */,
1107  0 /* top-left */ };
1108  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1109  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1110  int idx = CNT_ZERO;
1111  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1112  int8_t *sign_bias = s->sign_bias;
1113  VP56mv near_mv[4];
1114  uint8_t cnt[4] = { 0 };
1115  VP56RangeCoder *c = &s->c;
1116 
1117  if (!layout) { // layout is inlined (s->mb_layout is not)
1118  mb_edge[0] = mb + 2;
1119  mb_edge[2] = mb + 1;
1120  } else {
1121  mb_edge[0] = mb - s->mb_width - 1;
1122  mb_edge[2] = mb - s->mb_width - 2;
1123  }
1124 
1125  AV_ZERO32(&near_mv[0]);
1126  AV_ZERO32(&near_mv[1]);
1127  AV_ZERO32(&near_mv[2]);
1128 
1129  /* Process MB on top, left and top-left */
1130 #define MV_EDGE_CHECK(n) \
1131  { \
1132  VP8Macroblock *edge = mb_edge[n]; \
1133  int edge_ref = edge->ref_frame; \
1134  if (edge_ref != VP56_FRAME_CURRENT) { \
1135  uint32_t mv = AV_RN32A(&edge->mv); \
1136  if (mv) { \
1137  if (cur_sign_bias != sign_bias[edge_ref]) { \
1138  /* SWAR negate of the values in mv. */ \
1139  mv = ~mv; \
1140  mv = ((mv & 0x7fff7fff) + \
1141  0x00010001) ^ (mv & 0x80008000); \
1142  } \
1143  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1144  AV_WN32A(&near_mv[++idx], mv); \
1145  cnt[idx] += 1 + (n != 2); \
1146  } else \
1147  cnt[CNT_ZERO] += 1 + (n != 2); \
1148  } \
1149  }
1150 
1151  MV_EDGE_CHECK(0)
1152  MV_EDGE_CHECK(1)
1153  MV_EDGE_CHECK(2)
1154 
1156  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1157  mb->mode = VP8_MVMODE_MV;
1158 
1159  /* If we have three distinct MVs, merge first and last if they're the same */
1160  if (cnt[CNT_SPLITMV] &&
1161  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1162  cnt[CNT_NEAREST] += 1;
1163 
1164  /* Swap near and nearest if necessary */
1165  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1166  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1167  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1168  }
1169 
1170  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1171  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1172  /* Choose the best mv out of 0,0 and the nearest mv */
1173  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1174  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1175  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1176  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1177 
1178  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1179  mb->mode = VP8_MVMODE_SPLIT;
1180  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1181  } else {
1182  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1183  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1184  mb->bmv[0] = mb->mv;
1185  }
1186  } else {
1187  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAR]);
1188  mb->bmv[0] = mb->mv;
1189  }
1190  } else {
1191  clamp_mv(mv_bounds, &mb->mv, &near_mv[CNT_NEAREST]);
1192  mb->bmv[0] = mb->mv;
1193  }
1194  } else {
1195  mb->mode = VP8_MVMODE_ZERO;
1196  AV_ZERO32(&mb->mv);
1197  mb->bmv[0] = mb->mv;
1198  }
1199 }
1200 
1201 static av_always_inline
1203  int mb_x, int keyframe, int layout)
1204 {
1205  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1206 
1207  if (layout) {
1208  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1209  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1210  }
1211  if (keyframe) {
1212  int x, y;
1213  uint8_t *top;
1214  uint8_t *const left = s->intra4x4_pred_mode_left;
1215  if (layout)
1216  top = mb->intra4x4_pred_mode_top;
1217  else
1218  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1219  for (y = 0; y < 4; y++) {
1220  for (x = 0; x < 4; x++) {
1221  const uint8_t *ctx;
1222  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1223  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1224  left[y] = top[x] = *intra4x4;
1225  intra4x4++;
1226  }
1227  }
1228  } else {
1229  int i;
1230  for (i = 0; i < 16; i++)
1231  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1233  }
1234 }
1235 
1236 static av_always_inline
1238  VP8Macroblock *mb, int mb_x, int mb_y,
1239  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1240 {
1241  VP56RangeCoder *c = &s->c;
1242  static const char * const vp7_feature_name[] = { "q-index",
1243  "lf-delta",
1244  "partial-golden-update",
1245  "blit-pitch" };
1246  if (is_vp7) {
1247  int i;
1248  *segment = 0;
1249  for (i = 0; i < 4; i++) {
1250  if (s->feature_enabled[i]) {
1253  s->feature_index_prob[i]);
1255  "Feature %s present in macroblock (value 0x%x)\n",
1256  vp7_feature_name[i], s->feature_value[i][index]);
1257  }
1258  }
1259  }
1260  } else if (s->segmentation.update_map) {
1261  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1262  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1263  } else if (s->segmentation.enabled)
1264  *segment = ref ? *ref : *segment;
1265  mb->segment = *segment;
1266 
1267  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1268 
1269  if (s->keyframe) {
1272 
1273  if (mb->mode == MODE_I4x4) {
1274  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1275  } else {
1276  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1277  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1278  if (s->mb_layout)
1279  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1280  else
1281  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1282  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1283  }
1284 
1288  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1289  // inter MB, 16.2
1290  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1291  mb->ref_frame =
1292  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1294  else
1296  s->ref_count[mb->ref_frame - 1]++;
1297 
1298  // motion vectors, 16.3
1299  if (is_vp7)
1300  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1301  else
1302  vp8_decode_mvs(s, mv_bounds, mb, mb_x, mb_y, layout);
1303  } else {
1304  // intra MB, 16.1
1306 
1307  if (mb->mode == MODE_I4x4)
1308  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1309 
1311  s->prob->pred8x8c);
1314  AV_ZERO32(&mb->bmv[0]);
1315  }
1316 }
1317 
1318 /**
1319  * @param r arithmetic bitstream reader context
1320  * @param block destination for block coefficients
1321  * @param probs probabilities to use when reading trees from the bitstream
1322  * @param i initial coeff index, 0 unless a separate DC block is coded
1323  * @param qmul array holding the dc/ac dequant factor at position 0/1
1324  *
1325  * @return 0 if no coeffs were decoded
1326  * otherwise, the index of the last coeff decoded plus one
1327  */
1328 static av_always_inline
1330  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1331  int i, uint8_t *token_prob, int16_t qmul[2],
1332  const uint8_t scan[16], int vp7)
1333 {
1334  VP56RangeCoder c = *r;
1335  goto skip_eob;
1336  do {
1337  int coeff;
1338 restart:
1339  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1340  break;
1341 
1342 skip_eob:
1343  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1344  if (++i == 16)
1345  break; // invalid input; blocks should end with EOB
1346  token_prob = probs[i][0];
1347  if (vp7)
1348  goto restart;
1349  goto skip_eob;
1350  }
1351 
1352  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1353  coeff = 1;
1354  token_prob = probs[i + 1][1];
1355  } else {
1356  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1357  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1358  if (coeff)
1359  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1360  coeff += 2;
1361  } else {
1362  // DCT_CAT*
1363  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1364  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1365  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1366  } else { // DCT_CAT2
1367  coeff = 7;
1368  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1369  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1370  }
1371  } else { // DCT_CAT3 and up
1372  int a = vp56_rac_get_prob(&c, token_prob[8]);
1373  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1374  int cat = (a << 1) + b;
1375  coeff = 3 + (8 << cat);
1376  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1377  }
1378  }
1379  token_prob = probs[i + 1][2];
1380  }
1381  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1382  } while (++i < 16);
1383 
1384  *r = c;
1385  return i;
1386 }
1387 
1388 static av_always_inline
1389 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1390 {
1391  int16_t dc = block[0];
1392  int ret = 0;
1393 
1394  if (pred[1] > 3) {
1395  dc += pred[0];
1396  ret = 1;
1397  }
1398 
1399  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1400  block[0] = pred[0] = dc;
1401  pred[1] = 0;
1402  } else {
1403  if (pred[0] == dc)
1404  pred[1]++;
1405  block[0] = pred[0] = dc;
1406  }
1407 
1408  return ret;
1409 }
1410 
1412  int16_t block[16],
1413  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1414  int i, uint8_t *token_prob,
1415  int16_t qmul[2],
1416  const uint8_t scan[16])
1417 {
1418  return decode_block_coeffs_internal(r, block, probs, i,
1419  token_prob, qmul, scan, IS_VP7);
1420 }
1421 
1422 #ifndef vp8_decode_block_coeffs_internal
1424  int16_t block[16],
1425  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1426  int i, uint8_t *token_prob,
1427  int16_t qmul[2])
1428 {
1429  return decode_block_coeffs_internal(r, block, probs, i,
1430  token_prob, qmul, ff_zigzag_scan, IS_VP8);
1431 }
1432 #endif
1433 
1434 /**
1435  * @param c arithmetic bitstream reader context
1436  * @param block destination for block coefficients
1437  * @param probs probabilities to use when reading trees from the bitstream
1438  * @param i initial coeff index, 0 unless a separate DC block is coded
1439  * @param zero_nhood the initial prediction context for number of surrounding
1440  * all-zero blocks (only left/top, so 0-2)
1441  * @param qmul array holding the dc/ac dequant factor at position 0/1
1442  * @param scan scan pattern (VP7 only)
1443  *
1444  * @return 0 if no coeffs were decoded
1445  * otherwise, the index of the last coeff decoded plus one
1446  */
1447 static av_always_inline
1449  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1450  int i, int zero_nhood, int16_t qmul[2],
1451  const uint8_t scan[16], int vp7)
1452 {
1453  uint8_t *token_prob = probs[i][zero_nhood];
1454  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1455  return 0;
1456  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1457  token_prob, qmul, scan)
1458  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1459  token_prob, qmul);
1460 }
1461 
1462 static av_always_inline
1464  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1465  int is_vp7)
1466 {
1467  int i, x, y, luma_start = 0, luma_ctx = 3;
1468  int nnz_pred, nnz, nnz_total = 0;
1469  int segment = mb->segment;
1470  int block_dc = 0;
1471 
1472  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1473  nnz_pred = t_nnz[8] + l_nnz[8];
1474 
1475  // decode DC values and do hadamard
1476  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1477  nnz_pred, s->qmat[segment].luma_dc_qmul,
1478  ff_zigzag_scan, is_vp7);
1479  l_nnz[8] = t_nnz[8] = !!nnz;
1480 
1481  if (is_vp7 && mb->mode > MODE_I4x4) {
1482  nnz |= inter_predict_dc(td->block_dc,
1483  s->inter_dc_pred[mb->ref_frame - 1]);
1484  }
1485 
1486  if (nnz) {
1487  nnz_total += nnz;
1488  block_dc = 1;
1489  if (nnz == 1)
1490  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1491  else
1492  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1493  }
1494  luma_start = 1;
1495  luma_ctx = 0;
1496  }
1497 
1498  // luma blocks
1499  for (y = 0; y < 4; y++)
1500  for (x = 0; x < 4; x++) {
1501  nnz_pred = l_nnz[y] + t_nnz[x];
1502  nnz = decode_block_coeffs(c, td->block[y][x],
1503  s->prob->token[luma_ctx],
1504  luma_start, nnz_pred,
1505  s->qmat[segment].luma_qmul,
1506  s->prob[0].scan, is_vp7);
1507  /* nnz+block_dc may be one more than the actual last index,
1508  * but we don't care */
1509  td->non_zero_count_cache[y][x] = nnz + block_dc;
1510  t_nnz[x] = l_nnz[y] = !!nnz;
1511  nnz_total += nnz;
1512  }
1513 
1514  // chroma blocks
1515  // TODO: what to do about dimensions? 2nd dim for luma is x,
1516  // but for chroma it's (y<<1)|x
1517  for (i = 4; i < 6; i++)
1518  for (y = 0; y < 2; y++)
1519  for (x = 0; x < 2; x++) {
1520  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1521  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1522  s->prob->token[2], 0, nnz_pred,
1523  s->qmat[segment].chroma_qmul,
1524  s->prob[0].scan, is_vp7);
1525  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1526  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1527  nnz_total += nnz;
1528  }
1529 
1530  // if there were no coded coeffs despite the macroblock not being marked skip,
1531  // we MUST not do the inner loop filter and should not do IDCT
1532  // Since skip isn't used for bitstream prediction, just manually set it.
1533  if (!nnz_total)
1534  mb->skip = 1;
1535 }
1536 
1537 static av_always_inline
1538 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1539  uint8_t *src_cb, uint8_t *src_cr,
1540  ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
1541 {
1542  AV_COPY128(top_border, src_y + 15 * linesize);
1543  if (!simple) {
1544  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1545  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1546  }
1547 }
1548 
1549 static av_always_inline
1550 void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
1551  uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x,
1552  int mb_y, int mb_width, int simple, int xchg)
1553 {
1554  uint8_t *top_border_m1 = top_border - 32; // for TL prediction
1555  src_y -= linesize;
1556  src_cb -= uvlinesize;
1557  src_cr -= uvlinesize;
1558 
1559 #define XCHG(a, b, xchg) \
1560  do { \
1561  if (xchg) \
1562  AV_SWAP64(b, a); \
1563  else \
1564  AV_COPY64(b, a); \
1565  } while (0)
1566 
1567  XCHG(top_border_m1 + 8, src_y - 8, xchg);
1568  XCHG(top_border, src_y, xchg);
1569  XCHG(top_border + 8, src_y + 8, 1);
1570  if (mb_x < mb_width - 1)
1571  XCHG(top_border + 32, src_y + 16, 1);
1572 
1573  // only copy chroma for normal loop filter
1574  // or to initialize the top row to 127
1575  if (!simple || !mb_y) {
1576  XCHG(top_border_m1 + 16, src_cb - 8, xchg);
1577  XCHG(top_border_m1 + 24, src_cr - 8, xchg);
1578  XCHG(top_border + 16, src_cb, 1);
1579  XCHG(top_border + 24, src_cr, 1);
1580  }
1581 }
1582 
1583 static av_always_inline
1584 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1585 {
1586  if (!mb_x)
1587  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1588  else
1589  return mb_y ? mode : LEFT_DC_PRED8x8;
1590 }
1591 
1592 static av_always_inline
1593 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1594 {
1595  if (!mb_x)
1596  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1597  else
1598  return mb_y ? mode : HOR_PRED8x8;
1599 }
1600 
1601 static av_always_inline
1602 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1603 {
1604  switch (mode) {
1605  case DC_PRED8x8:
1606  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1607  case VERT_PRED8x8:
1608  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1609  case HOR_PRED8x8:
1610  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1611  case PLANE_PRED8x8: /* TM */
1612  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1613  }
1614  return mode;
1615 }
1616 
1617 static av_always_inline
1618 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1619 {
1620  if (!mb_x) {
1621  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1622  } else {
1623  return mb_y ? mode : HOR_VP8_PRED;
1624  }
1625 }
1626 
1627 static av_always_inline
1628 int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
1629  int *copy_buf, int vp7)
1630 {
1631  switch (mode) {
1632  case VERT_PRED:
1633  if (!mb_x && mb_y) {
1634  *copy_buf = 1;
1635  return mode;
1636  }
1637  /* fall-through */
1638  case DIAG_DOWN_LEFT_PRED:
1639  case VERT_LEFT_PRED:
1640  return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
1641  case HOR_PRED:
1642  if (!mb_y) {
1643  *copy_buf = 1;
1644  return mode;
1645  }
1646  /* fall-through */
1647  case HOR_UP_PRED:
1648  return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
1649  case TM_VP8_PRED:
1650  return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
1651  case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
1652  * as 16x16/8x8 DC */
1653  case DIAG_DOWN_RIGHT_PRED:
1654  case VERT_RIGHT_PRED:
1655  case HOR_DOWN_PRED:
1656  if (!mb_y || !mb_x)
1657  *copy_buf = 1;
1658  return mode;
1659  }
1660  return mode;
1661 }
1662 
1663 static av_always_inline
1665  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1666 {
1667  int x, y, mode, nnz;
1668  uint32_t tr;
1669 
1670  /* for the first row, we need to run xchg_mb_border to init the top edge
1671  * to 127 otherwise, skip it if we aren't going to deblock */
1672  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1673  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1674  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1675  s->filter.simple, 1);
1676 
1677  if (mb->mode < MODE_I4x4) {
1678  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1679  s->hpc.pred16x16[mode](dst[0], s->linesize);
1680  } else {
1681  uint8_t *ptr = dst[0];
1682  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1683  const uint8_t lo = is_vp7 ? 128 : 127;
1684  const uint8_t hi = is_vp7 ? 128 : 129;
1685  uint8_t tr_top[4] = { lo, lo, lo, lo };
1686 
1687  // all blocks on the right edge of the macroblock use bottom edge
1688  // the top macroblock for their topright edge
1689  uint8_t *tr_right = ptr - s->linesize + 16;
1690 
1691  // if we're on the right edge of the frame, said edge is extended
1692  // from the top macroblock
1693  if (mb_y && mb_x == s->mb_width - 1) {
1694  tr = tr_right[-1] * 0x01010101u;
1695  tr_right = (uint8_t *) &tr;
1696  }
1697 
1698  if (mb->skip)
1700 
1701  for (y = 0; y < 4; y++) {
1702  uint8_t *topright = ptr + 4 - s->linesize;
1703  for (x = 0; x < 4; x++) {
1704  int copy = 0;
1705  ptrdiff_t linesize = s->linesize;
1706  uint8_t *dst = ptr + 4 * x;
1707  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1708 
1709  if ((y == 0 || x == 3) && mb_y == 0) {
1710  topright = tr_top;
1711  } else if (x == 3)
1712  topright = tr_right;
1713 
1714  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1715  mb_y + y, &copy, is_vp7);
1716  if (copy) {
1717  dst = copy_dst + 12;
1718  linesize = 8;
1719  if (!(mb_y + y)) {
1720  copy_dst[3] = lo;
1721  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1722  } else {
1723  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1724  if (!(mb_x + x)) {
1725  copy_dst[3] = hi;
1726  } else {
1727  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1728  }
1729  }
1730  if (!(mb_x + x)) {
1731  copy_dst[11] =
1732  copy_dst[19] =
1733  copy_dst[27] =
1734  copy_dst[35] = hi;
1735  } else {
1736  copy_dst[11] = ptr[4 * x - 1];
1737  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1738  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1739  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1740  }
1741  }
1742  s->hpc.pred4x4[mode](dst, topright, linesize);
1743  if (copy) {
1744  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1745  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1746  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1747  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1748  }
1749 
1750  nnz = td->non_zero_count_cache[y][x];
1751  if (nnz) {
1752  if (nnz == 1)
1753  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1754  td->block[y][x], s->linesize);
1755  else
1756  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1757  td->block[y][x], s->linesize);
1758  }
1759  topright += 4;
1760  }
1761 
1762  ptr += 4 * s->linesize;
1763  intra4x4 += 4;
1764  }
1765  }
1766 
1768  mb_x, mb_y, is_vp7);
1769  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1770  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1771 
1772  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1773  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1774  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1775  s->filter.simple, 0);
1776 }
1777 
/* Subpel MC edge-extension lookup, indexed by the 3 fractional MV bits. */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1784 
1785 /**
1786  * luma MC function
1787  *
1788  * @param s VP8 decoding context
1789  * @param dst target buffer for block data at block position
1790  * @param ref reference picture buffer at origin (0, 0)
1791  * @param mv motion vector (relative to block position) to get pixel data from
1792  * @param x_off horizontal position of block from origin (0, 0)
1793  * @param y_off vertical position of block from origin (0, 0)
1794  * @param block_w width of block (16, 8 or 4)
1795  * @param block_h height of block (always same as block_w)
1796  * @param width width of src/dst plane data
1797  * @param height height of src/dst plane data
1798  * @param linesize size of a single line of plane data, including padding
1799  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1800  */
1801 static av_always_inline
1803  ThreadFrame *ref, const VP56mv *mv,
1804  int x_off, int y_off, int block_w, int block_h,
1805  int width, int height, ptrdiff_t linesize,
1806  vp8_mc_func mc_func[3][3])
1807 {
1808  uint8_t *src = ref->f->data[0];
1809 
1810  if (AV_RN32A(mv)) {
1811  ptrdiff_t src_linesize = linesize;
1812 
1813  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1814  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1815 
1816  x_off += mv->x >> 2;
1817  y_off += mv->y >> 2;
1818 
1819  // edge emulation
1820  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1821  src += y_off * linesize + x_off;
1822  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1823  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1825  src - my_idx * linesize - mx_idx,
1826  EDGE_EMU_LINESIZE, linesize,
1827  block_w + subpel_idx[1][mx],
1828  block_h + subpel_idx[1][my],
1829  x_off - mx_idx, y_off - my_idx,
1830  width, height);
1831  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1832  src_linesize = EDGE_EMU_LINESIZE;
1833  }
1834  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1835  } else {
1836  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1837  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1838  linesize, block_h, 0, 0);
1839  }
1840 }
1841 
1842 /**
1843  * chroma MC function
1844  *
1845  * @param s VP8 decoding context
1846  * @param dst1 target buffer for block data at block position (U plane)
1847  * @param dst2 target buffer for block data at block position (V plane)
1848  * @param ref reference picture buffer at origin (0, 0)
1849  * @param mv motion vector (relative to block position) to get pixel data from
1850  * @param x_off horizontal position of block from origin (0, 0)
1851  * @param y_off vertical position of block from origin (0, 0)
1852  * @param block_w width of block (16, 8 or 4)
1853  * @param block_h height of block (always same as block_w)
1854  * @param width width of src/dst plane data
1855  * @param height height of src/dst plane data
1856  * @param linesize size of a single line of plane data, including padding
1857  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1858  */
1859 static av_always_inline
1861  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1862  int x_off, int y_off, int block_w, int block_h,
1863  int width, int height, ptrdiff_t linesize,
1864  vp8_mc_func mc_func[3][3])
1865 {
1866  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1867 
1868  if (AV_RN32A(mv)) {
1869  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1870  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1871 
1872  x_off += mv->x >> 3;
1873  y_off += mv->y >> 3;
1874 
1875  // edge emulation
1876  src1 += y_off * linesize + x_off;
1877  src2 += y_off * linesize + x_off;
1878  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1879  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1880  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1882  src1 - my_idx * linesize - mx_idx,
1883  EDGE_EMU_LINESIZE, linesize,
1884  block_w + subpel_idx[1][mx],
1885  block_h + subpel_idx[1][my],
1886  x_off - mx_idx, y_off - my_idx, width, height);
1887  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1888  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1889 
1891  src2 - my_idx * linesize - mx_idx,
1892  EDGE_EMU_LINESIZE, linesize,
1893  block_w + subpel_idx[1][mx],
1894  block_h + subpel_idx[1][my],
1895  x_off - mx_idx, y_off - my_idx, width, height);
1896  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1897  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1898  } else {
1899  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1900  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1901  }
1902  } else {
1903  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1904  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1905  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1906  }
1907 }
1908 
1909 static av_always_inline
1911  ThreadFrame *ref_frame, int x_off, int y_off,
1912  int bx_off, int by_off, int block_w, int block_h,
1913  int width, int height, VP56mv *mv)
1914 {
1915  VP56mv uvmv = *mv;
1916 
1917  /* Y */
1918  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1919  ref_frame, mv, x_off + bx_off, y_off + by_off,
1920  block_w, block_h, width, height, s->linesize,
1921  s->put_pixels_tab[block_w == 8]);
1922 
1923  /* U/V */
1924  if (s->profile == 3) {
1925  /* this block only applies VP8; it is safe to check
1926  * only the profile, as VP7 profile <= 1 */
1927  uvmv.x &= ~7;
1928  uvmv.y &= ~7;
1929  }
1930  x_off >>= 1;
1931  y_off >>= 1;
1932  bx_off >>= 1;
1933  by_off >>= 1;
1934  width >>= 1;
1935  height >>= 1;
1936  block_w >>= 1;
1937  block_h >>= 1;
1938  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1939  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1940  &uvmv, x_off + bx_off, y_off + by_off,
1941  block_w, block_h, width, height, s->uvlinesize,
1942  s->put_pixels_tab[1 + (block_w == 4)]);
1943 }
1944 
1945 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1946  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1947 static av_always_inline
1948 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1949  int mb_xy, int ref)
1950 {
1951  /* Don't prefetch refs that haven't been used very often this frame. */
1952  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1953  int x_off = mb_x << 4, y_off = mb_y << 4;
1954  int mx = (mb->mv.x >> 2) + x_off + 8;
1955  int my = (mb->mv.y >> 2) + y_off;
1956  uint8_t **src = s->framep[ref]->tf.f->data;
1957  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1958  /* For threading, a ff_thread_await_progress here might be useful, but
1959  * it actually slows down the decoder. Since a bad prefetch doesn't
1960  * generate bad decoder output, we don't run it here. */
1961  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1962  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1963  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1964  }
1965 }
1966 
1967 /**
1968  * Apply motion vectors to prediction buffer, chapter 18.
1969  */
1970 static av_always_inline
1972  VP8Macroblock *mb, int mb_x, int mb_y)
1973 {
1974  int x_off = mb_x << 4, y_off = mb_y << 4;
1975  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1976  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1977  VP56mv *bmv = mb->bmv;
1978 
1979  switch (mb->partitioning) {
1980  case VP8_SPLITMVMODE_NONE:
1981  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1982  0, 0, 16, 16, width, height, &mb->mv);
1983  break;
1984  case VP8_SPLITMVMODE_4x4: {
1985  int x, y;
1986  VP56mv uvmv;
1987 
1988  /* Y */
1989  for (y = 0; y < 4; y++) {
1990  for (x = 0; x < 4; x++) {
1991  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1992  ref, &bmv[4 * y + x],
1993  4 * x + x_off, 4 * y + y_off, 4, 4,
1994  width, height, s->linesize,
1995  s->put_pixels_tab[2]);
1996  }
1997  }
1998 
1999  /* U/V */
2000  x_off >>= 1;
2001  y_off >>= 1;
2002  width >>= 1;
2003  height >>= 1;
2004  for (y = 0; y < 2; y++) {
2005  for (x = 0; x < 2; x++) {
2006  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
2007  mb->bmv[2 * y * 4 + 2 * x + 1].x +
2008  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
2009  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
2010  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
2011  mb->bmv[2 * y * 4 + 2 * x + 1].y +
2012  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
2013  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
2014  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
2015  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
2016  if (s->profile == 3) {
2017  uvmv.x &= ~7;
2018  uvmv.y &= ~7;
2019  }
2020  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
2021  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
2022  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
2023  width, height, s->uvlinesize,
2024  s->put_pixels_tab[2]);
2025  }
2026  }
2027  break;
2028  }
2029  case VP8_SPLITMVMODE_16x8:
2030  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2031  0, 0, 16, 8, width, height, &bmv[0]);
2032  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2033  0, 8, 16, 8, width, height, &bmv[1]);
2034  break;
2035  case VP8_SPLITMVMODE_8x16:
2036  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2037  0, 0, 8, 16, width, height, &bmv[0]);
2038  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2039  8, 0, 8, 16, width, height, &bmv[1]);
2040  break;
2041  case VP8_SPLITMVMODE_8x8:
2042  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2043  0, 0, 8, 8, width, height, &bmv[0]);
2044  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2045  8, 0, 8, 8, width, height, &bmv[1]);
2046  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2047  0, 8, 8, 8, width, height, &bmv[2]);
2048  vp8_mc_part(s, td, dst, ref, x_off, y_off,
2049  8, 8, 8, 8, width, height, &bmv[3]);
2050  break;
2051  }
2052 }
2053 
2054 static av_always_inline
2056 {
2057  int x, y, ch;
2058 
2059  if (mb->mode != MODE_I4x4) {
2060  uint8_t *y_dst = dst[0];
2061  for (y = 0; y < 4; y++) {
2062  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
2063  if (nnz4) {
2064  if (nnz4 & ~0x01010101) {
2065  for (x = 0; x < 4; x++) {
2066  if ((uint8_t) nnz4 == 1)
2067  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
2068  td->block[y][x],
2069  s->linesize);
2070  else if ((uint8_t) nnz4 > 1)
2071  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
2072  td->block[y][x],
2073  s->linesize);
2074  nnz4 >>= 8;
2075  if (!nnz4)
2076  break;
2077  }
2078  } else {
2079  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
2080  }
2081  }
2082  y_dst += 4 * s->linesize;
2083  }
2084  }
2085 
2086  for (ch = 0; ch < 2; ch++) {
2087  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2088  if (nnz4) {
2089  uint8_t *ch_dst = dst[1 + ch];
2090  if (nnz4 & ~0x01010101) {
2091  for (y = 0; y < 2; y++) {
2092  for (x = 0; x < 2; x++) {
2093  if ((uint8_t) nnz4 == 1)
2094  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2095  td->block[4 + ch][(y << 1) + x],
2096  s->uvlinesize);
2097  else if ((uint8_t) nnz4 > 1)
2098  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2099  td->block[4 + ch][(y << 1) + x],
2100  s->uvlinesize);
2101  nnz4 >>= 8;
2102  if (!nnz4)
2103  goto chroma_idct_end;
2104  }
2105  ch_dst += 4 * s->uvlinesize;
2106  }
2107  } else {
2108  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2109  }
2110  }
2111 chroma_idct_end:
2112  ;
2113  }
2114 }
2115 
2116 static av_always_inline
2118  VP8FilterStrength *f, int is_vp7)
2119 {
2120  int interior_limit, filter_level;
2121 
2122  if (s->segmentation.enabled) {
2123  filter_level = s->segmentation.filter_level[mb->segment];
2124  if (!s->segmentation.absolute_vals)
2125  filter_level += s->filter.level;
2126  } else
2127  filter_level = s->filter.level;
2128 
2129  if (s->lf_delta.enabled) {
2130  filter_level += s->lf_delta.ref[mb->ref_frame];
2131  filter_level += s->lf_delta.mode[mb->mode];
2132  }
2133 
2134  filter_level = av_clip_uintp2(filter_level, 6);
2135 
2136  interior_limit = filter_level;
2137  if (s->filter.sharpness) {
2138  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2139  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2140  }
2141  interior_limit = FFMAX(interior_limit, 1);
2142 
2143  f->filter_level = filter_level;
2144  f->inner_limit = interior_limit;
2145  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2146  mb->mode == VP8_MVMODE_SPLIT;
2147 }
2148 
2149 static av_always_inline
2151  int mb_x, int mb_y, int is_vp7)
2152 {
2153  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
2154  int filter_level = f->filter_level;
2155  int inner_limit = f->inner_limit;
2156  int inner_filter = f->inner_filter;
2157  ptrdiff_t linesize = s->linesize;
2158  ptrdiff_t uvlinesize = s->uvlinesize;
2159  static const uint8_t hev_thresh_lut[2][64] = {
2160  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2161  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2162  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
2163  3, 3, 3, 3 },
2164  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
2165  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2166  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2167  2, 2, 2, 2 }
2168  };
2169 
2170  if (!filter_level)
2171  return;
2172 
2173  if (is_vp7) {
2174  bedge_lim_y = filter_level;
2175  bedge_lim_uv = filter_level * 2;
2176  mbedge_lim = filter_level + 2;
2177  } else {
2178  bedge_lim_y =
2179  bedge_lim_uv = filter_level * 2 + inner_limit;
2180  mbedge_lim = bedge_lim_y + 4;
2181  }
2182 
2183  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
2184 
2185  if (mb_x) {
2186  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
2187  mbedge_lim, inner_limit, hev_thresh);
2188  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
2189  mbedge_lim, inner_limit, hev_thresh);
2190  }
2191 
2192 #define H_LOOP_FILTER_16Y_INNER(cond) \
2193  if (cond && inner_filter) { \
2194  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
2195  bedge_lim_y, inner_limit, \
2196  hev_thresh); \
2197  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
2198  bedge_lim_y, inner_limit, \
2199  hev_thresh); \
2200  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
2201  bedge_lim_y, inner_limit, \
2202  hev_thresh); \
2203  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
2204  uvlinesize, bedge_lim_uv, \
2205  inner_limit, hev_thresh); \
2206  }
2207 
2208  H_LOOP_FILTER_16Y_INNER(!is_vp7)
2209 
2210  if (mb_y) {
2211  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
2212  mbedge_lim, inner_limit, hev_thresh);
2213  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
2214  mbedge_lim, inner_limit, hev_thresh);
2215  }
2216 
2217  if (inner_filter) {
2218  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
2219  linesize, bedge_lim_y,
2220  inner_limit, hev_thresh);
2221  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
2222  linesize, bedge_lim_y,
2223  inner_limit, hev_thresh);
2224  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
2225  linesize, bedge_lim_y,
2226  inner_limit, hev_thresh);
2227  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
2228  dst[2] + 4 * uvlinesize,
2229  uvlinesize, bedge_lim_uv,
2230  inner_limit, hev_thresh);
2231  }
2232 
2233  H_LOOP_FILTER_16Y_INNER(is_vp7)
2234 }
2235 
2236 static av_always_inline
2238  int mb_x, int mb_y)
2239 {
2240  int mbedge_lim, bedge_lim;
2241  int filter_level = f->filter_level;
2242  int inner_limit = f->inner_limit;
2243  int inner_filter = f->inner_filter;
2244  ptrdiff_t linesize = s->linesize;
2245 
2246  if (!filter_level)
2247  return;
2248 
2249  bedge_lim = 2 * filter_level + inner_limit;
2250  mbedge_lim = bedge_lim + 4;
2251 
2252  if (mb_x)
2253  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
2254  if (inner_filter) {
2255  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
2256  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
2257  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
2258  }
2259 
2260  if (mb_y)
2261  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
2262  if (inner_filter) {
2263  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
2264  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
2265  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
2266  }
2267 }
2268 
2269 #define MARGIN (16 << 2)
2270 static av_always_inline
2272  VP8Frame *prev_frame, int is_vp7)
2273 {
2274  VP8Context *s = avctx->priv_data;
2275  int mb_x, mb_y;
2276 
2277  s->mv_bounds.mv_min.y = -MARGIN;
2278  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2279  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
2281  ((s->mb_width + 1) * (mb_y + 1) + 1);
2282  int mb_xy = mb_y * s->mb_width;
2283 
2284  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2285 
2286  s->mv_bounds.mv_min.x = -MARGIN;
2287  s->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2288  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2289  if (mb_y == 0)
2290  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
2291  DC_PRED * 0x01010101);
2292  decode_mb_mode(s, &s->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2293  prev_frame && prev_frame->seg_map ?
2294  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
2295  s->mv_bounds.mv_min.x -= 64;
2296  s->mv_bounds.mv_max.x -= 64;
2297  }
2298  s->mv_bounds.mv_min.y -= 64;
2299  s->mv_bounds.mv_max.y -= 64;
2300  }
2301 }
2302 
2303 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2304  VP8Frame *prev_frame)
2305 {
2306  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2307 }
2308 
2309 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2310  VP8Frame *prev_frame)
2311 {
2312  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2313 }
2314 
2315 #if HAVE_THREADS
2316 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
2317  do { \
2318  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
2319  if (atomic_load(&otd->thread_mb_pos) < tmp) { \
2320  pthread_mutex_lock(&otd->lock); \
2321  atomic_store(&td->wait_mb_pos, tmp); \
2322  do { \
2323  if (atomic_load(&otd->thread_mb_pos) >= tmp) \
2324  break; \
2325  pthread_cond_wait(&otd->cond, &otd->lock); \
2326  } while (1); \
2327  atomic_store(&td->wait_mb_pos, INT_MAX); \
2328  pthread_mutex_unlock(&otd->lock); \
2329  } \
2330  } while (0)
2331 
2332 #define update_pos(td, mb_y, mb_x) \
2333  do { \
2334  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
2335  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
2336  (num_jobs > 1); \
2337  int is_null = !next_td || !prev_td; \
2338  int pos_check = (is_null) ? 1 : \
2339  (next_td != td && pos >= atomic_load(&next_td->wait_mb_pos)) || \
2340  (prev_td != td && pos >= atomic_load(&prev_td->wait_mb_pos)); \
2341  atomic_store(&td->thread_mb_pos, pos); \
2342  if (sliced_threading && pos_check) { \
2343  pthread_mutex_lock(&td->lock); \
2344  pthread_cond_broadcast(&td->cond); \
2345  pthread_mutex_unlock(&td->lock); \
2346  } \
2347  } while (0)
2348 #else
2349 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
2350 #define update_pos(td, mb_y, mb_x) while(0)
2351 #endif
2352 
2354  int jobnr, int threadnr, int is_vp7)
2355 {
2356  VP8Context *s = avctx->priv_data;
2357  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
2358  int mb_y = atomic_load(&td->thread_mb_pos) >> 16;
2359  int mb_x, mb_xy = mb_y * s->mb_width;
2360  int num_jobs = s->num_jobs;
2361  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
2362  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
2363  VP8Macroblock *mb;
2364  uint8_t *dst[3] = {
2365  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
2366  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
2367  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
2368  };
2369 
2370  if (c->end <= c->buffer && c->bits >= 0)
2371  return AVERROR_INVALIDDATA;
2372 
2373  if (mb_y == 0)
2374  prev_td = td;
2375  else
2376  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2377  if (mb_y == s->mb_height - 1)
2378  next_td = td;
2379  else
2380  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2381  if (s->mb_layout == 1)
2382  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2383  else {
2384  // Make sure the previous frame has read its segmentation map,
2385  // if we re-use the same map.
2386  if (prev_frame && s->segmentation.enabled &&
2388  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
2389  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2390  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
2391  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
2392  }
2393 
2394  if (!is_vp7 || mb_y == 0)
2395  memset(td->left_nnz, 0, sizeof(td->left_nnz));
2396 
2397  td->mv_bounds.mv_min.x = -MARGIN;
2398  td->mv_bounds.mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
2399 
2400  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
2401  if (c->end <= c->buffer && c->bits >= 0)
2402  return AVERROR_INVALIDDATA;
2403  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
2404  if (prev_td != td) {
2405  if (threadnr != 0) {
2406  check_thread_pos(td, prev_td,
2407  mb_x + (is_vp7 ? 2 : 1),
2408  mb_y - (is_vp7 ? 2 : 1));
2409  } else {
2410  check_thread_pos(td, prev_td,
2411  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
2412  mb_y - (is_vp7 ? 2 : 1));
2413  }
2414  }
2415 
2416  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
2417  s->linesize, 4);
2418  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
2419  dst[2] - dst[1], 2);
2420 
2421  if (!s->mb_layout)
2422  decode_mb_mode(s, &td->mv_bounds, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
2423  prev_frame && prev_frame->seg_map ?
2424  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
2425 
2426  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
2427 
2428  if (!mb->skip)
2429  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
2430 
2431  if (mb->mode <= MODE_I4x4)
2432  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
2433  else
2434  inter_predict(s, td, dst, mb, mb_x, mb_y);
2435 
2436  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
2437 
2438  if (!mb->skip) {
2439  idct_mb(s, td, dst, mb);
2440  } else {
2441  AV_ZERO64(td->left_nnz);
2442  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
2443 
2444  /* Reset DC block predictors if they would exist
2445  * if the mb had coefficients */
2446  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
2447  td->left_nnz[8] = 0;
2448  s->top_nnz[mb_x][8] = 0;
2449  }
2450  }
2451 
2452  if (s->deblock_filter)
2453  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
2454 
2455  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
2456  if (s->filter.simple)
2457  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2458  NULL, NULL, s->linesize, 0, 1);
2459  else
2460  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2461  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2462  }
2463 
2464  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
2465 
2466  dst[0] += 16;
2467  dst[1] += 8;
2468  dst[2] += 8;
2469  td->mv_bounds.mv_min.x -= 64;
2470  td->mv_bounds.mv_max.x -= 64;
2471 
2472  if (mb_x == s->mb_width + 1) {
2473  update_pos(td, mb_y, s->mb_width + 3);
2474  } else {
2475  update_pos(td, mb_y, mb_x);
2476  }
2477  }
2478  return 0;
2479 }
2480 
2481 static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2482  int jobnr, int threadnr)
2483 {
2484  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2485 }
2486 
2487 static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2488  int jobnr, int threadnr)
2489 {
2490  return decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2491 }
2492 
2493 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
2494  int jobnr, int threadnr, int is_vp7)
2495 {
2496  VP8Context *s = avctx->priv_data;
2497  VP8ThreadData *td = &s->thread_data[threadnr];
2498  int mb_x, mb_y = atomic_load(&td->thread_mb_pos) >> 16, num_jobs = s->num_jobs;
2499  AVFrame *curframe = s->curframe->tf.f;
2500  VP8Macroblock *mb;
2501  VP8ThreadData *prev_td, *next_td;
2502  uint8_t *dst[3] = {
2503  curframe->data[0] + 16 * mb_y * s->linesize,
2504  curframe->data[1] + 8 * mb_y * s->uvlinesize,
2505  curframe->data[2] + 8 * mb_y * s->uvlinesize
2506  };
2507 
2508  if (s->mb_layout == 1)
2509  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
2510  else
2511  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
2512 
2513  if (mb_y == 0)
2514  prev_td = td;
2515  else
2516  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
2517  if (mb_y == s->mb_height - 1)
2518  next_td = td;
2519  else
2520  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
2521 
2522  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
2523  VP8FilterStrength *f = &td->filter_strength[mb_x];
2524  if (prev_td != td)
2525  check_thread_pos(td, prev_td,
2526  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
2527  if (next_td != td)
2528  if (next_td != &s->thread_data[0])
2529  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
2530 
2531  if (num_jobs == 1) {
2532  if (s->filter.simple)
2533  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2534  NULL, NULL, s->linesize, 0, 1);
2535  else
2536  backup_mb_border(s->top_border[mb_x + 1], dst[0],
2537  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
2538  }
2539 
2540  if (s->filter.simple)
2541  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
2542  else
2543  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
2544  dst[0] += 16;
2545  dst[1] += 8;
2546  dst[2] += 8;
2547 
2548  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
2549  }
2550 }
2551 
2552 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2553  int jobnr, int threadnr)
2554 {
2555  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2556 }
2557 
2558 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2559  int jobnr, int threadnr)
2560 {
2561  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2562 }
2563 
2564 static av_always_inline
2565 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2566  int threadnr, int is_vp7)
2567 {
2568  VP8Context *s = avctx->priv_data;
2569  VP8ThreadData *td = &s->thread_data[jobnr];
2570  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2571  VP8Frame *curframe = s->curframe;
2572  int mb_y, num_jobs = s->num_jobs;
2573  int ret;
2574 
2575  td->thread_nr = threadnr;
2576  td->mv_bounds.mv_min.y = -MARGIN - 64 * threadnr;
2577  td->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN - 64 * threadnr;
2578  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2579  atomic_store(&td->thread_mb_pos, mb_y << 16);
2580  ret = s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2581  if (ret < 0) {
2582  update_pos(td, s->mb_height, INT_MAX & 0xFFFF);
2583  return ret;
2584  }
2585  if (s->deblock_filter)
2586  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2587  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2588 
2589  td->mv_bounds.mv_min.y -= 64 * num_jobs;
2590  td->mv_bounds.mv_max.y -= 64 * num_jobs;
2591 
2592  if (avctx->active_thread_type == FF_THREAD_FRAME)
2593  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2594  }
2595 
2596  return 0;
2597 }
2598 
2599 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2600  int jobnr, int threadnr)
2601 {
2602  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2603 }
2604 
2605 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2606  int jobnr, int threadnr)
2607 {
2608  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2609 }
2610 
2611 static av_always_inline
2612 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2613  AVPacket *avpkt, int is_vp7)
2614 {
2615  VP8Context *s = avctx->priv_data;
2616  int ret, i, referenced, num_jobs;
2617  enum AVDiscard skip_thresh;
2618  VP8Frame *av_uninit(curframe), *prev_frame;
2619 
2620  if (is_vp7)
2621  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
2622  else
2623  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
2624 
2625  if (ret < 0)
2626  goto err;
2627 
2628  if (s->actually_webp) {
2629  // avctx->pix_fmt already set in caller.
2630  } else if (!is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) {
2631  s->pix_fmt = get_pixel_format(s);
2632  if (s->pix_fmt < 0) {
2633  ret = AVERROR(EINVAL);
2634  goto err;
2635  }
2636  avctx->pix_fmt = s->pix_fmt;
2637  }
2638 
2639  prev_frame = s->framep[VP56_FRAME_CURRENT];
2640 
2641  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
2643 
2644  skip_thresh = !referenced ? AVDISCARD_NONREF
2645  : !s->keyframe ? AVDISCARD_NONKEY
2646  : AVDISCARD_ALL;
2647 
2648  if (avctx->skip_frame >= skip_thresh) {
2649  s->invisible = 1;
2650  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2651  goto skip_decode;
2652  }
2653  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
2654 
2655  // release no longer referenced frames
2656  for (i = 0; i < 5; i++)
2657  if (s->frames[i].tf.f->buf[0] &&
2658  &s->frames[i] != prev_frame &&
2659  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
2660  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
2661  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
2662  vp8_release_frame(s, &s->frames[i]);
2663 
2664  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
2665 
2666  if (!s->colorspace)
2667  avctx->colorspace = AVCOL_SPC_BT470BG;
2668  if (s->fullrange)
2669  avctx->color_range = AVCOL_RANGE_JPEG;
2670  else
2671  avctx->color_range = AVCOL_RANGE_MPEG;
2672 
2673  /* Given that arithmetic probabilities are updated every frame, it's quite
2674  * likely that the values we have on a random interframe are complete
2675  * junk if we didn't start decode on a keyframe. So just don't display
2676  * anything rather than junk. */
2677  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
2678  !s->framep[VP56_FRAME_GOLDEN] ||
2679  !s->framep[VP56_FRAME_GOLDEN2])) {
2680  av_log(avctx, AV_LOG_WARNING,
2681  "Discarding interframe without a prior keyframe!\n");
2682  ret = AVERROR_INVALIDDATA;
2683  goto err;
2684  }
2685 
2686  curframe->tf.f->key_frame = s->keyframe;
2687  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2689  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
2690  goto err;
2691 
2692  // check if golden and altref are swapped
2693  if (s->update_altref != VP56_FRAME_NONE)
2695  else
2697 
2698  if (s->update_golden != VP56_FRAME_NONE)
2700  else
2702 
2703  if (s->update_last)
2704  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
2705  else
2707 
2708  s->next_framep[VP56_FRAME_CURRENT] = curframe;
2709 
2710  ff_thread_finish_setup(avctx);
2711 
2712  if (avctx->hwaccel) {
2713  ret = avctx->hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
2714  if (ret < 0)
2715  goto err;
2716 
2717  ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
2718  if (ret < 0)
2719  goto err;
2720 
2721  ret = avctx->hwaccel->end_frame(avctx);
2722  if (ret < 0)
2723  goto err;
2724 
2725  } else {
2726  s->linesize = curframe->tf.f->linesize[0];
2727  s->uvlinesize = curframe->tf.f->linesize[1];
2728 
2729  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
2730  /* Zero macroblock structures for top/top-left prediction
2731  * from outside the frame. */
2732  if (!s->mb_layout)
2733  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
2734  (s->mb_width + 1) * sizeof(*s->macroblocks));
2735  if (!s->mb_layout && s->keyframe)
2736  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
2737 
2738  memset(s->ref_count, 0, sizeof(s->ref_count));
2739 
2740  if (s->mb_layout == 1) {
2741  // Make sure the previous frame has read its segmentation map,
2742  // if we re-use the same map.
2743  if (prev_frame && s->segmentation.enabled &&
2745  ff_thread_await_progress(&prev_frame->tf, 1, 0);
2746  if (is_vp7)
2747  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
2748  else
2749  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
2750  }
2751 
2752  if (avctx->active_thread_type == FF_THREAD_FRAME)
2753  num_jobs = 1;
2754  else
2755  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
2756  s->num_jobs = num_jobs;
2757  s->curframe = curframe;
2758  s->prev_frame = prev_frame;
2759  s->mv_bounds.mv_min.y = -MARGIN;
2760  s->mv_bounds.mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
2761  for (i = 0; i < MAX_THREADS; i++) {
2762  VP8ThreadData *td = &s->thread_data[i];
2763  atomic_init(&td->thread_mb_pos, 0);
2764  atomic_init(&td->wait_mb_pos, INT_MAX);
2765  }
2766  if (is_vp7)
2768  num_jobs);
2769  else
2771  num_jobs);
2772  }
2773 
2774  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
2775  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
2776 
2777 skip_decode:
2778  // if future frames don't use the updated probabilities,
2779  // reset them to the values we saved
2780  if (!s->update_probabilities)
2781  s->prob[0] = s->prob[1];
2782 
2783  if (!s->invisible) {
2784  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
2785  return ret;
2786  *got_frame = 1;
2787  }
2788 
2789  return avpkt->size;
2790 err:
2791  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
2792  return ret;
2793 }
2794 
2795 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2796  AVPacket *avpkt)
2797 {
2798  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2799 }
2800 
2801 #if CONFIG_VP7_DECODER
2802 static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2803  AVPacket *avpkt)
2804 {
2805  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
2806 }
2807 #endif /* CONFIG_VP7_DECODER */
2808 
2810 {
2811  VP8Context *s = avctx->priv_data;
2812  int i;
2813 
2814  if (!s)
2815  return 0;
2816 
2817  vp8_decode_flush_impl(avctx, 1);
2818  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
2819  av_frame_free(&s->frames[i].tf.f);
2820 
2821  return 0;
2822 }
2823 
2825 {
2826  int i;
2827  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
2828  s->frames[i].tf.f = av_frame_alloc();
2829  if (!s->frames[i].tf.f)
2830  return AVERROR(ENOMEM);
2831  }
2832  return 0;
2833 }
2834 
2835 static av_always_inline
2836 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
2837 {
2838  VP8Context *s = avctx->priv_data;
2839  int ret;
2840 
2841  s->avctx = avctx;
2842  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
2843  s->pix_fmt = AV_PIX_FMT_NONE;
2844  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2845  avctx->internal->allocate_progress = 1;
2846 
2847  ff_videodsp_init(&s->vdsp, 8);
2848 
2849  ff_vp78dsp_init(&s->vp8dsp);
2850  if (CONFIG_VP7_DECODER && is_vp7) {
2852  ff_vp7dsp_init(&s->vp8dsp);
2855  } else if (CONFIG_VP8_DECODER && !is_vp7) {
2857  ff_vp8dsp_init(&s->vp8dsp);
2860  }
2861 
2862  /* does not change for VP8 */
2863  memcpy(s->prob[0].scan, ff_zigzag_scan, sizeof(s->prob[0].scan));
2864 
2865  if ((ret = vp8_init_frames(s)) < 0) {
2866  ff_vp8_decode_free(avctx);
2867  return ret;
2868  }
2869 
2870  return 0;
2871 }
2872 
2873 #if CONFIG_VP7_DECODER
2874 static int vp7_decode_init(AVCodecContext *avctx)
2875 {
2876  return vp78_decode_init(avctx, IS_VP7);
2877 }
2878 #endif /* CONFIG_VP7_DECODER */
2879 
2881 {
2882  return vp78_decode_init(avctx, IS_VP8);
2883 }
2884 
2885 #if CONFIG_VP8_DECODER
2886 #if HAVE_THREADS
2887 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2888 {
2889  VP8Context *s = avctx->priv_data;
2890  int ret;
2891 
2892  s->avctx = avctx;
2893 
2894  if ((ret = vp8_init_frames(s)) < 0) {
2895  ff_vp8_decode_free(avctx);
2896  return ret;
2897  }
2898 
2899  return 0;
2900 }
2901 
2902 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2903 
2904 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2905  const AVCodecContext *src)
2906 {
2907  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2908  int i;
2909 
2910  if (s->macroblocks_base &&
2911  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2912  free_buffers(s);
2913  s->mb_width = s_src->mb_width;
2914  s->mb_height = s_src->mb_height;
2915  }
2916 
2917  s->pix_fmt = s_src->pix_fmt;
2918  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2919  s->segmentation = s_src->segmentation;
2920  s->lf_delta = s_src->lf_delta;
2921  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2922 
2923  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2924  if (s_src->frames[i].tf.f->buf[0]) {
2925  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2926  if (ret < 0)
2927  return ret;
2928  }
2929  }
2930 
2931  s->framep[0] = REBASE(s_src->next_framep[0]);
2932  s->framep[1] = REBASE(s_src->next_framep[1]);
2933  s->framep[2] = REBASE(s_src->next_framep[2]);
2934  s->framep[3] = REBASE(s_src->next_framep[3]);
2935 
2936  return 0;
2937 }
2938 #endif /* HAVE_THREADS */
2939 #endif /* CONFIG_VP8_DECODER */
2940 
2941 #if CONFIG_VP7_DECODER
2943  .name = "vp7",
2944  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
2945  .type = AVMEDIA_TYPE_VIDEO,
2946  .id = AV_CODEC_ID_VP7,
2947  .priv_data_size = sizeof(VP8Context),
2948  .init = vp7_decode_init,
2949  .close = ff_vp8_decode_free,
2950  .decode = vp7_decode_frame,
2951  .capabilities = AV_CODEC_CAP_DR1,
2953 };
2954 #endif /* CONFIG_VP7_DECODER */
2955 
2956 #if CONFIG_VP8_DECODER
2958  .name = "vp8",
2959  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
2960  .type = AVMEDIA_TYPE_VIDEO,
2961  .id = AV_CODEC_ID_VP8,
2962  .priv_data_size = sizeof(VP8Context),
2964  .close = ff_vp8_decode_free,
2966  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
2969  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
2970  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
2971  .hw_configs = (const AVCodecHWConfigInternal*[]) {
2972 #if CONFIG_VP8_VAAPI_HWACCEL
2973  HWACCEL_VAAPI(vp8),
2974 #endif
2975 #if CONFIG_VP8_NVDEC_HWACCEL
2976  HWACCEL_NVDEC(vp8),
2977 #endif
2978  NULL
2979  },
2980 };
2981 #endif /* CONFIG_VP7_DECODER */
const uint8_t * input
Definition: vp8.h:255
uint8_t golden
Definition: vp8.h:274
atomic_int wait_mb_pos
Definition: vp8.h:130
uint8_t inner_limit
Definition: vp8.h:77
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:192
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:711
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1664
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:778
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1326
const struct AVCodec * codec
Definition: avcodec.h:1542
discard all frames except keyframes
Definition: avcodec.h:802
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:3654
Definition: vp9.h:47
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:744
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:108
static void copy(const float *p1, float *p2, const int length)
(only used in prediction) no split MVs
Definition: vp8.h:72
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
unsigned int code_word
Definition: vp56.h:91
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:281
This structure describes decoded (raw) audio or video data.
Definition: frame.h:226
struct VP8Context::@175 quant
#define atomic_store(object, desired)
Definition: stdatomic.h:85
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static void flush(AVCodecContext *avctx)
static int vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2487
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:169
#define HWACCEL_NVDEC(codec)
Definition: hwaccel.h:71
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1721
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1389
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int init_thread_copy(AVCodecContext *avctx)
Definition: tta.c:392
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:35
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:388
uint8_t feature_value[4][4]
Definition: vp8.h:341
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:418
uint32_t value
Definition: vp8.h:257
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
ptrdiff_t linesize
Definition: vp8.h:160
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:492
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:194
uint8_t mbskip_enabled
Definition: vp8.h:165
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:402
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2164
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1411
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:277
uint8_t scan[16]
Definition: vp8.h:279
int size
Definition: avcodec.h:1446
int uvac_delta
Definition: vp8.h:216
const char * b
Definition: vf_curves.c:116
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:141
#define MARGIN
Definition: vp8.c:2269
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1743
VP56mv bmv[16]
Definition: vp8.h:93
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:666
uint8_t inner_filter
Definition: vp8.h:78
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
uint8_t segmentid[3]
Definition: vp8.h:270
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:803
discard all
Definition: avcodec.h:803
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2690
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:477
#define src
Definition: vp8dsp.c:254
struct VP8Context::@177 coder_state_at_header_end
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3424
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:87
uint8_t sharpness
Definition: vp8.h:189
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
2 16x8 blocks (vertical)
Definition: vp8.h:68
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:140
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:292
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
VP8intmv mv_min
Definition: vp8.h:102
VP8Frame * framep[4]
Definition: vp8.h:153
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1423
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2552
#define VP7_MVC_SIZE
Definition: vp8.c:443
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:882
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: 4-log2(width) second dimension: 0 if no vertical interpolation is needed; 1 4-tap ve...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:893
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1448
uint8_t(* top_nnz)[9]
Definition: vp8.h:245
int num_jobs
Definition: vp8.h:310
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2991
#define AV_RN32A(p)
Definition: intreadwrite.h:526
static int16_t block[64]
Definition: dct.c:115
uint8_t pred16x16[4]
Definition: vp8.h:275
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:180
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:159
int16_t y
Definition: vp56.h:68
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
struct VP8Context::@176 lf_delta
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:285
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2493
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:91
static enum AVPixelFormat get_pixel_format(VP8Context *s)
Definition: vp8.c:170
static av_always_inline void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:843
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:248
#define av_cold
Definition: attributes.h:82
ptrdiff_t uvlinesize
Definition: vp8.h:161
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:326
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1011
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:146
uint8_t ref_frame
Definition: vp8.h:86
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1628
#define f(width, name)
Definition: cbs_vp9.c:255
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2795
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
uint8_t mvc[2][19]
Definition: vp8.h:278
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:253
VP56mv mv
Definition: vp8.h:92
int8_t base_quant[4]
Definition: vp8.h:182
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:733
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:284
#define height
uint8_t * data
Definition: avcodec.h:1445
int8_t yoffset
Definition: vp8data.h:62
int ydc_delta
Definition: vp8.h:212
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1794
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:259
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:156
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:298
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:304
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:590
uint8_t feature_index_prob[4][3]
Definition: vp8.h:340
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:90
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2612
#define prob(name, subs,...)
Definition: cbs_vp9.c:374
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:195
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
uint32_t range
Definition: vp8.h:256
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:307
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:1005
enum AVCodecID id
Definition: avcodec.h:3438
struct VP8Context::@173 filter
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:303
Definition: vp8.h:138
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1948
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:179
uint16_t mb_width
Definition: vp8.h:158
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:755
int y2dc_delta
Definition: vp8.h:213
#define atomic_load(object)
Definition: stdatomic.h:93
#define FF_SIGNBIT(x)
Definition: internal.h:88
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int current_sample, int64_t nb_samples_notify, AVRational time_base)
uint8_t last
Definition: vp8.h:273
static const int sizes[][2]
Definition: img2dec.c:52
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:702
static void fade(uint8_t *dst, ptrdiff_t dst_linesize, const uint8_t *src, ptrdiff_t src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:488
uint8_t mode
Definition: vp8.h:85
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1593
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2605
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2804
const char * r
Definition: vf_curves.c:114
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:149
VP8mvbounds mv_bounds
Definition: vp8.h:167
#define EDGE_EMU_LINESIZE
Definition: vp8.h:132
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:333
VideoDSPContext vdsp
Definition: vp8.h:301
const char * name
Name of the codec implementation.
Definition: avcodec.h:3431
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
VP8Macroblock * macroblocks_base
Definition: vp8.h:282
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1910
static av_always_inline void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1237
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:133
int16_t block[6][4][16]
Definition: vp8.h:107
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1329
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
int yac_qi
Definition: vp8.h:211
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2558
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:337
#define FFMAX(a, b)
Definition: common.h:94
uint8_t keyframe
Definition: vp8.h:163
#define fail()
Definition: checkasm.h:117
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1024
int x
Definition: vp8.h:97
const uint8_t * end
Definition: vp56.h:90
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
VP56Frame
Definition: vp56.h:40
int(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:318
int16_t luma_qmul[2]
Definition: vp8.h:204
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:68
useful rectangle filling function
int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
#define MAX_THREADS
4x4 blocks of 4x4px each
Definition: vp8.h:71
uint8_t deblock_filter
Definition: vp8.h:164
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2796
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:96
uint8_t feature_present_prob[4]
Definition: vp8.h:339
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1860
#define width
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:308
int16_t block_dc[16]
Definition: vp8.h:108
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:344
int width
picture width / height.
Definition: avcodec.h:1706
uint8_t mbskip
Definition: vp8.h:271
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:241
static int vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2481
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:319
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2824
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:50
int32_t
AVFormatContext * ctx
Definition: movenc.c:48
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2349
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:100
int y2ac_delta
Definition: vp8.h:214
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:887
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2797
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:205
int16_t chroma_qmul[2]
Definition: vp8.h:206
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
VP8mvbounds mv_bounds
Definition: vp8.h:135
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:244
int n
Definition: avisynth_c.h:684
ThreadFrame tf
Definition: vp8.h:139
static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2353
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2117
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:771
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:766
#define vp56_rac_get_prob
Definition: vp56.h:262
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:128
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1463
HW acceleration through CUDA.
Definition: pixfmt.h:235
int bit_count
Definition: vp8.h:258
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
struct VP8Context::@174 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2309
#define cat(a, bpp, b)
Definition: vp9dsp_init.h:29
uint8_t segment
Definition: vp8.h:89
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2785
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:512
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2599
#define IS_VP8
Definition: vp8dsp.h:104
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1028
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2303
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1602
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:279
int coeff_partition_size[8]
Definition: vp8.h:300
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
#define src1
Definition: h264pred.c:139
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1971
VP8Frame * curframe
Definition: vp8.h:155
uint8_t simple
Definition: vp8.h:187
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:305
Libavcodec external API header.
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
uint8_t level
Definition: vp8.h:188
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:257
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:95
AVBufferRef * seg_map
Definition: vp8.h:140
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const int16_t alpha[]
Definition: ilbcdata.h:55
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:790
main external API structure.
Definition: avcodec.h:1533
static av_always_inline unsigned int vp56_rac_renorm(VP56RangeCoder *c)
Definition: vp56.h:238
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:504
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:154
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:316
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:123
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:247
void * buf
Definition: avisynth_c.h:690
int y
Definition: vp8.h:98
int bits
Definition: vp56.h:87
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:299
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
uint8_t update
Definition: vp8.h:221
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
int vp7
Definition: vp8.h:321
AVBufferRef * hwaccel_priv_buf
Definition: vp8.h:142
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:305
int coded_height
Definition: avcodec.h:1721
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:253
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:134
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2157
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1584
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:427
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:178
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:2055
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:446
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:854
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1778
static void update_refs(VP8Context *s)
Definition: vp8.c:466
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:401
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:151
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:722
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
int actually_webp
Definition: vp8.h:151
int header_partition_size
Definition: vp8.h:261
uint8_t update_feature_data
Definition: vp8.h:181
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
mfxU16 profile
Definition: qsvenc.c:44
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1202
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:332
#define DC_127_PRED8x8
Definition: h264pred.h:85
#define HWACCEL_VAAPI(codec)
Definition: hwaccel.h:73
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:66
struct VP8Context::@178 prob[2]
These are all of the updatable probabilities for binary decisions.
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2880
int update_altref
Definition: vp8.h:286
void * hwaccel_picture_private
Definition: vp8.h:143
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:240
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:338
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:232
2 8x16 blocks (horizontal)
Definition: vp8.h:69
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2809
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
uint8_t pred8x8c[3]
Definition: vp8.h:276
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:511
discard all non reference
Definition: avcodec.h:799
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2271
uint8_t partitioning
Definition: vp8.h:87
#define AV_ZERO64(d)
Definition: intreadwrite.h:618
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:67
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:356
if(ret< 0)
Definition: vf_mcdeint.c:279
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:114
#define AV_COPY128(d, s)
Definition: intreadwrite.h:594
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1964
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:70
uint8_t chroma_pred_mode
Definition: vp8.h:88
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
VP8intmv mv_max
Definition: vp8.h:103
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int simple)
Definition: vp8.c:1538
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:129
#define DC_129_PRED8x8
Definition: h264pred.h:86
int uvdc_delta
Definition: vp8.h:215
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:2977
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:316
int invisible
Definition: vp8.h:283
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:910
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:170
void * priv_data
Definition: avcodec.h:1560
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1618
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:59
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:992
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:2845
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2350
int frame_priv_data_size
Size of per-frame hardware accelerator private data.
Definition: avcodec.h:3702
struct VP8Context::@172 segmentation
Base parameters for segmentation, i.e.
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1568
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:302
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:187
int thread_nr
Definition: vp8.h:124
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, ptrdiff_t linesize, ptrdiff_t uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1550
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2565
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:794
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:366
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define av_uninit(x)
Definition: attributes.h:148
const uint8_t * buffer
Definition: vp56.h:89
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1802
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2150
#define atomic_init(obj, value)
Definition: stdatomic.h:33
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:103
#define av_always_inline
Definition: attributes.h:39
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:183
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
int high
Definition: vp56.h:86
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:99
uint8_t intra
Definition: vp8.h:272
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:3682
AVCodec ff_vp7_decoder
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1102
int(* end_frame)(AVCodecContext *avctx)
Called at the end of each frame or field picture.
Definition: avcodec.h:3693
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:116
uint8_t skip
Definition: vp8.h:82
atomic_int thread_mb_pos
Definition: vp8.h:129
void ff_vp8dsp_init(VP8DSPContext *c)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:418
This structure stores compressed data.
Definition: avcodec.h:1422
#define VP8_MVC_SIZE
Definition: vp8.c:444
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:546
AVCodec ff_vp8_decoder
uint8_t profile
Definition: vp8.h:166
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1144
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
mode
Use these values in ebur128_init (or'ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:968
VP8ThreadData * thread_data
Definition: vp8.h:148
enum AVPixelFormat pix_fmt
Definition: vp8.h:150
Predicted.
Definition: avutil.h:275
2x2 blocks of 8x8px each
Definition: vp8.h:70
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2237
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:816
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2836
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
uint8_t filter_level
Definition: vp8.h:76