FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vp8.c
Go to the documentation of this file.
1 /*
2  * VP7/VP8 compatible video decoder
3  *
4  * Copyright (C) 2010 David Conrad
5  * Copyright (C) 2010 Ronald S. Bultje
6  * Copyright (C) 2010 Fiona Glaser
7  * Copyright (C) 2012 Daniel Kang
8  * Copyright (C) 2014 Peter Ross
9  *
10  * This file is part of FFmpeg.
11  *
12  * FFmpeg is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU Lesser General Public
14  * License as published by the Free Software Foundation; either
15  * version 2.1 of the License, or (at your option) any later version.
16  *
17  * FFmpeg is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20  * Lesser General Public License for more details.
21  *
22  * You should have received a copy of the GNU Lesser General Public
23  * License along with FFmpeg; if not, write to the Free Software
24  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25  */
26 
27 #include "libavutil/imgutils.h"
28 
29 #include "avcodec.h"
30 #include "internal.h"
31 #include "rectangle.h"
32 #include "thread.h"
33 #include "vp8.h"
34 #include "vp8data.h"
35 
36 #if ARCH_ARM
37 # include "arm/vp8.h"
38 #endif
39 
40 #if CONFIG_VP7_DECODER && CONFIG_VP8_DECODER
41 #define VPX(vp7, f) (vp7 ? vp7_ ## f : vp8_ ## f)
42 #elif CONFIG_VP7_DECODER
43 #define VPX(vp7, f) vp7_ ## f
44 #else // CONFIG_VP8_DECODER
45 #define VPX(vp7, f) vp8_ ## f
46 #endif
47 
48 static void free_buffers(VP8Context *s)
49 {
50  int i;
51  if (s->thread_data)
52  for (i = 0; i < MAX_THREADS; i++) {
53 #if HAVE_THREADS
54  pthread_cond_destroy(&s->thread_data[i].cond);
56 #endif
58  }
59  av_freep(&s->thread_data);
62  av_freep(&s->top_nnz);
63  av_freep(&s->top_border);
64 
65  s->macroblocks = NULL;
66 }
67 
68 static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
69 {
70  int ret;
71  if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
72  ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
73  return ret;
74  if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
76  return AVERROR(ENOMEM);
77  }
78  return 0;
79 }
80 
82 {
85 }
86 
87 #if CONFIG_VP8_DECODER
88 static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
89 {
90  int ret;
91 
92  vp8_release_frame(s, dst);
93 
94  if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
95  return ret;
96  if (src->seg_map &&
97  !(dst->seg_map = av_buffer_ref(src->seg_map))) {
98  vp8_release_frame(s, dst);
99  return AVERROR(ENOMEM);
100  }
101 
102  return 0;
103 }
104 #endif /* CONFIG_VP8_DECODER */
105 
106 static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
107 {
108  VP8Context *s = avctx->priv_data;
109  int i;
110 
111  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
112  vp8_release_frame(s, &s->frames[i]);
113  memset(s->framep, 0, sizeof(s->framep));
114 
115  if (free_mem)
116  free_buffers(s);
117 }
118 
/* AVCodec flush callback: drop all reference frames but keep the
 * per-context buffers allocated for continued decoding. */
static void vp8_decode_flush(AVCodecContext *avctx)
{
    vp8_decode_flush_impl(avctx, 0);
}
123 
125 {
126  VP8Frame *frame = NULL;
127  int i;
128 
129  // find a free buffer
130  for (i = 0; i < 5; i++)
131  if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
132  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
133  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
134  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
135  frame = &s->frames[i];
136  break;
137  }
138  if (i == 5) {
139  av_log(s->avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
140  abort();
141  }
142  if (frame->tf.f->data[0])
143  vp8_release_frame(s, frame);
144 
145  return frame;
146 }
147 
148 static av_always_inline
149 int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
150 {
151  AVCodecContext *avctx = s->avctx;
152  int i, ret;
153 
154  if (width != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
155  height != s->avctx->height) {
157 
158  ret = ff_set_dimensions(s->avctx, width, height);
159  if (ret < 0)
160  return ret;
161  }
162 
163  s->mb_width = (s->avctx->coded_width + 15) / 16;
164  s->mb_height = (s->avctx->coded_height + 15) / 16;
165 
166  s->mb_layout = is_vp7 || avctx->active_thread_type == FF_THREAD_SLICE &&
167  FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1;
168  if (!s->mb_layout) { // Frame threading and one thread
169  s->macroblocks_base = av_mallocz((s->mb_width + s->mb_height * 2 + 1) *
170  sizeof(*s->macroblocks));
172  } else // Sliced threading
173  s->macroblocks_base = av_mallocz((s->mb_width + 2) * (s->mb_height + 2) *
174  sizeof(*s->macroblocks));
175  s->top_nnz = av_mallocz(s->mb_width * sizeof(*s->top_nnz));
176  s->top_border = av_mallocz((s->mb_width + 1) * sizeof(*s->top_border));
178 
179  if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
180  !s->thread_data || (!s->intra4x4_pred_mode_top && !s->mb_layout)) {
181  free_buffers(s);
182  return AVERROR(ENOMEM);
183  }
184 
185  for (i = 0; i < MAX_THREADS; i++) {
187  av_mallocz(s->mb_width * sizeof(*s->thread_data[0].filter_strength));
188  if (!s->thread_data[i].filter_strength) {
189  free_buffers(s);
190  return AVERROR(ENOMEM);
191  }
192 #if HAVE_THREADS
193  pthread_mutex_init(&s->thread_data[i].lock, NULL);
194  pthread_cond_init(&s->thread_data[i].cond, NULL);
195 #endif
196  }
197 
198  s->macroblocks = s->macroblocks_base + 1;
199 
200  return 0;
201 }
202 
204 {
205  return update_dimensions(s, width, height, IS_VP7);
206 }
207 
209 {
210  return update_dimensions(s, width, height, IS_VP8);
211 }
212 
213 
215 {
216  VP56RangeCoder *c = &s->c;
217  int i;
218 
220 
221  if (vp8_rac_get(c)) { // update segment feature data
223 
224  for (i = 0; i < 4; i++)
226 
227  for (i = 0; i < 4; i++)
229  }
230  if (s->segmentation.update_map)
231  for (i = 0; i < 3; i++)
232  s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
233 }
234 
236 {
237  VP56RangeCoder *c = &s->c;
238  int i;
239 
240  for (i = 0; i < 4; i++) {
241  if (vp8_rac_get(c)) {
242  s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);
243 
244  if (vp8_rac_get(c))
245  s->lf_delta.ref[i] = -s->lf_delta.ref[i];
246  }
247  }
248 
249  for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
250  if (vp8_rac_get(c)) {
251  s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);
252 
253  if (vp8_rac_get(c))
254  s->lf_delta.mode[i] = -s->lf_delta.mode[i];
255  }
256  }
257 }
258 
259 static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
260 {
261  const uint8_t *sizes = buf;
262  int i;
263 
264  s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);
265 
266  buf += 3 * (s->num_coeff_partitions - 1);
267  buf_size -= 3 * (s->num_coeff_partitions - 1);
268  if (buf_size < 0)
269  return -1;
270 
271  for (i = 0; i < s->num_coeff_partitions - 1; i++) {
272  int size = AV_RL24(sizes + 3 * i);
273  if (buf_size - size < 0)
274  return -1;
275 
276  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
277  buf += size;
278  buf_size -= size;
279  }
280  ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);
281 
282  return 0;
283 }
284 
285 static void vp7_get_quants(VP8Context *s)
286 {
287  VP56RangeCoder *c = &s->c;
288 
289  int yac_qi = vp8_rac_get_uint(c, 7);
290  int ydc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
291  int y2dc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
292  int y2ac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
293  int uvdc_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
294  int uvac_qi = vp8_rac_get(c) ? vp8_rac_get_uint(c, 7) : yac_qi;
295 
296  s->qmat[0].luma_qmul[0] = vp7_ydc_qlookup[ydc_qi];
297  s->qmat[0].luma_qmul[1] = vp7_yac_qlookup[yac_qi];
298  s->qmat[0].luma_dc_qmul[0] = vp7_y2dc_qlookup[y2dc_qi];
299  s->qmat[0].luma_dc_qmul[1] = vp7_y2ac_qlookup[y2ac_qi];
300  s->qmat[0].chroma_qmul[0] = FFMIN(vp7_ydc_qlookup[uvdc_qi], 132);
301  s->qmat[0].chroma_qmul[1] = vp7_yac_qlookup[uvac_qi];
302 }
303 
304 static void vp8_get_quants(VP8Context *s)
305 {
306  VP56RangeCoder *c = &s->c;
307  int i, base_qi;
308 
309  int yac_qi = vp8_rac_get_uint(c, 7);
310  int ydc_delta = vp8_rac_get_sint(c, 4);
311  int y2dc_delta = vp8_rac_get_sint(c, 4);
312  int y2ac_delta = vp8_rac_get_sint(c, 4);
313  int uvdc_delta = vp8_rac_get_sint(c, 4);
314  int uvac_delta = vp8_rac_get_sint(c, 4);
315 
316  for (i = 0; i < 4; i++) {
317  if (s->segmentation.enabled) {
318  base_qi = s->segmentation.base_quant[i];
319  if (!s->segmentation.absolute_vals)
320  base_qi += yac_qi;
321  } else
322  base_qi = yac_qi;
323 
324  s->qmat[i].luma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta, 7)];
325  s->qmat[i].luma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi, 7)];
326  s->qmat[i].luma_dc_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)] * 2;
327  /* 101581>>16 is equivalent to 155/100 */
328  s->qmat[i].luma_dc_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)] * 101581 >> 16;
329  s->qmat[i].chroma_qmul[0] = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
330  s->qmat[i].chroma_qmul[1] = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];
331 
332  s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
333  s->qmat[i].chroma_qmul[0] = FFMIN(s->qmat[i].chroma_qmul[0], 132);
334  }
335 }
336 
337 /**
338  * Determine which buffers golden and altref should be updated with after this frame.
339  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
340  *
341  * Intra frames update all 3 references
342  * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
343  * If the update (golden|altref) flag is set, it's updated with the current frame
344  * if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
345  * If the flag is not set, the number read means:
346  * 0: no update
347  * 1: VP56_FRAME_PREVIOUS
348  * 2: update golden with altref, or update altref with golden
349  */
351 {
352  VP56RangeCoder *c = &s->c;
353 
354  if (update)
355  return VP56_FRAME_CURRENT;
356 
357  switch (vp8_rac_get_uint(c, 2)) {
358  case 1:
359  return VP56_FRAME_PREVIOUS;
360  case 2:
362  }
363  return VP56_FRAME_NONE;
364 }
365 
367 {
368  int i, j;
369  for (i = 0; i < 4; i++)
370  for (j = 0; j < 16; j++)
371  memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
372  sizeof(s->prob->token[i][j]));
373 }
374 
376 {
377  VP56RangeCoder *c = &s->c;
378  int i, j, k, l, m;
379 
380  for (i = 0; i < 4; i++)
381  for (j = 0; j < 8; j++)
382  for (k = 0; k < 3; k++)
383  for (l = 0; l < NUM_DCT_TOKENS-1; l++)
385  int prob = vp8_rac_get_uint(c, 8);
386  for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
387  s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
388  }
389 }
390 
391 #define VP7_MVC_SIZE 17
392 #define VP8_MVC_SIZE 19
393 
395  int mvc_size)
396 {
397  VP56RangeCoder *c = &s->c;
398  int i, j;
399 
400  if (vp8_rac_get(c))
401  for (i = 0; i < 4; i++)
402  s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
403  if (vp8_rac_get(c))
404  for (i = 0; i < 3; i++)
405  s->prob->pred8x8c[i] = vp8_rac_get_uint(c, 8);
406 
407  // 17.2 MV probability update
408  for (i = 0; i < 2; i++)
409  for (j = 0; j < mvc_size; j++)
411  s->prob->mvc[i][j] = vp8_rac_get_nn(c);
412 }
413 
/**
 * Read the golden/altref update flags for an inter frame and record
 * (via ref_to_update()) which buffer each reference will be refreshed
 * from once this frame is decoded.
 */
static void update_refs(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;

    /* both flags must be read before ref_to_update(), which consumes
     * additional bits when its update flag is unset */
    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
}
424 
425 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
426 {
427  int i, j;
428 
429  for (j = 1; j < 3; j++) {
430  for (i = 0; i < height / 2; i++)
431  memcpy(dst->data[j] + i * dst->linesize[j],
432  src->data[j] + i * src->linesize[j], width / 2);
433  }
434 }
435 
/**
 * Apply a per-pixel fade to the luma plane:
 * dst = clip(src + src * beta / 256 + alpha).
 * Used by the VP7 fading feature; src and dst may alias row-for-row
 * since each output pixel depends only on the same input pixel.
 */
static void fade(uint8_t *dst, int dst_linesize,
                 const uint8_t *src, int src_linesize,
                 int width, int height,
                 int alpha, int beta)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *src_row = src + y * src_linesize;
        uint8_t       *dst_row = dst + y * dst_linesize;

        for (x = 0; x < width; x++) {
            int luma = src_row[x];
            dst_row[x] = av_clip_uint8(luma + ((luma * beta) >> 8) + alpha);
        }
    }
}
449 
451 {
452  int alpha = (int8_t) vp8_rac_get_uint(c, 8);
453  int beta = (int8_t) vp8_rac_get_uint(c, 8);
454  int ret;
455 
456  if (!s->keyframe && (alpha || beta)) {
457  int width = s->mb_width * 16;
458  int height = s->mb_height * 16;
459  AVFrame *src, *dst;
460 
461  if (!s->framep[VP56_FRAME_PREVIOUS] ||
462  !s->framep[VP56_FRAME_GOLDEN]) {
463  av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
464  return AVERROR_INVALIDDATA;
465  }
466 
467  dst =
468  src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
469 
470  /* preserve the golden frame, write a new previous frame */
473  if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
474  return ret;
475 
476  dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
477 
478  copy_chroma(dst, src, width, height);
479  }
480 
481  fade(dst->data[0], dst->linesize[0],
482  src->data[0], src->linesize[0],
483  width, height, alpha, beta);
484  }
485 
486  return 0;
487 }
488 
489 static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
490 {
491  VP56RangeCoder *c = &s->c;
492  int part1_size, hscale, vscale, i, j, ret;
493  int width = s->avctx->width;
494  int height = s->avctx->height;
495 
496  s->profile = (buf[0] >> 1) & 7;
497  if (s->profile > 1) {
498  avpriv_request_sample(s->avctx, "Unknown profile %d", s->profile);
499  return AVERROR_INVALIDDATA;
500  }
501 
502  s->keyframe = !(buf[0] & 1);
503  s->invisible = 0;
504  part1_size = AV_RL24(buf) >> 4;
505 
506  if (buf_size < 4 - s->profile + part1_size) {
507  av_log(s->avctx, AV_LOG_ERROR, "Buffer size %d is too small, needed : %d\n", buf_size, 4 - s->profile + part1_size);
508  return AVERROR_INVALIDDATA;
509  }
510 
511  buf += 4 - s->profile;
512  buf_size -= 4 - s->profile;
513 
514  memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
515 
516  ff_vp56_init_range_decoder(c, buf, part1_size);
517  buf += part1_size;
518  buf_size -= part1_size;
519 
520  /* A. Dimension information (keyframes only) */
521  if (s->keyframe) {
522  width = vp8_rac_get_uint(c, 12);
523  height = vp8_rac_get_uint(c, 12);
524  hscale = vp8_rac_get_uint(c, 2);
525  vscale = vp8_rac_get_uint(c, 2);
526  if (hscale || vscale)
527  avpriv_request_sample(s->avctx, "Upscaling");
528 
532  sizeof(s->prob->pred16x16));
534  sizeof(s->prob->pred8x8c));
535  for (i = 0; i < 2; i++)
536  memcpy(s->prob->mvc[i], vp7_mv_default_prob[i],
537  sizeof(vp7_mv_default_prob[i]));
538  memset(&s->segmentation, 0, sizeof(s->segmentation));
539  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
540  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
541  }
542 
543  if (s->keyframe || s->profile > 0)
544  memset(s->inter_dc_pred, 0 , sizeof(s->inter_dc_pred));
545 
546  /* B. Decoding information for all four macroblock-level features */
547  for (i = 0; i < 4; i++) {
548  s->feature_enabled[i] = vp8_rac_get(c);
549  if (s->feature_enabled[i]) {
551 
552  for (j = 0; j < 3; j++)
553  s->feature_index_prob[i][j] =
554  vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
555 
556  if (vp7_feature_value_size[s->profile][i])
557  for (j = 0; j < 4; j++)
558  s->feature_value[i][j] =
560  }
561  }
562 
563  s->segmentation.enabled = 0;
564  s->segmentation.update_map = 0;
565  s->lf_delta.enabled = 0;
566 
567  s->num_coeff_partitions = 1;
568  ff_vp56_init_range_decoder(&s->coeff_partition[0], buf, buf_size);
569 
570  if (!s->macroblocks_base || /* first frame */
571  width != s->avctx->width || height != s->avctx->height ||
572  (width + 15) / 16 != s->mb_width || (height + 15) / 16 != s->mb_height) {
573  if ((ret = vp7_update_dimensions(s, width, height)) < 0)
574  return ret;
575  }
576 
577  /* C. Dequantization indices */
578  vp7_get_quants(s);
579 
580  /* D. Golden frame update flag (a Flag) for interframes only */
581  if (!s->keyframe) {
584  }
585 
586  s->update_last = 1;
587  s->update_probabilities = 1;
588  s->fade_present = 1;
589 
590  if (s->profile > 0) {
592  if (!s->update_probabilities)
593  s->prob[1] = s->prob[0];
594 
595  if (!s->keyframe)
596  s->fade_present = vp8_rac_get(c);
597  }
598 
599  /* E. Fading information for previous frame */
600  if (s->fade_present && vp8_rac_get(c)) {
601  if ((ret = vp7_fade_frame(s ,c)) < 0)
602  return ret;
603  }
604 
605  /* F. Loop filter type */
606  if (!s->profile)
607  s->filter.simple = vp8_rac_get(c);
608 
609  /* G. DCT coefficient ordering specification */
610  if (vp8_rac_get(c))
611  for (i = 1; i < 16; i++)
612  s->prob[0].scan[i] = zigzag_scan[vp8_rac_get_uint(c, 4)];
613 
614  /* H. Loop filter levels */
615  if (s->profile > 0)
616  s->filter.simple = vp8_rac_get(c);
617  s->filter.level = vp8_rac_get_uint(c, 6);
618  s->filter.sharpness = vp8_rac_get_uint(c, 3);
619 
620  /* I. DCT coefficient probability update; 13.3 Token Probability Updates */
622 
623  s->mbskip_enabled = 0;
624 
625  /* J. The remaining frame header data occurs ONLY FOR INTERFRAMES */
626  if (!s->keyframe) {
627  s->prob->intra = vp8_rac_get_uint(c, 8);
628  s->prob->last = vp8_rac_get_uint(c, 8);
630  }
631 
632  return 0;
633 }
634 
635 static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
636 {
637  VP56RangeCoder *c = &s->c;
638  int header_size, hscale, vscale, ret;
639  int width = s->avctx->width;
640  int height = s->avctx->height;
641 
642  s->keyframe = !(buf[0] & 1);
643  s->profile = (buf[0]>>1) & 7;
644  s->invisible = !(buf[0] & 0x10);
645  header_size = AV_RL24(buf) >> 5;
646  buf += 3;
647  buf_size -= 3;
648 
649  if (s->profile > 3)
650  av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);
651 
652  if (!s->profile)
654  sizeof(s->put_pixels_tab));
655  else // profile 1-3 use bilinear, 4+ aren't defined so whatever
657  sizeof(s->put_pixels_tab));
658 
659  if (header_size > buf_size - 7 * s->keyframe) {
660  av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
661  return AVERROR_INVALIDDATA;
662  }
663 
664  if (s->keyframe) {
665  if (AV_RL24(buf) != 0x2a019d) {
667  "Invalid start code 0x%x\n", AV_RL24(buf));
668  return AVERROR_INVALIDDATA;
669  }
670  width = AV_RL16(buf + 3) & 0x3fff;
671  height = AV_RL16(buf + 5) & 0x3fff;
672  hscale = buf[4] >> 6;
673  vscale = buf[6] >> 6;
674  buf += 7;
675  buf_size -= 7;
676 
677  if (hscale || vscale)
678  avpriv_request_sample(s->avctx, "Upscaling");
679 
683  sizeof(s->prob->pred16x16));
685  sizeof(s->prob->pred8x8c));
686  memcpy(s->prob->mvc, vp8_mv_default_prob,
687  sizeof(s->prob->mvc));
688  memset(&s->segmentation, 0, sizeof(s->segmentation));
689  memset(&s->lf_delta, 0, sizeof(s->lf_delta));
690  }
691 
692  ff_vp56_init_range_decoder(c, buf, header_size);
693  buf += header_size;
694  buf_size -= header_size;
695 
696  if (s->keyframe) {
697  s->colorspace = vp8_rac_get(c);
698  if (s->colorspace)
699  av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
700  s->fullrange = vp8_rac_get(c);
701  }
702 
703  if ((s->segmentation.enabled = vp8_rac_get(c)))
705  else
706  s->segmentation.update_map = 0; // FIXME: move this to some init function?
707 
708  s->filter.simple = vp8_rac_get(c);
709  s->filter.level = vp8_rac_get_uint(c, 6);
710  s->filter.sharpness = vp8_rac_get_uint(c, 3);
711 
712  if ((s->lf_delta.enabled = vp8_rac_get(c)))
713  if (vp8_rac_get(c))
714  update_lf_deltas(s);
715 
716  if (setup_partitions(s, buf, buf_size)) {
717  av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
718  return AVERROR_INVALIDDATA;
719  }
720 
721  if (!s->macroblocks_base || /* first frame */
722  width != s->avctx->width || height != s->avctx->height ||
723  (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height)
724  if ((ret = vp8_update_dimensions(s, width, height)) < 0)
725  return ret;
726 
727  vp8_get_quants(s);
728 
729  if (!s->keyframe) {
730  update_refs(s);
732  s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
733  }
734 
735  // if we aren't saving this frame's probabilities for future frames,
736  // make a copy of the current probabilities
737  if (!(s->update_probabilities = vp8_rac_get(c)))
738  s->prob[1] = s->prob[0];
739 
740  s->update_last = s->keyframe || vp8_rac_get(c);
741 
743 
744  if ((s->mbskip_enabled = vp8_rac_get(c)))
745  s->prob->mbskip = vp8_rac_get_uint(c, 8);
746 
747  if (!s->keyframe) {
748  s->prob->intra = vp8_rac_get_uint(c, 8);
749  s->prob->last = vp8_rac_get_uint(c, 8);
750  s->prob->golden = vp8_rac_get_uint(c, 8);
752  }
753 
754  return 0;
755 }
756 
757 static av_always_inline
758 void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
759 {
760  dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
761  av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
762  dst->y = av_clip(src->y, av_clip(s->mv_min.y, INT16_MIN, INT16_MAX),
763  av_clip(s->mv_max.y, INT16_MIN, INT16_MAX));
764 }
765 
766 /**
767  * Motion vector coding, 17.1.
768  */
770 {
771  int bit, x = 0;
772 
773  if (vp56_rac_get_prob_branchy(c, p[0])) {
774  int i;
775 
776  for (i = 0; i < 3; i++)
777  x += vp56_rac_get_prob(c, p[9 + i]) << i;
778  for (i = (vp7 ? 7 : 9); i > 3; i--)
779  x += vp56_rac_get_prob(c, p[9 + i]) << i;
780  if (!(x & (vp7 ? 0xF0 : 0xFFF0)) || vp56_rac_get_prob(c, p[12]))
781  x += 8;
782  } else {
783  // small_mvtree
784  const uint8_t *ps = p + 2;
785  bit = vp56_rac_get_prob(c, *ps);
786  ps += 1 + 3 * bit;
787  x += 4 * bit;
788  bit = vp56_rac_get_prob(c, *ps);
789  ps += 1 + bit;
790  x += 2 * bit;
791  x += vp56_rac_get_prob(c, *ps);
792  }
793 
794  return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
795 }
796 
798 {
799  return read_mv_component(c, p, 1);
800 }
801 
803 {
804  return read_mv_component(c, p, 0);
805 }
806 
807 static av_always_inline
808 const uint8_t *get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
809 {
810  if (is_vp7)
811  return vp7_submv_prob;
812 
813  if (left == top)
814  return vp8_submv_prob[4 - !!left];
815  if (!top)
816  return vp8_submv_prob[2];
817  return vp8_submv_prob[1 - !!left];
818 }
819 
820 /**
821  * Split motion vector prediction, 16.4.
822  * @returns the number of motion vectors parsed (2, 4 or 16)
823  */
824 static av_always_inline
826  int layout, int is_vp7)
827 {
828  int part_idx;
829  int n, num;
830  VP8Macroblock *top_mb;
831  VP8Macroblock *left_mb = &mb[-1];
832  const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
833  const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
834  VP56mv *top_mv;
835  VP56mv *left_mv = left_mb->bmv;
836  VP56mv *cur_mv = mb->bmv;
837 
838  if (!layout) // layout is inlined, s->mb_layout is not
839  top_mb = &mb[2];
840  else
841  top_mb = &mb[-s->mb_width - 1];
842  mbsplits_top = vp8_mbsplits[top_mb->partitioning];
843  top_mv = top_mb->bmv;
844 
848  else
849  part_idx = VP8_SPLITMVMODE_8x8;
850  } else {
851  part_idx = VP8_SPLITMVMODE_4x4;
852  }
853 
854  num = vp8_mbsplit_count[part_idx];
855  mbsplits_cur = vp8_mbsplits[part_idx],
856  firstidx = vp8_mbfirstidx[part_idx];
857  mb->partitioning = part_idx;
858 
859  for (n = 0; n < num; n++) {
860  int k = firstidx[n];
861  uint32_t left, above;
862  const uint8_t *submv_prob;
863 
864  if (!(k & 3))
865  left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
866  else
867  left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
868  if (k <= 3)
869  above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
870  else
871  above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);
872 
873  submv_prob = get_submv_prob(left, above, is_vp7);
874 
875  if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
876  if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
877  if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
878  mb->bmv[n].y = mb->mv.y +
879  read_mv_component(c, s->prob->mvc[0], is_vp7);
880  mb->bmv[n].x = mb->mv.x +
881  read_mv_component(c, s->prob->mvc[1], is_vp7);
882  } else {
883  AV_ZERO32(&mb->bmv[n]);
884  }
885  } else {
886  AV_WN32A(&mb->bmv[n], above);
887  }
888  } else {
889  AV_WN32A(&mb->bmv[n], left);
890  }
891  }
892 
893  return num;
894 }
895 
896 /**
897  * The vp7 reference decoder uses a padding macroblock column (added to right
898  * edge of the frame) to guard against illegal macroblock offsets. The
899  * algorithm has bugs that permit offsets to straddle the padding column.
900  * This function replicates those bugs.
901  *
902  * @param[out] edge_x macroblock x address
903  * @param[out] edge_y macroblock y address
904  *
905  * @return macroblock offset legal (boolean)
906  */
/**
 * Compute the macroblock address reached from (mb_x, mb_y) by
 * (xoffset, yoffset) on a virtual grid that includes the VP7 padding
 * column at the right edge, replicating the reference decoder's
 * (buggy) straddling behaviour.
 *
 * @param[out] edge_x resulting macroblock x address
 * @param[out] edge_y resulting macroblock y address
 * @return 1 if the offset is legal, 0 otherwise
 */
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
                                   int xoffset, int yoffset, int boundary,
                                   int *edge_x, int *edge_y)
{
    const int vwidth = mb_width + 1;  /* grid width incl. padding column */
    const int pos    = (mb_y + yoffset) * vwidth + mb_x + xoffset;

    /* reject addresses before the boundary or on the padding column */
    if (pos < boundary || pos % vwidth == vwidth - 1)
        return 0;

    *edge_y = pos / vwidth;
    *edge_x = pos % vwidth;
    return 1;
}
919 
/* Return the MV of the given sub-block for split-MV macroblocks;
 * for every other mode all sub-blocks share bmv[0]. */
static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
{
    return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
}
924 
925 static av_always_inline
927  int mb_x, int mb_y, int layout)
928 {
929  VP8Macroblock *mb_edge[12];
930  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
931  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
932  int idx = CNT_ZERO;
933  VP56mv near_mv[3];
934  uint8_t cnt[3] = { 0 };
935  VP56RangeCoder *c = &s->c;
936  int i;
937 
938  AV_ZERO32(&near_mv[0]);
939  AV_ZERO32(&near_mv[1]);
940  AV_ZERO32(&near_mv[2]);
941 
942  for (i = 0; i < VP7_MV_PRED_COUNT; i++) {
943  const VP7MVPred * pred = &vp7_mv_pred[i];
944  int edge_x, edge_y;
945 
946  if (vp7_calculate_mb_offset(mb_x, mb_y, s->mb_width, pred->xoffset,
947  pred->yoffset, !s->profile, &edge_x, &edge_y)) {
948  VP8Macroblock *edge = mb_edge[i] = (s->mb_layout == 1)
949  ? s->macroblocks_base + 1 + edge_x +
950  (s->mb_width + 1) * (edge_y + 1)
951  : s->macroblocks + edge_x +
952  (s->mb_height - edge_y - 1) * 2;
953  uint32_t mv = AV_RN32A(get_bmv_ptr(edge, vp7_mv_pred[i].subblock));
954  if (mv) {
955  if (AV_RN32A(&near_mv[CNT_NEAREST])) {
956  if (mv == AV_RN32A(&near_mv[CNT_NEAREST])) {
957  idx = CNT_NEAREST;
958  } else if (AV_RN32A(&near_mv[CNT_NEAR])) {
959  if (mv != AV_RN32A(&near_mv[CNT_NEAR]))
960  continue;
961  idx = CNT_NEAR;
962  } else {
963  AV_WN32A(&near_mv[CNT_NEAR], mv);
964  idx = CNT_NEAR;
965  }
966  } else {
967  AV_WN32A(&near_mv[CNT_NEAREST], mv);
968  idx = CNT_NEAREST;
969  }
970  } else {
971  idx = CNT_ZERO;
972  }
973  } else {
974  idx = CNT_ZERO;
975  }
976  cnt[idx] += vp7_mv_pred[i].score;
977  }
978 
980 
981  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_ZERO]][0])) {
982  mb->mode = VP8_MVMODE_MV;
983 
984  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAREST]][1])) {
985 
986  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][2])) {
987 
988  if (cnt[CNT_NEAREST] > cnt[CNT_NEAR])
989  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAREST] ? 0 : AV_RN32A(&near_mv[CNT_NEAREST]));
990  else
991  AV_WN32A(&mb->mv, cnt[CNT_ZERO] > cnt[CNT_NEAR] ? 0 : AV_RN32A(&near_mv[CNT_NEAR]));
992 
993  if (vp56_rac_get_prob_branchy(c, vp7_mode_contexts[cnt[CNT_NEAR]][3])) {
994  mb->mode = VP8_MVMODE_SPLIT;
995  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP7) - 1];
996  } else {
997  mb->mv.y += vp7_read_mv_component(c, s->prob->mvc[0]);
998  mb->mv.x += vp7_read_mv_component(c, s->prob->mvc[1]);
999  mb->bmv[0] = mb->mv;
1000  }
1001  } else {
1002  mb->mv = near_mv[CNT_NEAR];
1003  mb->bmv[0] = mb->mv;
1004  }
1005  } else {
1006  mb->mv = near_mv[CNT_NEAREST];
1007  mb->bmv[0] = mb->mv;
1008  }
1009  } else {
1010  mb->mode = VP8_MVMODE_ZERO;
1011  AV_ZERO32(&mb->mv);
1012  mb->bmv[0] = mb->mv;
1013  }
1014 }
1015 
1016 static av_always_inline
1018  int mb_x, int mb_y, int layout)
1019 {
1020  VP8Macroblock *mb_edge[3] = { 0 /* top */,
1021  mb - 1 /* left */,
1022  0 /* top-left */ };
1023  enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
1024  enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
1025  int idx = CNT_ZERO;
1026  int cur_sign_bias = s->sign_bias[mb->ref_frame];
1027  int8_t *sign_bias = s->sign_bias;
1028  VP56mv near_mv[4];
1029  uint8_t cnt[4] = { 0 };
1030  VP56RangeCoder *c = &s->c;
1031 
1032  if (!layout) { // layout is inlined (s->mb_layout is not)
1033  mb_edge[0] = mb + 2;
1034  mb_edge[2] = mb + 1;
1035  } else {
1036  mb_edge[0] = mb - s->mb_width - 1;
1037  mb_edge[2] = mb - s->mb_width - 2;
1038  }
1039 
1040  AV_ZERO32(&near_mv[0]);
1041  AV_ZERO32(&near_mv[1]);
1042  AV_ZERO32(&near_mv[2]);
1043 
1044  /* Process MB on top, left and top-left */
1045 #define MV_EDGE_CHECK(n) \
1046  { \
1047  VP8Macroblock *edge = mb_edge[n]; \
1048  int edge_ref = edge->ref_frame; \
1049  if (edge_ref != VP56_FRAME_CURRENT) { \
1050  uint32_t mv = AV_RN32A(&edge->mv); \
1051  if (mv) { \
1052  if (cur_sign_bias != sign_bias[edge_ref]) { \
1053  /* SWAR negate of the values in mv. */ \
1054  mv = ~mv; \
1055  mv = ((mv & 0x7fff7fff) + \
1056  0x00010001) ^ (mv & 0x80008000); \
1057  } \
1058  if (!n || mv != AV_RN32A(&near_mv[idx])) \
1059  AV_WN32A(&near_mv[++idx], mv); \
1060  cnt[idx] += 1 + (n != 2); \
1061  } else \
1062  cnt[CNT_ZERO] += 1 + (n != 2); \
1063  } \
1064  }
1065 
1066  MV_EDGE_CHECK(0)
1067  MV_EDGE_CHECK(1)
1068  MV_EDGE_CHECK(2)
1069 
1071  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
1072  mb->mode = VP8_MVMODE_MV;
1073 
1074  /* If we have three distinct MVs, merge first and last if they're the same */
1075  if (cnt[CNT_SPLITMV] &&
1076  AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
1077  cnt[CNT_NEAREST] += 1;
1078 
1079  /* Swap near and nearest if necessary */
1080  if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
1081  FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
1082  FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
1083  }
1084 
1085  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
1086  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
1087  /* Choose the best mv out of 0,0 and the nearest mv */
1088  clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
1089  cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
1090  (mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
1091  (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
1092 
1093  if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
1094  mb->mode = VP8_MVMODE_SPLIT;
1095  mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout, IS_VP8) - 1];
1096  } else {
1097  mb->mv.y += vp8_read_mv_component(c, s->prob->mvc[0]);
1098  mb->mv.x += vp8_read_mv_component(c, s->prob->mvc[1]);
1099  mb->bmv[0] = mb->mv;
1100  }
1101  } else {
1102  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
1103  mb->bmv[0] = mb->mv;
1104  }
1105  } else {
1106  clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
1107  mb->bmv[0] = mb->mv;
1108  }
1109  } else {
1110  mb->mode = VP8_MVMODE_ZERO;
1111  AV_ZERO32(&mb->mv);
1112  mb->bmv[0] = mb->mv;
1113  }
1114 }
1115 
1116 static av_always_inline
1118  int mb_x, int keyframe, int layout)
1119 {
1120  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1121 
1122  if (layout) {
1123  VP8Macroblock *mb_top = mb - s->mb_width - 1;
1124  memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
1125  }
1126  if (keyframe) {
1127  int x, y;
1128  uint8_t *top;
1129  uint8_t *const left = s->intra4x4_pred_mode_left;
1130  if (layout)
1131  top = mb->intra4x4_pred_mode_top;
1132  else
1133  top = s->intra4x4_pred_mode_top + 4 * mb_x;
1134  for (y = 0; y < 4; y++) {
1135  for (x = 0; x < 4; x++) {
1136  const uint8_t *ctx;
1137  ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
1138  *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
1139  left[y] = top[x] = *intra4x4;
1140  intra4x4++;
1141  }
1142  }
1143  } else {
1144  int i;
1145  for (i = 0; i < 16; i++)
1146  intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree,
1148  }
1149 }
1150 
1151 static av_always_inline
1152 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1153  uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
1154 {
1155  VP56RangeCoder *c = &s->c;
1156  const char *vp7_feature_name[] = { "q-index",
1157  "lf-delta",
1158  "partial-golden-update",
1159  "blit-pitch" };
1160  if (is_vp7) {
1161  int i;
1162  *segment = 0;
1163  for (i = 0; i < 4; i++) {
1164  if (s->feature_enabled[i]) {
1167  s->feature_index_prob[i]);
1169  "Feature %s present in macroblock (value 0x%x)\n",
1170  vp7_feature_name[i], s->feature_value[i][index]);
1171  }
1172  }
1173  }
1174  } else if (s->segmentation.update_map) {
1175  int bit = vp56_rac_get_prob(c, s->prob->segmentid[0]);
1176  *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
1177  } else if (s->segmentation.enabled)
1178  *segment = ref ? *ref : *segment;
1179  mb->segment = *segment;
1180 
1181  mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
1182 
1183  if (s->keyframe) {
1186 
1187  if (mb->mode == MODE_I4x4) {
1188  decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
1189  } else {
1190  const uint32_t modes = (is_vp7 ? vp7_pred4x4_mode
1191  : vp8_pred4x4_mode)[mb->mode] * 0x01010101u;
1192  if (s->mb_layout)
1193  AV_WN32A(mb->intra4x4_pred_mode_top, modes);
1194  else
1195  AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
1196  AV_WN32A(s->intra4x4_pred_mode_left, modes);
1197  }
1198 
1202  } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
1203  // inter MB, 16.2
1204  if (vp56_rac_get_prob_branchy(c, s->prob->last))
1205  mb->ref_frame =
1206  (!is_vp7 && vp56_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
1208  else
1210  s->ref_count[mb->ref_frame - 1]++;
1211 
1212  // motion vectors, 16.3
1213  if (is_vp7)
1214  vp7_decode_mvs(s, mb, mb_x, mb_y, layout);
1215  else
1216  vp8_decode_mvs(s, mb, mb_x, mb_y, layout);
1217  } else {
1218  // intra MB, 16.1
1220 
1221  if (mb->mode == MODE_I4x4)
1222  decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);
1223 
1225  s->prob->pred8x8c);
1228  AV_ZERO32(&mb->bmv[0]);
1229  }
1230 }
1231 
1232 /**
1233  * @param r arithmetic bitstream reader context
1234  * @param block destination for block coefficients
1235  * @param probs probabilities to use when reading trees from the bitstream
1236  * @param i initial coeff index, 0 unless a separate DC block is coded
1237  * @param qmul array holding the dc/ac dequant factor at position 0/1
1238  *
1239  * @return 0 if no coeffs were decoded
1240  * otherwise, the index of the last coeff decoded plus one
1241  */
1242 static av_always_inline
1244  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1245  int i, uint8_t *token_prob, int16_t qmul[2],
1246  const uint8_t scan[16], int vp7)
1247 {
1248  VP56RangeCoder c = *r;
1249  goto skip_eob;
1250  do {
1251  int coeff;
1252 restart:
1253  if (!vp56_rac_get_prob_branchy(&c, token_prob[0])) // DCT_EOB
1254  break;
1255 
1256 skip_eob:
1257  if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
1258  if (++i == 16)
1259  break; // invalid input; blocks should end with EOB
1260  token_prob = probs[i][0];
1261  if (vp7)
1262  goto restart;
1263  goto skip_eob;
1264  }
1265 
1266  if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
1267  coeff = 1;
1268  token_prob = probs[i + 1][1];
1269  } else {
1270  if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
1271  coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
1272  if (coeff)
1273  coeff += vp56_rac_get_prob(&c, token_prob[5]);
1274  coeff += 2;
1275  } else {
1276  // DCT_CAT*
1277  if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
1278  if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
1279  coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
1280  } else { // DCT_CAT2
1281  coeff = 7;
1282  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
1283  coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
1284  }
1285  } else { // DCT_CAT3 and up
1286  int a = vp56_rac_get_prob(&c, token_prob[8]);
1287  int b = vp56_rac_get_prob(&c, token_prob[9 + a]);
1288  int cat = (a << 1) + b;
1289  coeff = 3 + (8 << cat);
1290  coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
1291  }
1292  }
1293  token_prob = probs[i + 1][2];
1294  }
1295  block[scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
1296  } while (++i < 16);
1297 
1298  *r = c;
1299  return i;
1300 }
1301 
1302 static av_always_inline
1303 int inter_predict_dc(int16_t block[16], int16_t pred[2])
1304 {
1305  int16_t dc = block[0];
1306  int ret = 0;
1307 
1308  if (pred[1] > 3) {
1309  dc += pred[0];
1310  ret = 1;
1311  }
1312 
1313  if (!pred[0] | !dc | ((int32_t)pred[0] ^ (int32_t)dc) >> 31) {
1314  block[0] = pred[0] = dc;
1315  pred[1] = 0;
1316  } else {
1317  if (pred[0] == dc)
1318  pred[1]++;
1319  block[0] = pred[0] = dc;
1320  }
1321 
1322  return ret;
1323 }
1324 
1326  int16_t block[16],
1327  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1328  int i, uint8_t *token_prob,
1329  int16_t qmul[2],
1330  const uint8_t scan[16])
1331 {
1332  return decode_block_coeffs_internal(r, block, probs, i,
1333  token_prob, qmul, scan, IS_VP7);
1334 }
1335 
1336 #ifndef vp8_decode_block_coeffs_internal
1338  int16_t block[16],
1339  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1340  int i, uint8_t *token_prob,
1341  int16_t qmul[2])
1342 {
1343  return decode_block_coeffs_internal(r, block, probs, i,
1344  token_prob, qmul, zigzag_scan, IS_VP8);
1345 }
1346 #endif
1347 
1348 /**
1349  * @param c arithmetic bitstream reader context
1350  * @param block destination for block coefficients
1351  * @param probs probabilities to use when reading trees from the bitstream
1352  * @param i initial coeff index, 0 unless a separate DC block is coded
1353  * @param zero_nhood the initial prediction context for number of surrounding
1354  * all-zero blocks (only left/top, so 0-2)
1355  * @param qmul array holding the dc/ac dequant factor at position 0/1
1356  * @param scan scan pattern (VP7 only)
1357  *
1358  * @return 0 if no coeffs were decoded
1359  * otherwise, the index of the last coeff decoded plus one
1360  */
1361 static av_always_inline
1363  uint8_t probs[16][3][NUM_DCT_TOKENS - 1],
1364  int i, int zero_nhood, int16_t qmul[2],
1365  const uint8_t scan[16], int vp7)
1366 {
1367  uint8_t *token_prob = probs[i][zero_nhood];
1368  if (!vp56_rac_get_prob_branchy(c, token_prob[0])) // DCT_EOB
1369  return 0;
1370  return vp7 ? vp7_decode_block_coeffs_internal(c, block, probs, i,
1371  token_prob, qmul, scan)
1372  : vp8_decode_block_coeffs_internal(c, block, probs, i,
1373  token_prob, qmul);
1374 }
1375 
1376 static av_always_inline
1378  VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9],
1379  int is_vp7)
1380 {
1381  int i, x, y, luma_start = 0, luma_ctx = 3;
1382  int nnz_pred, nnz, nnz_total = 0;
1383  int segment = mb->segment;
1384  int block_dc = 0;
1385 
1386  if (mb->mode != MODE_I4x4 && (is_vp7 || mb->mode != VP8_MVMODE_SPLIT)) {
1387  nnz_pred = t_nnz[8] + l_nnz[8];
1388 
1389  // decode DC values and do hadamard
1390  nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0,
1391  nnz_pred, s->qmat[segment].luma_dc_qmul,
1392  zigzag_scan, is_vp7);
1393  l_nnz[8] = t_nnz[8] = !!nnz;
1394 
1395  if (is_vp7 && mb->mode > MODE_I4x4) {
1396  nnz |= inter_predict_dc(td->block_dc,
1397  s->inter_dc_pred[mb->ref_frame - 1]);
1398  }
1399 
1400  if (nnz) {
1401  nnz_total += nnz;
1402  block_dc = 1;
1403  if (nnz == 1)
1404  s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
1405  else
1406  s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
1407  }
1408  luma_start = 1;
1409  luma_ctx = 0;
1410  }
1411 
1412  // luma blocks
1413  for (y = 0; y < 4; y++)
1414  for (x = 0; x < 4; x++) {
1415  nnz_pred = l_nnz[y] + t_nnz[x];
1416  nnz = decode_block_coeffs(c, td->block[y][x],
1417  s->prob->token[luma_ctx],
1418  luma_start, nnz_pred,
1419  s->qmat[segment].luma_qmul,
1420  s->prob[0].scan, is_vp7);
1421  /* nnz+block_dc may be one more than the actual last index,
1422  * but we don't care */
1423  td->non_zero_count_cache[y][x] = nnz + block_dc;
1424  t_nnz[x] = l_nnz[y] = !!nnz;
1425  nnz_total += nnz;
1426  }
1427 
1428  // chroma blocks
1429  // TODO: what to do about dimensions? 2nd dim for luma is x,
1430  // but for chroma it's (y<<1)|x
1431  for (i = 4; i < 6; i++)
1432  for (y = 0; y < 2; y++)
1433  for (x = 0; x < 2; x++) {
1434  nnz_pred = l_nnz[i + 2 * y] + t_nnz[i + 2 * x];
1435  nnz = decode_block_coeffs(c, td->block[i][(y << 1) + x],
1436  s->prob->token[2], 0, nnz_pred,
1437  s->qmat[segment].chroma_qmul,
1438  s->prob[0].scan, is_vp7);
1439  td->non_zero_count_cache[i][(y << 1) + x] = nnz;
1440  t_nnz[i + 2 * x] = l_nnz[i + 2 * y] = !!nnz;
1441  nnz_total += nnz;
1442  }
1443 
1444  // if there were no coded coeffs despite the macroblock not being marked skip,
1445  // we MUST not do the inner loop filter and should not do IDCT
1446  // Since skip isn't used for bitstream prediction, just manually set it.
1447  if (!nnz_total)
1448  mb->skip = 1;
1449 }
1450 
1451 static av_always_inline
1452 void backup_mb_border(uint8_t *top_border, uint8_t *src_y,
1453  uint8_t *src_cb, uint8_t *src_cr,
1454  int linesize, int uvlinesize, int simple)
1455 {
1456  AV_COPY128(top_border, src_y + 15 * linesize);
1457  if (!simple) {
1458  AV_COPY64(top_border + 16, src_cb + 7 * uvlinesize);
1459  AV_COPY64(top_border + 24, src_cr + 7 * uvlinesize);
1460  }
1461 }
1462 
/**
 * Exchange (xchg=1) or copy (xchg=0) pixels between the current macroblock's
 * top neighbour row and the saved top-border buffer, so intra prediction can
 * read pre-deblock pixels. Layout of top_border per MB: 16 bytes Y, 8 Cb,
 * 8 Cr; top_border_m1 addresses the previous MB's slot for the top-left pixel.
 */
static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb,
                    uint8_t *src_cr, int linesize, int uvlinesize, int mb_x,
                    int mb_y, int mb_width, int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border - 32;     // for TL prediction
    /* step up one row: we operate on the neighbour row above this MB */
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a, b, xchg) \
    do {                 \
        if (xchg)        \
            AV_SWAP64(b, a); \
        else             \
            AV_COPY64(b, a); \
    } while (0)

    XCHG(top_border_m1 + 8, src_y - 8, xchg);
    XCHG(top_border,        src_y, xchg);
    XCHG(top_border + 8,    src_y + 8, 1);
    /* top-right pixels come from the next MB's border slot */
    if (mb_x < mb_width - 1)
        XCHG(top_border + 32, src_y + 16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1 + 16, src_cb - 8, xchg);
        XCHG(top_border_m1 + 24, src_cr - 8, xchg);
        XCHG(top_border + 16, src_cb, 1);
        XCHG(top_border + 24, src_cr, 1);
    }
}
1496 
1497 static av_always_inline
1498 int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
1499 {
1500  if (!mb_x)
1501  return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
1502  else
1503  return mb_y ? mode : LEFT_DC_PRED8x8;
1504 }
1505 
1506 static av_always_inline
1507 int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
1508 {
1509  if (!mb_x)
1510  return mb_y ? VERT_PRED8x8 : (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8);
1511  else
1512  return mb_y ? mode : HOR_PRED8x8;
1513 }
1514 
1515 static av_always_inline
1516 int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
1517 {
1518  switch (mode) {
1519  case DC_PRED8x8:
1520  return check_dc_pred8x8_mode(mode, mb_x, mb_y);
1521  case VERT_PRED8x8:
1522  return !mb_y ? (vp7 ? DC_128_PRED8x8 : DC_127_PRED8x8) : mode;
1523  case HOR_PRED8x8:
1524  return !mb_x ? (vp7 ? DC_128_PRED8x8 : DC_129_PRED8x8) : mode;
1525  case PLANE_PRED8x8: /* TM */
1526  return check_tm_pred8x8_mode(mode, mb_x, mb_y, vp7);
1527  }
1528  return mode;
1529 }
1530 
1531 static av_always_inline
1532 int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
1533 {
1534  if (!mb_x) {
1535  return mb_y ? VERT_VP8_PRED : (vp7 ? DC_128_PRED : DC_129_PRED);
1536  } else {
1537  return mb_y ? mode : HOR_VP8_PRED;
1538  }
1539 }
1540 
/**
 * Remap a 4x4 intra mode for blocks on the frame border. Some modes can be
 * substituted outright; others must run on a padded copy of the neighbours,
 * signalled via *copy_buf (the caller then predicts into a stack buffer).
 */
static av_always_inline
int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y,
                                     int *copy_buf, int vp7)
{
    switch (mode) {
    case VERT_PRED:
        /* interior left column: top row exists but top-left may not */
        if (!mb_x && mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case DIAG_DOWN_LEFT_PRED:
    case VERT_LEFT_PRED:
        /* top-row blocks have no above pixels at all */
        return !mb_y ? (vp7 ? DC_128_PRED : DC_127_PRED) : mode;
    case HOR_PRED:
        if (!mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case HOR_UP_PRED:
        return !mb_x ? (vp7 ? DC_128_PRED : DC_129_PRED) : mode;
    case TM_VP8_PRED:
        return check_tm_pred4x4_mode(mode, mb_x, mb_y, vp7);
    case DC_PRED: /* 4x4 DC doesn't use the same "H.264-style" exceptions
                   * as 16x16/8x8 DC */
    case DIAG_DOWN_RIGHT_PRED:
    case VERT_RIGHT_PRED:
    case HOR_DOWN_PRED:
        /* these need the padded-copy path on any border */
        if (!mb_y || !mb_x)
            *copy_buf = 1;
        return mode;
    }
    return mode;
}
1576 
1577 static av_always_inline
1579  VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
1580 {
1581  int x, y, mode, nnz;
1582  uint32_t tr;
1583 
1584  /* for the first row, we need to run xchg_mb_border to init the top edge
1585  * to 127 otherwise, skip it if we aren't going to deblock */
1586  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1587  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1588  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1589  s->filter.simple, 1);
1590 
1591  if (mb->mode < MODE_I4x4) {
1592  mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y, is_vp7);
1593  s->hpc.pred16x16[mode](dst[0], s->linesize);
1594  } else {
1595  uint8_t *ptr = dst[0];
1596  uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
1597  const uint8_t lo = is_vp7 ? 128 : 127;
1598  const uint8_t hi = is_vp7 ? 128 : 129;
1599  uint8_t tr_top[4] = { lo, lo, lo, lo };
1600 
1601  // all blocks on the right edge of the macroblock use bottom edge
1602  // the top macroblock for their topright edge
1603  uint8_t *tr_right = ptr - s->linesize + 16;
1604 
1605  // if we're on the right edge of the frame, said edge is extended
1606  // from the top macroblock
1607  if (mb_y && mb_x == s->mb_width - 1) {
1608  tr = tr_right[-1] * 0x01010101u;
1609  tr_right = (uint8_t *) &tr;
1610  }
1611 
1612  if (mb->skip)
1614 
1615  for (y = 0; y < 4; y++) {
1616  uint8_t *topright = ptr + 4 - s->linesize;
1617  for (x = 0; x < 4; x++) {
1618  int copy = 0, linesize = s->linesize;
1619  uint8_t *dst = ptr + 4 * x;
1620  LOCAL_ALIGNED(4, uint8_t, copy_dst, [5 * 8]);
1621 
1622  if ((y == 0 || x == 3) && mb_y == 0) {
1623  topright = tr_top;
1624  } else if (x == 3)
1625  topright = tr_right;
1626 
1627  mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x,
1628  mb_y + y, &copy, is_vp7);
1629  if (copy) {
1630  dst = copy_dst + 12;
1631  linesize = 8;
1632  if (!(mb_y + y)) {
1633  copy_dst[3] = lo;
1634  AV_WN32A(copy_dst + 4, lo * 0x01010101U);
1635  } else {
1636  AV_COPY32(copy_dst + 4, ptr + 4 * x - s->linesize);
1637  if (!(mb_x + x)) {
1638  copy_dst[3] = hi;
1639  } else {
1640  copy_dst[3] = ptr[4 * x - s->linesize - 1];
1641  }
1642  }
1643  if (!(mb_x + x)) {
1644  copy_dst[11] =
1645  copy_dst[19] =
1646  copy_dst[27] =
1647  copy_dst[35] = hi;
1648  } else {
1649  copy_dst[11] = ptr[4 * x - 1];
1650  copy_dst[19] = ptr[4 * x + s->linesize - 1];
1651  copy_dst[27] = ptr[4 * x + s->linesize * 2 - 1];
1652  copy_dst[35] = ptr[4 * x + s->linesize * 3 - 1];
1653  }
1654  }
1655  s->hpc.pred4x4[mode](dst, topright, linesize);
1656  if (copy) {
1657  AV_COPY32(ptr + 4 * x, copy_dst + 12);
1658  AV_COPY32(ptr + 4 * x + s->linesize, copy_dst + 20);
1659  AV_COPY32(ptr + 4 * x + s->linesize * 2, copy_dst + 28);
1660  AV_COPY32(ptr + 4 * x + s->linesize * 3, copy_dst + 36);
1661  }
1662 
1663  nnz = td->non_zero_count_cache[y][x];
1664  if (nnz) {
1665  if (nnz == 1)
1666  s->vp8dsp.vp8_idct_dc_add(ptr + 4 * x,
1667  td->block[y][x], s->linesize);
1668  else
1669  s->vp8dsp.vp8_idct_add(ptr + 4 * x,
1670  td->block[y][x], s->linesize);
1671  }
1672  topright += 4;
1673  }
1674 
1675  ptr += 4 * s->linesize;
1676  intra4x4 += 4;
1677  }
1678  }
1679 
1681  mb_x, mb_y, is_vp7);
1682  s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
1683  s->hpc.pred8x8[mode](dst[2], s->uvlinesize);
1684 
1685  if (mb_y && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
1686  xchg_mb_border(s->top_border[mb_x + 1], dst[0], dst[1], dst[2],
1687  s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
1688  s->filter.simple, 0);
1689 }
1690 
/* Edge-extension requirements for subpel MC, indexed by fractional-pel
 * phase (0-7); phase 0 means full-pel, needing no extra pixels. */
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
1697 
1698 /**
1699  * luma MC function
1700  *
1701  * @param s VP8 decoding context
1702  * @param dst target buffer for block data at block position
1703  * @param ref reference picture buffer at origin (0, 0)
1704  * @param mv motion vector (relative to block position) to get pixel data from
1705  * @param x_off horizontal position of block from origin (0, 0)
1706  * @param y_off vertical position of block from origin (0, 0)
1707  * @param block_w width of block (16, 8 or 4)
1708  * @param block_h height of block (always same as block_w)
1709  * @param width width of src/dst plane data
1710  * @param height height of src/dst plane data
1711  * @param linesize size of a single line of plane data, including padding
1712  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1713  */
1714 static av_always_inline
1716  ThreadFrame *ref, const VP56mv *mv,
1717  int x_off, int y_off, int block_w, int block_h,
1718  int width, int height, ptrdiff_t linesize,
1719  vp8_mc_func mc_func[3][3])
1720 {
1721  uint8_t *src = ref->f->data[0];
1722 
1723  if (AV_RN32A(mv)) {
1724  int src_linesize = linesize;
1725 
1726  int mx = (mv->x * 2) & 7, mx_idx = subpel_idx[0][mx];
1727  int my = (mv->y * 2) & 7, my_idx = subpel_idx[0][my];
1728 
1729  x_off += mv->x >> 2;
1730  y_off += mv->y >> 2;
1731 
1732  // edge emulation
1733  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
1734  src += y_off * linesize + x_off;
1735  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1736  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1738  src - my_idx * linesize - mx_idx,
1739  EDGE_EMU_LINESIZE, linesize,
1740  block_w + subpel_idx[1][mx],
1741  block_h + subpel_idx[1][my],
1742  x_off - mx_idx, y_off - my_idx,
1743  width, height);
1744  src = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1745  src_linesize = EDGE_EMU_LINESIZE;
1746  }
1747  mc_func[my_idx][mx_idx](dst, linesize, src, src_linesize, block_h, mx, my);
1748  } else {
1749  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
1750  mc_func[0][0](dst, linesize, src + y_off * linesize + x_off,
1751  linesize, block_h, 0, 0);
1752  }
1753 }
1754 
1755 /**
1756  * chroma MC function
1757  *
1758  * @param s VP8 decoding context
1759  * @param dst1 target buffer for block data at block position (U plane)
1760  * @param dst2 target buffer for block data at block position (V plane)
1761  * @param ref reference picture buffer at origin (0, 0)
1762  * @param mv motion vector (relative to block position) to get pixel data from
1763  * @param x_off horizontal position of block from origin (0, 0)
1764  * @param y_off vertical position of block from origin (0, 0)
1765  * @param block_w width of block (16, 8 or 4)
1766  * @param block_h height of block (always same as block_w)
1767  * @param width width of src/dst plane data
1768  * @param height height of src/dst plane data
1769  * @param linesize size of a single line of plane data, including padding
1770  * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
1771  */
1772 static av_always_inline
1774  uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
1775  int x_off, int y_off, int block_w, int block_h,
1776  int width, int height, ptrdiff_t linesize,
1777  vp8_mc_func mc_func[3][3])
1778 {
1779  uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];
1780 
1781  if (AV_RN32A(mv)) {
1782  int mx = mv->x & 7, mx_idx = subpel_idx[0][mx];
1783  int my = mv->y & 7, my_idx = subpel_idx[0][my];
1784 
1785  x_off += mv->x >> 3;
1786  y_off += mv->y >> 3;
1787 
1788  // edge emulation
1789  src1 += y_off * linesize + x_off;
1790  src2 += y_off * linesize + x_off;
1791  ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
1792  if (x_off < mx_idx || x_off >= width - block_w - subpel_idx[2][mx] ||
1793  y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
1795  src1 - my_idx * linesize - mx_idx,
1796  EDGE_EMU_LINESIZE, linesize,
1797  block_w + subpel_idx[1][mx],
1798  block_h + subpel_idx[1][my],
1799  x_off - mx_idx, y_off - my_idx, width, height);
1800  src1 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1801  mc_func[my_idx][mx_idx](dst1, linesize, src1, EDGE_EMU_LINESIZE, block_h, mx, my);
1802 
1804  src2 - my_idx * linesize - mx_idx,
1805  EDGE_EMU_LINESIZE, linesize,
1806  block_w + subpel_idx[1][mx],
1807  block_h + subpel_idx[1][my],
1808  x_off - mx_idx, y_off - my_idx, width, height);
1809  src2 = td->edge_emu_buffer + mx_idx + EDGE_EMU_LINESIZE * my_idx;
1810  mc_func[my_idx][mx_idx](dst2, linesize, src2, EDGE_EMU_LINESIZE, block_h, mx, my);
1811  } else {
1812  mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
1813  mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
1814  }
1815  } else {
1816  ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
1817  mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1818  mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
1819  }
1820 }
1821 
1822 static av_always_inline
1824  ThreadFrame *ref_frame, int x_off, int y_off,
1825  int bx_off, int by_off, int block_w, int block_h,
1826  int width, int height, VP56mv *mv)
1827 {
1828  VP56mv uvmv = *mv;
1829 
1830  /* Y */
1831  vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
1832  ref_frame, mv, x_off + bx_off, y_off + by_off,
1833  block_w, block_h, width, height, s->linesize,
1834  s->put_pixels_tab[block_w == 8]);
1835 
1836  /* U/V */
1837  if (s->profile == 3) {
1838  /* this block only applies VP8; it is safe to check
1839  * only the profile, as VP7 profile <= 1 */
1840  uvmv.x &= ~7;
1841  uvmv.y &= ~7;
1842  }
1843  x_off >>= 1;
1844  y_off >>= 1;
1845  bx_off >>= 1;
1846  by_off >>= 1;
1847  width >>= 1;
1848  height >>= 1;
1849  block_w >>= 1;
1850  block_h >>= 1;
1851  vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
1852  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
1853  &uvmv, x_off + bx_off, y_off + by_off,
1854  block_w, block_h, width, height, s->uvlinesize,
1855  s->put_pixels_tab[1 + (block_w == 4)]);
1856 }
1857 
1858 /* Fetch pixels for estimated mv 4 macroblocks ahead.
1859  * Optimized for 64-byte cache lines. Inspired by ffh264 prefetch_motion. */
1860 static av_always_inline
1861 void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
1862  int mb_xy, int ref)
1863 {
1864  /* Don't prefetch refs that haven't been used very often this frame. */
1865  if (s->ref_count[ref - 1] > (mb_xy >> 5)) {
1866  int x_off = mb_x << 4, y_off = mb_y << 4;
1867  int mx = (mb->mv.x >> 2) + x_off + 8;
1868  int my = (mb->mv.y >> 2) + y_off;
1869  uint8_t **src = s->framep[ref]->tf.f->data;
1870  int off = mx + (my + (mb_x & 3) * 4) * s->linesize + 64;
1871  /* For threading, a ff_thread_await_progress here might be useful, but
1872  * it actually slows down the decoder. Since a bad prefetch doesn't
1873  * generate bad decoder output, we don't run it here. */
1874  s->vdsp.prefetch(src[0] + off, s->linesize, 4);
1875  off = (mx >> 1) + ((my >> 1) + (mb_x & 7)) * s->uvlinesize + 64;
1876  s->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
1877  }
1878 }
1879 
1880 /**
1881  * Apply motion vectors to prediction buffer, chapter 18.
1882  */
1883 static av_always_inline
1885  VP8Macroblock *mb, int mb_x, int mb_y)
1886 {
1887  int x_off = mb_x << 4, y_off = mb_y << 4;
1888  int width = 16 * s->mb_width, height = 16 * s->mb_height;
1889  ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
1890  VP56mv *bmv = mb->bmv;
1891 
1892  switch (mb->partitioning) {
1893  case VP8_SPLITMVMODE_NONE:
1894  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1895  0, 0, 16, 16, width, height, &mb->mv);
1896  break;
1897  case VP8_SPLITMVMODE_4x4: {
1898  int x, y;
1899  VP56mv uvmv;
1900 
1901  /* Y */
1902  for (y = 0; y < 4; y++) {
1903  for (x = 0; x < 4; x++) {
1904  vp8_mc_luma(s, td, dst[0] + 4 * y * s->linesize + x * 4,
1905  ref, &bmv[4 * y + x],
1906  4 * x + x_off, 4 * y + y_off, 4, 4,
1907  width, height, s->linesize,
1908  s->put_pixels_tab[2]);
1909  }
1910  }
1911 
1912  /* U/V */
1913  x_off >>= 1;
1914  y_off >>= 1;
1915  width >>= 1;
1916  height >>= 1;
1917  for (y = 0; y < 2; y++) {
1918  for (x = 0; x < 2; x++) {
1919  uvmv.x = mb->bmv[2 * y * 4 + 2 * x ].x +
1920  mb->bmv[2 * y * 4 + 2 * x + 1].x +
1921  mb->bmv[(2 * y + 1) * 4 + 2 * x ].x +
1922  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].x;
1923  uvmv.y = mb->bmv[2 * y * 4 + 2 * x ].y +
1924  mb->bmv[2 * y * 4 + 2 * x + 1].y +
1925  mb->bmv[(2 * y + 1) * 4 + 2 * x ].y +
1926  mb->bmv[(2 * y + 1) * 4 + 2 * x + 1].y;
1927  uvmv.x = (uvmv.x + 2 + FF_SIGNBIT(uvmv.x)) >> 2;
1928  uvmv.y = (uvmv.y + 2 + FF_SIGNBIT(uvmv.y)) >> 2;
1929  if (s->profile == 3) {
1930  uvmv.x &= ~7;
1931  uvmv.y &= ~7;
1932  }
1933  vp8_mc_chroma(s, td, dst[1] + 4 * y * s->uvlinesize + x * 4,
1934  dst[2] + 4 * y * s->uvlinesize + x * 4, ref,
1935  &uvmv, 4 * x + x_off, 4 * y + y_off, 4, 4,
1936  width, height, s->uvlinesize,
1937  s->put_pixels_tab[2]);
1938  }
1939  }
1940  break;
1941  }
1942  case VP8_SPLITMVMODE_16x8:
1943  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1944  0, 0, 16, 8, width, height, &bmv[0]);
1945  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1946  0, 8, 16, 8, width, height, &bmv[1]);
1947  break;
1948  case VP8_SPLITMVMODE_8x16:
1949  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1950  0, 0, 8, 16, width, height, &bmv[0]);
1951  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1952  8, 0, 8, 16, width, height, &bmv[1]);
1953  break;
1954  case VP8_SPLITMVMODE_8x8:
1955  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1956  0, 0, 8, 8, width, height, &bmv[0]);
1957  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1958  8, 0, 8, 8, width, height, &bmv[1]);
1959  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1960  0, 8, 8, 8, width, height, &bmv[2]);
1961  vp8_mc_part(s, td, dst, ref, x_off, y_off,
1962  8, 8, 8, 8, width, height, &bmv[3]);
1963  break;
1964  }
1965 }
1966 
1967 static av_always_inline
1969 {
1970  int x, y, ch;
1971 
1972  if (mb->mode != MODE_I4x4) {
1973  uint8_t *y_dst = dst[0];
1974  for (y = 0; y < 4; y++) {
1975  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
1976  if (nnz4) {
1977  if (nnz4 & ~0x01010101) {
1978  for (x = 0; x < 4; x++) {
1979  if ((uint8_t) nnz4 == 1)
1980  s->vp8dsp.vp8_idct_dc_add(y_dst + 4 * x,
1981  td->block[y][x],
1982  s->linesize);
1983  else if ((uint8_t) nnz4 > 1)
1984  s->vp8dsp.vp8_idct_add(y_dst + 4 * x,
1985  td->block[y][x],
1986  s->linesize);
1987  nnz4 >>= 8;
1988  if (!nnz4)
1989  break;
1990  }
1991  } else {
1992  s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
1993  }
1994  }
1995  y_dst += 4 * s->linesize;
1996  }
1997  }
1998 
1999  for (ch = 0; ch < 2; ch++) {
2000  uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4 + ch]);
2001  if (nnz4) {
2002  uint8_t *ch_dst = dst[1 + ch];
2003  if (nnz4 & ~0x01010101) {
2004  for (y = 0; y < 2; y++) {
2005  for (x = 0; x < 2; x++) {
2006  if ((uint8_t) nnz4 == 1)
2007  s->vp8dsp.vp8_idct_dc_add(ch_dst + 4 * x,
2008  td->block[4 + ch][(y << 1) + x],
2009  s->uvlinesize);
2010  else if ((uint8_t) nnz4 > 1)
2011  s->vp8dsp.vp8_idct_add(ch_dst + 4 * x,
2012  td->block[4 + ch][(y << 1) + x],
2013  s->uvlinesize);
2014  nnz4 >>= 8;
2015  if (!nnz4)
2016  goto chroma_idct_end;
2017  }
2018  ch_dst += 4 * s->uvlinesize;
2019  }
2020  } else {
2021  s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4 + ch], s->uvlinesize);
2022  }
2023  }
2024 chroma_idct_end:
2025  ;
2026  }
2027 }
2028 
2029 static av_always_inline
2031  VP8FilterStrength *f, int is_vp7)
2032 {
2033  int interior_limit, filter_level;
2034 
2035  if (s->segmentation.enabled) {
2036  filter_level = s->segmentation.filter_level[mb->segment];
2037  if (!s->segmentation.absolute_vals)
2038  filter_level += s->filter.level;
2039  } else
2040  filter_level = s->filter.level;
2041 
2042  if (s->lf_delta.enabled) {
2043  filter_level += s->lf_delta.ref[mb->ref_frame];
2044  filter_level += s->lf_delta.mode[mb->mode];
2045  }
2046 
2047  filter_level = av_clip_uintp2(filter_level, 6);
2048 
2049  interior_limit = filter_level;
2050  if (s->filter.sharpness) {
2051  interior_limit >>= (s->filter.sharpness + 3) >> 2;
2052  interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
2053  }
2054  interior_limit = FFMAX(interior_limit, 1);
2055 
2056  f->filter_level = filter_level;
2057  f->inner_limit = interior_limit;
2058  f->inner_filter = is_vp7 || !mb->skip || mb->mode == MODE_I4x4 ||
2059  mb->mode == VP8_MVMODE_SPLIT;
2060 }
2061 
2062 static av_always_inline
/* Apply the normal (full) in-loop deblocking filter to one macroblock.
 * dst[0..2] point at the MB's top-left sample in the Y, U and V planes.
 * NOTE(review): the declarator line was lost in extraction; upstream this is
 * void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f,
 *                mb_x, mb_y, is_vp7) -- confirm against the original file. */
 2064  int mb_x, int mb_y, int is_vp7)
 2065 {
 2066  int mbedge_lim, bedge_lim_y, bedge_lim_uv, hev_thresh;
 2067  int filter_level = f->filter_level;
 2068  int inner_limit = f->inner_limit;
 2069  int inner_filter = f->inner_filter;
 2070  int linesize = s->linesize;
 2071  int uvlinesize = s->uvlinesize;
/* High-edge-variance threshold, indexed [keyframe][filter_level]; the
 * interframe row (index 0 when !keyframe... indexed by s->keyframe) uses
 * lower values than the keyframe row. */
 2072  static const uint8_t hev_thresh_lut[2][64] = {
 2073  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
 2074  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2075  3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
 2076  3, 3, 3, 3 },
 2077  { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
 2078  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 2079  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
 2080  2, 2, 2, 2 }
 2081  };
 2082 
/* A zero filter level disables deblocking for this macroblock. */
 2083  if (!filter_level)
 2084  return;
 2085 
/* Edge limits: VP7 derives them from the filter level alone, VP8 also
 * folds in the per-MB inner limit. */
 2086  if (is_vp7) {
 2087  bedge_lim_y = filter_level;
 2088  bedge_lim_uv = filter_level * 2;
 2089  mbedge_lim = filter_level + 2;
 2090  } else {
 2091  bedge_lim_y =
 2092  bedge_lim_uv = filter_level * 2 + inner_limit;
 2093  mbedge_lim = bedge_lim_y + 4;
 2094  }
 2095 
 2096  hev_thresh = hev_thresh_lut[s->keyframe][filter_level];
 2097 
/* Left MB edge (horizontal filtering) -- only if a left neighbour exists. */
 2098  if (mb_x) {
 2099  s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
 2100  mbedge_lim, inner_limit, hev_thresh);
 2101  s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
 2102  mbedge_lim, inner_limit, hev_thresh);
 2103  }
 2104 
/* Inner vertical edges at x = 4/8/12 (luma) and x = 4 (chroma).  Defined as
 * a macro because VP7 and VP8 run this pass at different points in the edge
 * ordering: VP8 before the top-edge filtering (!is_vp7 below), VP7 after it
 * (is_vp7 at the end of the function). */
 2105 #define H_LOOP_FILTER_16Y_INNER(cond) \
 2106  if (cond && inner_filter) { \
 2107  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 4, linesize, \
 2108  bedge_lim_y, inner_limit, \
 2109  hev_thresh); \
 2110  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 8, linesize, \
 2111  bedge_lim_y, inner_limit, \
 2112  hev_thresh); \
 2113  s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0] + 12, linesize, \
 2114  bedge_lim_y, inner_limit, \
 2115  hev_thresh); \
 2116  s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4, \
 2117  uvlinesize, bedge_lim_uv, \
 2118  inner_limit, hev_thresh); \
 2119  }
 2120 
 2121  H_LOOP_FILTER_16Y_INNER(!is_vp7)
 2122 
/* Top MB edge (vertical filtering) -- only if a row above exists. */
 2123  if (mb_y) {
 2124  s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
 2125  mbedge_lim, inner_limit, hev_thresh);
 2126  s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
 2127  mbedge_lim, inner_limit, hev_thresh);
 2128  }
 2129 
/* Inner horizontal edges at y = 4/8/12 (luma) and y = 4 (chroma). */
 2130  if (inner_filter) {
 2131  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 4 * linesize,
 2132  linesize, bedge_lim_y,
 2133  inner_limit, hev_thresh);
 2134  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 8 * linesize,
 2135  linesize, bedge_lim_y,
 2136  inner_limit, hev_thresh);
 2137  s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0] + 12 * linesize,
 2138  linesize, bedge_lim_y,
 2139  inner_limit, hev_thresh);
 2140  s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
 2141  dst[2] + 4 * uvlinesize,
 2142  uvlinesize, bedge_lim_uv,
 2143  inner_limit, hev_thresh);
 2144  }
 2145 
 2146  H_LOOP_FILTER_16Y_INNER(is_vp7)
 2147 }
2148 
2149 static av_always_inline
/* Apply the "simple" loop filter variant: luma plane only, MB edge plus
 * inner edges, no high-edge-variance test and no chroma filtering.
 * NOTE(review): the declarator line was lost in extraction; upstream it is
 * void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f,
 *                       mb_x, mb_y) -- confirm against the original file. */
 2151  int mb_x, int mb_y)
 2152 {
 2153  int mbedge_lim, bedge_lim;
 2154  int filter_level = f->filter_level;
 2155  int inner_limit = f->inner_limit;
 2156  int inner_filter = f->inner_filter;
 2157  int linesize = s->linesize;
 2158 
/* Zero level disables deblocking for this macroblock. */
 2159  if (!filter_level)
 2160  return;
 2161 
 2162  bedge_lim = 2 * filter_level + inner_limit;
 2163  mbedge_lim = bedge_lim + 4;
 2164 
/* Left MB edge, then inner vertical edges at x = 4/8/12. */
 2165  if (mb_x)
 2166  s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
 2167  if (inner_filter) {
 2168  s->vp8dsp.vp8_h_loop_filter_simple(dst + 4, linesize, bedge_lim);
 2169  s->vp8dsp.vp8_h_loop_filter_simple(dst + 8, linesize, bedge_lim);
 2170  s->vp8dsp.vp8_h_loop_filter_simple(dst + 12, linesize, bedge_lim);
 2171  }
 2172 
/* Top MB edge, then inner horizontal edges at y = 4/8/12. */
 2173  if (mb_y)
 2174  s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
 2175  if (inner_filter) {
 2176  s->vp8dsp.vp8_v_loop_filter_simple(dst + 4 * linesize, linesize, bedge_lim);
 2177  s->vp8dsp.vp8_v_loop_filter_simple(dst + 8 * linesize, linesize, bedge_lim);
 2178  s->vp8dsp.vp8_v_loop_filter_simple(dst + 12 * linesize, linesize, bedge_lim);
 2179  }
 2180 }
2181 
/* Motion-vector clamping margin: one macroblock's worth (64) in the same
 * 64-units-per-MB scale used for mv_min/mv_max below (note the << 6). */
2182 #define MARGIN (16 << 2)
2183 static av_always_inline
/* Pre-pass used with the frame-threaded MB layout: walk every macroblock of
 * the frame and decode only its per-MB mode/MV/segment information (via
 * decode_mb_mode with the "layout" flag set to 1), so that a subsequent
 * frame thread can start before full reconstruction.
 * NOTE(review): the declarator line and the initialization of 'mb' were
 * lost in extraction; upstream reads
 *   void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
 *                                VP8Frame *prev_frame, int is_vp7)
 * and  VP8Macroblock *mb = s->macroblocks_base + ...  -- confirm. */
 2185  VP8Frame *prev_frame, int is_vp7)
 2186 {
 2187  VP8Context *s = avctx->priv_data;
 2188  int mb_x, mb_y;
 2189 
 2190  s->mv_min.y = -MARGIN;
 2191  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
 2192  for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
 2194  ((s->mb_width + 1) * (mb_y + 1) + 1);
 2195  int mb_xy = mb_y * s->mb_width;
 2196 
/* Reset the left intra4x4 predictors to DC at the start of each row. */
 2197  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
 2198 
 2199  s->mv_min.x = -MARGIN;
 2200  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
 2201  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
/* Top row: seed the "above" predictors of the virtual row above the
 * frame with DC_PRED. */
 2202  if (mb_y == 0)
 2203  AV_WN32A((mb - s->mb_width - 1)->intra4x4_pred_mode_top,
 2204  DC_PRED * 0x01010101);
 2205  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
 2206  prev_frame && prev_frame->seg_map ?
 2207  prev_frame->seg_map->data + mb_xy : NULL, 1, is_vp7);
/* Slide the clamping window left/up by one MB (64 units) per step. */
 2208  s->mv_min.x -= 64;
 2209  s->mv_max.x -= 64;
 2210  }
 2211  s->mv_min.y -= 64;
 2212  s->mv_max.y -= 64;
 2213  }
 2214 }
2215 
2216 static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2217  VP8Frame *prev_frame)
2218 {
2219  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP7);
2220 }
2221 
2222 static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame,
2223  VP8Frame *prev_frame)
2224 {
2225  vp78_decode_mv_mb_modes(avctx, cur_frame, prev_frame, IS_VP8);
2226 }
2227 
2228 #if HAVE_THREADS
/* Sliced-threading synchronization helpers.  A thread's progress is packed
 * into a single int as (mb_y << 16) | mb_x, so one integer compare orders
 * two macroblock positions. */
/* Block until the other thread's published position (otd->thread_mb_pos)
 * has reached at least (mb_x_check, mb_y_check). */
 2229 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) \
 2230  do { \
 2231  int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF); \
 2232  if (otd->thread_mb_pos < tmp) { \
 2233  pthread_mutex_lock(&otd->lock); \
 2234  td->wait_mb_pos = tmp; \
 2235  do { \
 2236  if (otd->thread_mb_pos >= tmp) \
 2237  break; \
 2238  pthread_cond_wait(&otd->cond, &otd->lock); \
 2239  } while (1); \
 2240  td->wait_mb_pos = INT_MAX; \
 2241  pthread_mutex_unlock(&otd->lock); \
 2242  } \
 2243  } while (0)
 2244 
/* Publish this thread's new position and broadcast to any neighbour thread
 * that is waiting for it.  NOTE: this macro expands names from the caller's
 * scope (avctx, num_jobs, prev_td, next_td), so it may only be used inside
 * functions that declare those locals. */
 2245 #define update_pos(td, mb_y, mb_x) \
 2246  do { \
 2247  int pos = (mb_y << 16) | (mb_x & 0xFFFF); \
 2248  int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && \
 2249  (num_jobs > 1); \
 2250  int is_null = !next_td || !prev_td; \
 2251  int pos_check = (is_null) ? 1 \
 2252  : (next_td != td && \
 2253  pos >= next_td->wait_mb_pos) || \
 2254  (prev_td != td && \
 2255  pos >= prev_td->wait_mb_pos); \
 2256  td->thread_mb_pos = pos; \
 2257  if (sliced_threading && pos_check) { \
 2258  pthread_mutex_lock(&td->lock); \
 2259  pthread_cond_broadcast(&td->cond); \
 2260  pthread_mutex_unlock(&td->lock); \
 2261  } \
 2262  } while (0)
 2263 #else
/* Single-threaded builds: both helpers compile to nothing. */
 2264 #define check_thread_pos(td, otd, mb_x_check, mb_y_check) while(0)
 2265 #define update_pos(td, mb_y, mb_x) while(0)
 2266 #endif
2267 
/* Decode one macroblock row: mode/MV parsing (single-threaded layout),
 * coefficient decoding, intra/inter prediction and IDCT -- everything
 * except the loop filter, which runs in filter_mb_row().
 * NOTE(review): the declarator line was lost in extraction; upstream:
 *   void decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
 *                                int jobnr, int threadnr, int is_vp7). */
2269  int jobnr, int threadnr, int is_vp7)
 2270 {
 2271  VP8Context *s = avctx->priv_data;
 2272  VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
 2273  int mb_y = td->thread_mb_pos >> 16;
 2274  int mb_x, mb_xy = mb_y * s->mb_width;
 2275  int num_jobs = s->num_jobs;
 2276  VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
/* Each row uses the coefficient partition selected by mb_y (power-of-two
 * partition count, hence the mask). */
 2277  VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions - 1)];
 2278  VP8Macroblock *mb;
/* Y/U/V destination pointers for the start of this MB row. */
 2279  uint8_t *dst[3] = {
 2280  curframe->tf.f->data[0] + 16 * mb_y * s->linesize,
 2281  curframe->tf.f->data[1] + 8 * mb_y * s->uvlinesize,
 2282  curframe->tf.f->data[2] + 8 * mb_y * s->uvlinesize
 2283  };
/* Identify the threads working on the rows above and below us. */
 2284  if (mb_y == 0)
 2285  prev_td = td;
 2286  else
 2287  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
 2288  if (mb_y == s->mb_height - 1)
 2289  next_td = td;
 2290  else
 2291  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
 2292  if (s->mb_layout == 1)
 2293  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
 2294  else {
 2295  // Make sure the previous frame has read its segmentation map,
 2296  // if we re-use the same map.
/* NOTE(review): the second half of this condition (upstream:
 * !s->segmentation.update_map) was lost in extraction -- confirm. */
 2297  if (prev_frame && s->segmentation.enabled &&
 2299  ff_thread_await_progress(&prev_frame->tf, mb_y, 0);
 2300  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
 2301  memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
 2302  AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED * 0x01010101);
 2303  }
 2304 
 2305  if (!is_vp7 || mb_y == 0)
 2306  memset(td->left_nnz, 0, sizeof(td->left_nnz));
 2307 
 2308  s->mv_min.x = -MARGIN;
 2309  s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
 2310 
 2311  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
 2312  // Wait for previous thread to read mb_x+2, and reach mb_y-1.
 2313  if (prev_td != td) {
 2314  if (threadnr != 0) {
 2315  check_thread_pos(td, prev_td,
 2316  mb_x + (is_vp7 ? 2 : 1),
 2317  mb_y - (is_vp7 ? 2 : 1));
 2318  } else {
 2319  check_thread_pos(td, prev_td,
 2320  mb_x + (is_vp7 ? 2 : 1) + s->mb_width + 3,
 2321  mb_y - (is_vp7 ? 2 : 1));
 2322  }
 2323  }
 2324 
/* Prefetch upcoming luma/chroma source rows into cache. */
 2325  s->vdsp.prefetch(dst[0] + (mb_x & 3) * 4 * s->linesize + 64,
 2326  s->linesize, 4);
 2327  s->vdsp.prefetch(dst[1] + (mb_x & 7) * s->uvlinesize + 64,
 2328  dst[2] - dst[1], 2);
 2329 
/* Single-threaded layout decodes modes inline, per macroblock. */
 2330  if (!s->mb_layout)
 2331  decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
 2332  prev_frame && prev_frame->seg_map ?
 2333  prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
 2334 
 2335  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
 2336 
 2337  if (!mb->skip)
 2338  decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
 2339 
 2340  if (mb->mode <= MODE_I4x4)
 2341  intra_predict(s, td, dst, mb, mb_x, mb_y, is_vp7);
 2342  else
 2343  inter_predict(s, td, dst, mb, mb_x, mb_y);
 2344 
 2345  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
 2346 
 2347  if (!mb->skip) {
 2348  idct_mb(s, td, dst, mb);
 2349  } else {
/* Skipped MB: clear the non-zero-coefficient context instead. */
 2350  AV_ZERO64(td->left_nnz);
 2351  AV_WN64(s->top_nnz[mb_x], 0); // array of 9, so unaligned
 2352 
 2353  /* Reset DC block predictors if they would exist
 2354  * if the mb had coefficients */
 2355  if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
 2356  td->left_nnz[8] = 0;
 2357  s->top_nnz[mb_x][8] = 0;
 2358  }
 2359  }
 2360 
 2361  if (s->deblock_filter)
 2362  filter_level_for_mb(s, mb, &td->filter_strength[mb_x], is_vp7);
 2363 
/* With multiple jobs the last thread saves the unfiltered top border for
 * the filter pass; with one job filter_mb_row() does it itself. */
 2364  if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs - 1) {
 2365  if (s->filter.simple)
 2366  backup_mb_border(s->top_border[mb_x + 1], dst[0],
 2367  NULL, NULL, s->linesize, 0, 1);
 2368  else
 2369  backup_mb_border(s->top_border[mb_x + 1], dst[0],
 2370  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
 2371  }
 2372 
 2373  prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
 2374 
/* Advance destination pointers and mv clamp window by one MB. */
 2375  dst[0] += 16;
 2376  dst[1] += 8;
 2377  dst[2] += 8;
 2378  s->mv_min.x -= 64;
 2379  s->mv_max.x -= 64;
 2380 
 2381  if (mb_x == s->mb_width + 1) {
 2382  update_pos(td, mb_y, s->mb_width + 3);
 2383  } else {
 2384  update_pos(td, mb_y, mb_x);
 2385  }
 2386  }
 2387 }
2388 
2389 static void vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2390  int jobnr, int threadnr)
2391 {
2392  decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 1);
2393 }
2394 
2395 static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
2396  int jobnr, int threadnr)
2397 {
2398  decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr, 0);
2399 }
2400 
/* Run the in-loop deblocking filter over one macroblock row, using the
 * per-MB strengths computed in decode_mb_row_no_filter().  Synchronizes
 * with the threads handling the rows above and below via
 * check_thread_pos()/update_pos() (which expand the locals declared here:
 * avctx, num_jobs, prev_td, next_td, td). */
2401 static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata,
 2402  int jobnr, int threadnr, int is_vp7)
 2403 {
 2404  VP8Context *s = avctx->priv_data;
 2405  VP8ThreadData *td = &s->thread_data[threadnr];
 2406  int mb_x, mb_y = td->thread_mb_pos >> 16, num_jobs = s->num_jobs;
 2407  AVFrame *curframe = s->curframe->tf.f;
 2408  VP8Macroblock *mb;
 2409  VP8ThreadData *prev_td, *next_td;
/* Y/U/V destination pointers at the start of this MB row. */
 2410  uint8_t *dst[3] = {
 2411  curframe->data[0] + 16 * mb_y * s->linesize,
 2412  curframe->data[1] + 8 * mb_y * s->uvlinesize,
 2413  curframe->data[2] + 8 * mb_y * s->uvlinesize
 2414  };
 2415 
 2416  if (s->mb_layout == 1)
 2417  mb = s->macroblocks_base + ((s->mb_width + 1) * (mb_y + 1) + 1);
 2418  else
 2419  mb = s->macroblocks + (s->mb_height - mb_y - 1) * 2;
 2420 
/* Identify the threads working on the rows above and below us. */
 2421  if (mb_y == 0)
 2422  prev_td = td;
 2423  else
 2424  prev_td = &s->thread_data[(jobnr + num_jobs - 1) % num_jobs];
 2425  if (mb_y == s->mb_height - 1)
 2426  next_td = td;
 2427  else
 2428  next_td = &s->thread_data[(jobnr + 1) % num_jobs];
 2429 
 2430  for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
 2431  VP8FilterStrength *f = &td->filter_strength[mb_x];
/* Wait until the row above has been filtered past our right edge, and
 * the row below has been decoded past our position. */
 2432  if (prev_td != td)
 2433  check_thread_pos(td, prev_td,
 2434  (mb_x + 1) + (s->mb_width + 3), mb_y - 1);
 2435  if (next_td != td)
 2436  if (next_td != &s->thread_data[0])
 2437  check_thread_pos(td, next_td, mb_x + 1, mb_y + 1);
 2438 
/* Single-job case: save the unfiltered top border here (multi-job runs
 * already did this in decode_mb_row_no_filter()). */
 2439  if (num_jobs == 1) {
 2440  if (s->filter.simple)
 2441  backup_mb_border(s->top_border[mb_x + 1], dst[0],
 2442  NULL, NULL, s->linesize, 0, 1);
 2443  else
 2444  backup_mb_border(s->top_border[mb_x + 1], dst[0],
 2445  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
 2446  }
 2447 
 2448  if (s->filter.simple)
 2449  filter_mb_simple(s, dst[0], f, mb_x, mb_y);
 2450  else
 2451  filter_mb(s, dst, f, mb_x, mb_y, is_vp7);
 2452  dst[0] += 16;
 2453  dst[1] += 8;
 2454  dst[2] += 8;
 2455 
/* Filter progress is published offset by mb_width+3 so it is ordered
 * after the decode pass of the same row. */
 2456  update_pos(td, mb_y, (s->mb_width + 3) + mb_x);
 2457  }
 2458 }
2459 
2460 static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata,
2461  int jobnr, int threadnr)
2462 {
2463  filter_mb_row(avctx, tdata, jobnr, threadnr, 1);
2464 }
2465 
2466 static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
2467  int jobnr, int threadnr)
2468 {
2469  filter_mb_row(avctx, tdata, jobnr, threadnr, 0);
2470 }
2471 
2472 static av_always_inline
2473 int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr,
2474  int threadnr, int is_vp7)
2475 {
2476  VP8Context *s = avctx->priv_data;
2477  VP8ThreadData *td = &s->thread_data[jobnr];
2478  VP8ThreadData *next_td = NULL, *prev_td = NULL;
2479  VP8Frame *curframe = s->curframe;
2480  int mb_y, num_jobs = s->num_jobs;
2481 
2482  td->thread_nr = threadnr;
2483  for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
2484  if (mb_y >= s->mb_height)
2485  break;
2486  td->thread_mb_pos = mb_y << 16;
2487  s->decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
2488  if (s->deblock_filter)
2489  s->filter_mb_row(avctx, tdata, jobnr, threadnr);
2490  update_pos(td, mb_y, INT_MAX & 0xFFFF);
2491 
2492  s->mv_min.y -= 64;
2493  s->mv_max.y -= 64;
2494 
2495  if (avctx->active_thread_type == FF_THREAD_FRAME)
2496  ff_thread_report_progress(&curframe->tf, mb_y, 0);
2497  }
2498 
2499  return 0;
2500 }
2501 
2502 static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2503  int jobnr, int threadnr)
2504 {
2505  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP7);
2506 }
2507 
2508 static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
2509  int jobnr, int threadnr)
2510 {
2511  return vp78_decode_mb_row_sliced(avctx, tdata, jobnr, threadnr, IS_VP8);
2512 }
2513 
2514 
2515 static av_always_inline
/* Shared VP7/VP8 frame decoding entry point: parse the frame header, manage
 * the reference-frame pool, run the MB decode/filter passes (possibly
 * sliced-threaded), and emit the frame unless it is invisible or skipped.
 * Returns avpkt->size on success or a negative error code.
 * NOTE(review): several continuation lines in this function were lost in
 * extraction (flagged inline below) -- compare against upstream vp8.c. */
 2516 int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 2517  AVPacket *avpkt, int is_vp7)
 2518 {
 2519  VP8Context *s = avctx->priv_data;
 2520  int ret, i, referenced, num_jobs;
 2521  enum AVDiscard skip_thresh;
 2522  VP8Frame *av_uninit(curframe), *prev_frame;
 2523 
 2524  if (is_vp7)
 2525  ret = vp7_decode_frame_header(s, avpkt->data, avpkt->size);
 2526  else
 2527  ret = vp8_decode_frame_header(s, avpkt->data, avpkt->size);
 2528 
 2529  if (ret < 0)
 2530  goto err;
 2531 
 2532  prev_frame = s->framep[VP56_FRAME_CURRENT];
 2533 
/* NOTE(review): the tail of this expression (upstream:
 * s->update_altref == VP56_FRAME_CURRENT;) was lost in extraction. */
 2534  referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
 2536 
/* Map "is this frame referenced/keyframe" onto the user's skip policy. */
 2537  skip_thresh = !referenced ? AVDISCARD_NONREF
 2538  : !s->keyframe ? AVDISCARD_NONKEY
 2539  : AVDISCARD_ALL;
 2540 
 2541  if (avctx->skip_frame >= skip_thresh) {
 2542  s->invisible = 1;
 2543  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
 2544  goto skip_decode;
 2545  }
 2546  s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;
 2547 
 2548  // release no longer referenced frames
 2549  for (i = 0; i < 5; i++)
 2550  if (s->frames[i].tf.f->data[0] &&
 2551  &s->frames[i] != prev_frame &&
 2552  &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
 2553  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
 2554  &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
 2555  vp8_release_frame(s, &s->frames[i]);
 2556 
 2557  curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
 2558 
 2559  if (!s->colorspace)
 2560  avctx->colorspace = AVCOL_SPC_BT470BG;
 2561  if (s->fullrange)
 2562  avctx->color_range = AVCOL_RANGE_JPEG;
 2563  else
 2564  avctx->color_range = AVCOL_RANGE_MPEG;
 2565 
 2566  /* Given that arithmetic probabilities are updated every frame, it's quite
 2567  * likely that the values we have on a random interframe are complete
 2568  * junk if we didn't start decode on a keyframe. So just don't display
 2569  * anything rather than junk. */
 2570  if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
 2571  !s->framep[VP56_FRAME_GOLDEN] ||
 2572  !s->framep[VP56_FRAME_GOLDEN2])) {
 2573  av_log(avctx, AV_LOG_WARNING,
 2574  "Discarding interframe without a prior keyframe!\n");
 2575  ret = AVERROR_INVALIDDATA;
 2576  goto err;
 2577  }
 2578 
 2579  curframe->tf.f->key_frame = s->keyframe;
/* NOTE(review): continuation (upstream: ": AV_PICTURE_TYPE_P;") lost. */
 2580  curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
 2581  if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
 2582  goto err;
 2583 
 2584  // check if golden and altref are swapped
/* NOTE(review): the next_framep[GOLDEN2]/next_framep[GOLDEN] assignment
 * lines for both branches of these updates were lost in extraction. */
 2585  if (s->update_altref != VP56_FRAME_NONE)
 2586  else
 2590 
 2591  if (s->update_golden != VP56_FRAME_NONE)
 2593  else
 2595 
 2596  if (s->update_last)
 2597  s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
 2598  else
 2600 
 2601  s->next_framep[VP56_FRAME_CURRENT] = curframe;
 2602 
 2603  if (avctx->codec->update_thread_context)
 2604  ff_thread_finish_setup(avctx);
 2605 
 2606  s->linesize = curframe->tf.f->linesize[0];
 2607  s->uvlinesize = curframe->tf.f->linesize[1];
 2608 
 2609  memset(s->top_nnz, 0, s->mb_width * sizeof(*s->top_nnz));
 2610  /* Zero macroblock structures for top/top-left prediction
 2611  * from outside the frame. */
 2612  if (!s->mb_layout)
 2613  memset(s->macroblocks + s->mb_height * 2 - 1, 0,
 2614  (s->mb_width + 1) * sizeof(*s->macroblocks));
 2615  if (!s->mb_layout && s->keyframe)
 2616  memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width * 4);
 2617 
 2618  memset(s->ref_count, 0, sizeof(s->ref_count));
 2619 
 2620  if (s->mb_layout == 1) {
 2621  // Make sure the previous frame has read its segmentation map,
 2622  // if we re-use the same map.
/* NOTE(review): second half of this condition lost in extraction. */
 2623  if (prev_frame && s->segmentation.enabled &&
 2625  ff_thread_await_progress(&prev_frame->tf, 1, 0);
 2626  if (is_vp7)
 2627  vp7_decode_mv_mb_modes(avctx, curframe, prev_frame);
 2628  else
 2629  vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);
 2630  }
 2631 
 2632  if (avctx->active_thread_type == FF_THREAD_FRAME)
 2633  num_jobs = 1;
 2634  else
 2635  num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
 2636  s->num_jobs = num_jobs;
 2637  s->curframe = curframe;
 2638  s->prev_frame = prev_frame;
 2639  s->mv_min.y = -MARGIN;
 2640  s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
 2641  for (i = 0; i < MAX_THREADS; i++) {
 2642  s->thread_data[i].thread_mb_pos = 0;
 2643  s->thread_data[i].wait_mb_pos = INT_MAX;
 2644  }
/* NOTE(review): the avctx->execute2(...) invocation lines were lost in
 * extraction; only their trailing "num_jobs);" arguments survive. */
 2645  if (is_vp7)
 2647  num_jobs);
 2648  else
 2650  num_jobs);
 2651 
 2652  ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
 2653  memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);
 2654 
 2655 skip_decode:
 2656  // if future frames don't use the updated probabilities,
 2657  // reset them to the values we saved
 2658  if (!s->update_probabilities)
 2659  s->prob[0] = s->prob[1];
 2660 
 2661  if (!s->invisible) {
 2662  if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
 2663  return ret;
 2664  *got_frame = 1;
 2665  }
 2666 
 2667  return avpkt->size;
 2668 err:
 2669  memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
 2670  return ret;
 2671 }
2672 
2673 int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
2674  AVPacket *avpkt)
2675 {
2676  return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP8);
2677 }
2678 
#if CONFIG_VP7_DECODER
/* VP7 decode entry point; defers to the shared VP7/VP8 driver. */
static int vp7_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                            AVPacket *avpkt)
{
    return vp78_decode_frame(avctx, data, got_frame, avpkt, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2686 
/* Tear down the decoder: flush/free all frame buffers, then destroy the
 * AVFrames backing the frame pool.  Always returns 0.
 * NOTE(review): the declarator line was lost in extraction; upstream:
 * av_cold int ff_vp8_decode_free(AVCodecContext *avctx). */
2688 {
 2689  VP8Context *s = avctx->priv_data;
 2690  int i;
 2691 
 2692  vp8_decode_flush_impl(avctx, 1);
 2693  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
 2694  av_frame_free(&s->frames[i].tf.f);
 2695 
 2696  return 0;
 2697 }
2698 
/* Allocate the AVFrames backing the decoder's frame pool.  Returns 0 or
 * AVERROR(ENOMEM); on failure the caller is responsible for cleanup (see
 * vp78_decode_init, which calls ff_vp8_decode_free).
 * NOTE(review): the declarator line was lost in extraction; upstream:
 * static av_cold int vp8_init_frames(VP8Context *s). */
2700 {
 2701  int i;
 2702  for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
 2703  s->frames[i].tf.f = av_frame_alloc();
 2704  if (!s->frames[i].tf.f)
 2705  return AVERROR(ENOMEM);
 2706  }
 2707  return 0;
 2708 }
2709 
2710 static av_always_inline
/* Shared VP7/VP8 decoder initialization: set pixel format, init the video
 * and VP8 DSP contexts (VP7- or VP8-specific on top of the common part),
 * seed the static scan table and allocate the frame pool. */
 2711 int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
 2712 {
 2713  VP8Context *s = avctx->priv_data;
 2714  int ret;
 2715 
 2716  s->avctx = avctx;
 2717  s->vp7 = avctx->codec->id == AV_CODEC_ID_VP7;
 2718  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 2719  avctx->internal->allocate_progress = 1;
 2720 
 2721  ff_videodsp_init(&s->vdsp, 8);
 2722 
 2723  ff_vp78dsp_init(&s->vp8dsp);
/* NOTE(review): the assignment lines inside both branches (upstream sets
 * s->decode_frame_header, s->decode_mb_row_no_filter and s->filter_mb_row
 * to the vp7_/vp8_ variants) were lost in extraction -- confirm against
 * the original file. */
 2724  if (CONFIG_VP7_DECODER && is_vp7) {
 2726  ff_vp7dsp_init(&s->vp8dsp);
 2729  } else if (CONFIG_VP8_DECODER && !is_vp7) {
 2731  ff_vp8dsp_init(&s->vp8dsp);
 2734  }
 2735 
 2736  /* does not change for VP8 */
 2737  memcpy(s->prob[0].scan, zigzag_scan, sizeof(s->prob[0].scan));
 2738 
 2739  if ((ret = vp8_init_frames(s)) < 0) {
 2740  ff_vp8_decode_free(avctx);
 2741  return ret;
 2742  }
 2743 
 2744  return 0;
 2745 }
2746 
#if CONFIG_VP7_DECODER
/* VP7 init entry point; defers to the shared VP7/VP8 initializer. */
static int vp7_decode_init(AVCodecContext *avctx)
{
    return vp78_decode_init(avctx, IS_VP7);
}
#endif /* CONFIG_VP7_DECODER */
2753 
/* VP8 init entry point; defers to the shared VP7/VP8 initializer.
 * NOTE(review): the declarator line was lost in extraction; upstream:
 * av_cold int ff_vp8_decode_init(AVCodecContext *avctx). */
2755 {
 2756  return vp78_decode_init(avctx, IS_VP8);
 2757 }
2758 
2759 #if CONFIG_VP8_DECODER
2760 static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
2761 {
2762  VP8Context *s = avctx->priv_data;
2763  int ret;
2764 
2765  s->avctx = avctx;
2766 
2767  if ((ret = vp8_init_frames(s)) < 0) {
2768  ff_vp8_decode_free(avctx);
2769  return ret;
2770  }
2771 
2772  return 0;
2773 }
2774 
/* Translate a frame pointer from the source thread's context into the slot
 * with the same index in the destination context's frames[] array; NULL
 * stays NULL.  Expands locals named s and s_src from the caller's scope. */
2775 #define REBASE(pic) ((pic) ? (pic) - &s_src->frames[0] + &s->frames[0] : NULL)
2776 
2777 static int vp8_decode_update_thread_context(AVCodecContext *dst,
2778  const AVCodecContext *src)
2779 {
2780  VP8Context *s = dst->priv_data, *s_src = src->priv_data;
2781  int i;
2782 
2783  if (s->macroblocks_base &&
2784  (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
2785  free_buffers(s);
2786  s->mb_width = s_src->mb_width;
2787  s->mb_height = s_src->mb_height;
2788  }
2789 
2790  s->prob[0] = s_src->prob[!s_src->update_probabilities];
2791  s->segmentation = s_src->segmentation;
2792  s->lf_delta = s_src->lf_delta;
2793  memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));
2794 
2795  for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
2796  if (s_src->frames[i].tf.f->data[0]) {
2797  int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
2798  if (ret < 0)
2799  return ret;
2800  }
2801  }
2802 
2803  s->framep[0] = REBASE(s_src->next_framep[0]);
2804  s->framep[1] = REBASE(s_src->next_framep[1]);
2805  s->framep[2] = REBASE(s_src->next_framep[2]);
2806  s->framep[3] = REBASE(s_src->next_framep[3]);
2807 
2808  return 0;
2809 }
2810 #endif /* CONFIG_VP8_DECODER */
2811 
2812 #if CONFIG_VP7_DECODER
/* Codec registration for On2 VP7.
 * NOTE(review): at least one designator line (upstream also sets .flush)
 * was lost in extraction -- restore from the original vp8.c. */
 2813 AVCodec ff_vp7_decoder = {
 2814  .name = "vp7",
 2815  .long_name = NULL_IF_CONFIG_SMALL("On2 VP7"),
 2816  .type = AVMEDIA_TYPE_VIDEO,
 2817  .id = AV_CODEC_ID_VP7,
 2818  .priv_data_size = sizeof(VP8Context),
 2819  .init = vp7_decode_init,
 2820  .close = ff_vp8_decode_free,
 2821  .decode = vp7_decode_frame,
 2822  .capabilities = CODEC_CAP_DR1,
 2824 };
 2825 #endif /* CONFIG_VP7_DECODER */
2826 
2827 #if CONFIG_VP8_DECODER
/* Codec registration for On2 VP8.
 * NOTE(review): several designator lines (upstream also sets .init,
 * .decode, .capabilities and .flush) were lost in extraction -- restore
 * from the original vp8.c. */
 2828 AVCodec ff_vp8_decoder = {
 2829  .name = "vp8",
 2830  .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
 2831  .type = AVMEDIA_TYPE_VIDEO,
 2832  .id = AV_CODEC_ID_VP8,
 2833  .priv_data_size = sizeof(VP8Context),
 2835  .close = ff_vp8_decode_free,
 2839  .init_thread_copy = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
 2840  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
 2841 };
 2842 #endif /* CONFIG_VP8_DECODER */
uint8_t golden
Definition: vp8.h:242
uint8_t inner_limit
Definition: vp8.h:82
#define VERT_PRED8x8
Definition: h264pred.h:70
VP8Macroblock * macroblocks
Definition: vp8.h:185
static const uint8_t vp8_dc_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:719
static av_always_inline void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:1578
static const uint8_t vp8_submv_prob[5][3]
Definition: vp8data.h:153
static const uint16_t vp7_ydc_qlookup[]
Definition: vp8data.h:786
#define NULL
Definition: coverity.c:32
const struct AVCodec * codec
Definition: avcodec.h:1250
discard all frames except keyframes
Definition: avcodec.h:668
Definition: vp9.h:47
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:631
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static const uint8_t vp7_mv_default_prob[2][17]
Definition: vp8data.h:752
#define DC_128_PRED8x8
Definition: h264pred.h:76
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:94
(only used in prediction) no split MVs
Definition: vp8.h:77
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
void ff_vp7dsp_init(VP8DSPContext *c)
static void update_lf_deltas(VP8Context *s)
Definition: vp8.c:235
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
static const uint8_t vp7_pred4x4_mode[]
Definition: vp8data.h:33
int8_t sign_bias[4]
one state [0, 1] per ref frame type
Definition: vp8.h:163
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1424
static av_always_inline int inter_predict_dc(int16_t block[16], int16_t pred[2])
Definition: vp8.c:1303
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define VP7_MV_PRED_COUNT
Definition: vp8data.h:68
AVFrame * f
Definition: thread.h:36
static av_always_inline int vp8_rac_get_tree(VP56RangeCoder *c, const int8_t(*tree)[2], const uint8_t *probs)
Definition: vp56.h:376
uint8_t feature_value[4][4]
Definition: vp8.h:308
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:229
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:506
static av_always_inline void decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2268
uint8_t * intra4x4_pred_mode_top
Definition: vp8.h:187
uint8_t mbskip_enabled
Definition: vp8.h:158
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
Determine which buffers golden and altref should be updated with after this frame.
Definition: vp8.c:350
void(* vp8_v_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:48
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1960
static int vp7_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16])
Definition: vp8.c:1325
uint8_t token[4][16][3][NUM_DCT_TOKENS-1]
Definition: vp8.h:245
uint8_t scan[16]
Definition: vp8.h:247
int linesize
Definition: vp8.h:153
int size
Definition: avcodec.h:1163
const char * b
Definition: vf_curves.c:109
static void vp8_decode_flush(AVCodecContext *avctx)
Definition: vp8.c:119
#define MARGIN
Definition: vp8.c:2182
vp8_mc_func put_vp8_bilinear_pixels_tab[3][3][3]
Definition: vp8dsp.h:81
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1444
VP56mv bmv[16]
Definition: vp8.h:98
uint8_t inner_filter
Definition: vp8.h:83
struct VP8Context::@94 qmat[4]
Macroblocks can have one of 4 different quants in a frame when segmentation is enabled.
#define FF_ARRAY_ELEMS(a)
static const int8_t vp8_pred8x8c_tree[3][2]
Definition: vp8data.h:180
uint8_t segmentid[3]
Definition: vp8.h:238
static const uint16_t vp7_y2dc_qlookup[]
Definition: vp8data.h:811
discard all
Definition: avcodec.h:669
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
Definition: vp8.c:425
#define HOR_PRED8x8
Definition: h264pred.h:69
AVCodec.
Definition: avcodec.h:3181
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_RL16
Definition: bytestream.h:85
uint8_t sharpness
Definition: vp8.h:182
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
2 16x8 blocks (vertical)
Definition: vp8.h:73
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:124
int update_probabilities
If this flag is not set, all the probability updates are discarded after this frame is decoded...
Definition: vp8.h:260
VP8Frame * framep[4]
Definition: vp8.h:146
static int vp8_decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2])
Definition: vp8.c:1337
static void vp7_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2460
#define VP7_MVC_SIZE
Definition: vp8.c:391
static int vp7_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:797
vp8_mc_func put_vp8_epel_pixels_tab[3][3][3]
first dimension: width>>3, height is assumed equal to width second dimension: 0 if no vertical interp...
Definition: vp8dsp.h:80
static av_always_inline const uint8_t * get_submv_prob(uint32_t left, uint32_t top, int is_vp7)
Definition: vp8.c:808
static const uint8_t vp8_pred8x8c_prob_inter[3]
Definition: vp8data.h:189
static av_always_inline int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, int zero_nhood, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1362
uint8_t(* top_nnz)[9]
Definition: vp8.h:227
int num_jobs
Definition: vp8.h:277
static const uint8_t vp8_mbsplits[5][16]
Definition: vp8data.h:127
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2947
#define AV_RN32A(p)
Definition: intreadwrite.h:526
uint8_t pred16x16[4]
Definition: vp8.h:243
static const int8_t vp8_pred16x16_tree_intra[4][2]
Definition: vp8data.h:47
uint8_t update_map
Definition: vp8.h:174
#define PLANE_PRED8x8
Definition: h264pred.h:71
uint16_t mb_height
Definition: vp8.h:152
int16_t y
Definition: vp56.h:67
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int update_golden
VP56_FRAME_NONE if not updated, or which frame to copy if so.
Definition: vp8.h:253
static av_always_inline void filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2401
uint8_t intra4x4_pred_mode_top[4]
Definition: vp8.h:96
static av_always_inline void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width, int simple, int xchg)
Definition: vp8.c:1464
if()
Definition: avfilter.c:975
uint8_t
static int vp7_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:203
#define av_cold
Definition: attributes.h:74
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
#define mb
#define DC_PRED8x8
Definition: h264pred.h:68
int fade_present
Fade bit present in bitstream (VP7)
Definition: vp8.h:293
static av_always_inline void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:926
mode
Definition: f_perms.c:27
static VP8Frame * vp8_find_free_buffer(VP8Context *s)
Definition: vp8.c:124
uint8_t ref_frame
Definition: vp8.h:91
static av_always_inline int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf, int vp7)
Definition: vp8.c:1542
Multithreading support functions.
Definition: vp9.h:46
#define TOP_DC_PRED8x8
Definition: h264pred.h:75
int ff_vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp8.c:2673
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:363
uint8_t mvc[2][19]
Definition: vp8.h:246
VP56mv mv
Definition: vp8.h:97
int8_t base_quant[4]
Definition: vp8.h:175
static const uint8_t vp8_mv_update_prob[2][19]
Definition: vp8data.h:741
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:789
static AVFrame * frame
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
int update_last
update VP56_FRAME_PREVIOUS with the current one
Definition: vp8.h:252
uint8_t * data
Definition: avcodec.h:1162
int8_t yoffset
Definition: vp8data.h:62
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:3683
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
static void parse_segment_info(VP8Context *s)
Definition: vp8.c:214
ptrdiff_t size
Definition: opengl_enc.c:101
VP8Frame * prev_frame
Definition: vp8.h:149
int num_coeff_partitions
All coefficients are contained in separate arith coding contexts.
Definition: vp8.h:266
static const uint8_t vp8_token_default_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:369
static void fade(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_linesize, int width, int height, int alpha, int beta)
Definition: vp8.c:436
vp8_mc_func put_pixels_tab[3][3][3]
Definition: vp8.h:271
void ff_thread_finish_setup(AVCodecContext *avctx)
If the codec defines update_thread_context(), call this when they are ready for the next thread to st...
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_COPY64(d, s)
Definition: intreadwrite.h:590
uint8_t feature_index_prob[4][3]
Definition: vp8.h:307
uint8_t intra4x4_pred_mode_mb[16]
Definition: vp8.h:95
#define av_log(a,...)
static av_always_inline int vp78_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt, int is_vp7)
Definition: vp8.c:2516
unsigned m
Definition: audioconvert.c:187
uint8_t intra4x4_pred_mode_left[4]
Definition: vp8.h:188
#define VERT_VP8_PRED
for VP8, VERT_PRED is the average of
Definition: h264pred.h:60
av_cold void ff_vp78dsp_init(VP8DSPContext *dsp)
Definition: vp8dsp.c:664
uint8_t colorspace
0 is the only value allowed (meaning bt601)
Definition: vp8.h:274
static const VP56mv * get_bmv_ptr(const VP8Macroblock *mb, int subblock)
Definition: vp8.c:920
enum AVCodecID id
Definition: avcodec.h:3195
static const uint8_t vp8_mbsplit_count[4]
Definition: vp8data.h:142
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:98
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const int8_t vp8_coeff_band_indexes[8][10]
Definition: vp8data.h:331
#define td
Definition: regdef.h:70
H264PredContext hpc
Definition: vp8.h:270
Definition: vp8.h:132
static const uint8_t vp8_pred4x4_mode[]
Definition: vp8data.h:40
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
Definition: vp8.c:1861
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
uint8_t absolute_vals
Definition: vp8.h:173
uint16_t mb_width
Definition: vp8.h:151
void(* vp8_luma_dc_wht_dc)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:39
static const uint8_t vp8_dct_cat2_prob[]
Definition: vp8data.h:345
static const uint8_t vp8_mv_default_prob[2][19]
Definition: vp8data.h:763
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2395
int profile
Definition: mxfenc.c:1804
#define FF_SIGNBIT(x)
Definition: internal.h:66
uint8_t last
Definition: vp8.h:241
static const int sizes[][2]
Definition: img2dec.c:48
#define AVERROR(e)
Definition: error.h:43
void(* vp8_h_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:54
static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:635
uint8_t mode
Definition: vp8.h:90
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
static av_always_inline int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1507
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2508
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:175
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2772
const char * r
Definition: vf_curves.c:107
VP8 compatible video decoder.
void(* vp8_v_loop_filter8uv)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:52
static const uint8_t vp8_mbfirstidx[4][16]
Definition: vp8data.h:135
AVCodecContext * avctx
Definition: vp8.h:145
#define EDGE_EMU_LINESIZE
Definition: vp8.h:127
uint16_t inter_dc_pred[2][2]
Interframe DC prediction (VP7) [0] VP56_FRAME_PREVIOUS [1] VP56_FRAME_GOLDEN.
Definition: vp8.h:300
VideoDSPContext vdsp
Definition: vp8.h:268
const char * name
Name of the codec implementation.
Definition: avcodec.h:3188
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:85
VP8Macroblock * macroblocks_base
Definition: vp8.h:250
static av_always_inline void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], ThreadFrame *ref_frame, int x_off, int y_off, int bx_off, int by_off, int block_w, int block_h, int width, int height, VP56mv *mv)
Definition: vp8.c:1823
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
static const uint8_t vp8_pred4x4_prob_inter[9]
Definition: vp8data.h:192
uint8_t edge_emu_buffer[21 *EDGE_EMU_LINESIZE]
Definition: vp8.h:128
int16_t block[6][4][16]
Definition: vp8.h:102
static av_always_inline int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16], uint8_t probs[16][3][NUM_DCT_TOKENS-1], int i, uint8_t *token_prob, int16_t qmul[2], const uint8_t scan[16], int vp7)
Definition: vp8.c:1243
static const int vp7_mode_contexts[31][4]
Definition: vp8data.h:84
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2466
static void vp7_get_quants(VP8Context *s)
Definition: vp8.c:285
#define FFMAX(a, b)
Definition: common.h:64
Libavcodec external API header.
uint8_t keyframe
Definition: vp8.h:156
int x
Definition: vp8.h:138
struct VP8Context::@93 filter
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:214
VP56Frame
Definition: vp56.h:39
int16_t luma_qmul[2]
Definition: vp8.h:197
static const uint8_t vp8_pred16x16_prob_inter[4]
Definition: vp8data.h:164
Definition: hls.c:68
static void vp7_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2389
useful rectangle filling function
#define MAX_THREADS
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
4x4 blocks of 4x4px each
Definition: vp8.h:76
uint8_t deblock_filter
Definition: vp8.h:157
#define H_LOOP_FILTER_16Y_INNER(cond)
#define FFMIN(a, b)
Definition: common.h:66
float y
uint8_t feature_present_prob[4]
Definition: vp8.h:306
static av_always_inline void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
chroma MC function
Definition: vp8.c:1773
uint8_t fullrange
whether we can skip clamping in dsp functions
Definition: vp8.h:275
struct VP8Context::@95 lf_delta
ret
Definition: avfilter.c:974
int16_t block_dc[16]
Definition: vp8.h:103
static av_unused int vp8_rac_get_sint(VP56RangeCoder *c, int bits)
Definition: vp56.h:332
int width
picture width / height.
Definition: avcodec.h:1414
uint8_t mbskip
Definition: vp8.h:239
int8_t ref[4]
filter strength adjustment for macroblocks that reference: [0] - intra / VP56_FRAME_CURRENT [1] - VP5...
Definition: vp8.h:223
void(* filter_mb_row)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:286
void(* vp8_idct_dc_add4y)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:42
static av_cold int vp8_init_frames(VP8Context *s)
Definition: vp8.c:2699
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void free_buffers(VP8Context *s)
Definition: vp8.c:48
int32_t
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
Definition: vp8.c:2264
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:87
static int vp8_read_mv_component(VP56RangeCoder *c, const uint8_t *p)
Definition: vp8.c:802
void(* vp8_mc_func)(uint8_t *dst, ptrdiff_t dstStride, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: vp8dsp.h:33
int16_t luma_dc_qmul[2]
luma dc-only block quant
Definition: vp8.h:198
int16_t chroma_qmul[2]
Definition: vp8.h:199
static const uint8_t vp8_pred4x4_prob_intra[10][10][9]
Definition: vp8data.h:196
uint8_t(* top_border)[16+8+8]
Definition: vp8.h:226
float u
int n
Definition: avisynth_c.h:547
struct VP8Context::@92 segmentation
Base parameters for segmentation, i.e.
ThreadFrame tf
Definition: vp8.h:133
struct VP8Context::@96 prob[2]
These are all of the updatable probabilities for binary decisions.
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f, int is_vp7)
Definition: vp8.c:2030
static const int8_t vp7_feature_index_tree[4][2]
Definition: vp8data.h:779
static const uint8_t vp7_feature_value_size[2][4]
Definition: vp8data.h:774
#define vp56_rac_get_prob
Definition: vp56.h:250
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
Definition: vp8.c:106
static av_always_inline void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb, uint8_t t_nnz[9], uint8_t l_nnz[9], int is_vp7)
Definition: vp8.c:1377
#define LEFT_DC_PRED8x8
Definition: h264pred.h:74
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2222
uint8_t segment
Definition: vp8.h:94
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2753
static void flush(AVCodecContext *avctx)
Definition: aacdec.c:514
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:523
int8_t xoffset
Definition: vp8data.h:63
static const float pred[4]
Definition: siprdata.h:259
static int vp7_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.c:2502
#define IS_VP8
Definition: vp8dsp.h:103
static const int8_t mv[256][2]
Definition: 4xm.c:77
static void vp7_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *cur_frame, VP8Frame *prev_frame)
Definition: vp8.c:2216
static av_always_inline int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1516
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Definition: vp56.h:267
void(* vp8_v_loop_filter8uv_inner)(uint8_t *dstU, uint8_t *dstV, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:62
void(* vp8_h_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:70
static av_always_inline void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb, int mb_x, int mb_y)
Apply motion vectors to prediction buffer, chapter 18.
Definition: vp8.c:1884
VP8Frame * curframe
Definition: vp8.h:148
uint8_t simple
Definition: vp8.h:180
AVS_Value src
Definition: avisynth_c.h:482
void(* vp8_idct_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:40
VP8Frame frames[5]
Definition: vp8.h:272
static const uint8_t vp8_pred8x8c_prob_intra[3]
Definition: vp8data.h:186
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:2765
uint8_t level
Definition: vp8.h:181
static const uint8_t zigzag_scan[16+1]
Definition: h264data.h:54
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
Definition: vp8.c:81
AVBufferRef * seg_map
Definition: vp8.h:134
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
static const uint16_t vp7_yac_qlookup[]
Definition: vp8data.h:798
main external API structure.
Definition: avcodec.h:1241
static int vp7_fade_frame(VP8Context *s, VP56RangeCoder *c)
Definition: vp8.c:450
uint8_t * data
The data buffer.
Definition: buffer.h:89
VP8Frame * next_framep[4]
Definition: vp8.h:147
int mb_layout
This describes the macroblock memory layout.
Definition: vp8.h:283
uint8_t left_nnz[9]
For coeff decode, we need to know whether the above block had non-zero coefficients.
Definition: vp8.h:118
static const uint8_t vp8_mbsplit_prob[3]
Definition: vp8data.h:145
VP56RangeCoder c
header context, includes mb modes and motion vectors
Definition: vp8.h:229
void * buf
Definition: avisynth_c.h:553
int y
Definition: vp8.h:139
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
VP56RangeCoder coeff_partition[8]
Definition: vp8.h:267
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static const int8_t vp8_pred16x16_tree_inter[4][2]
Definition: vp8data.h:54
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
int vp7
Definition: vp8.h:288
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2764
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:259
int coded_height
Definition: avcodec.h:1424
static int vp8_update_dimensions(VP8Context *s, int width, int height)
Definition: vp8.c:208
int index
Definition: gxfenc.c:89
VP8FilterStrength * filter_strength
Definition: vp8.h:129
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1953
VP8intmv mv_min
Definition: vp8.h:160
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
Definition: vp8.c:758
void(* vp8_idct_dc_add4uv)(uint8_t *dst, int16_t block[4][16], ptrdiff_t stride)
Definition: vp8dsp.h:44
static av_always_inline int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
Definition: vp8.c:1498
static void vp78_update_probability_tables(VP8Context *s)
Definition: vp8.c:375
#define MV_EDGE_CHECK(n)
static const int8_t vp8_pred4x4_tree[9][2]
Definition: vp8data.h:168
uint8_t enabled
whether each mb can have a different strength based on mode/ref
Definition: vp8.h:172
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3], VP8Macroblock *mb)
Definition: vp8.c:1968
static void vp78_update_pred16x16_pred8x8_mvc_probabilities(VP8Context *s, int mvc_size)
Definition: vp8.c:394
static av_always_inline int read_mv_component(VP56RangeCoder *c, const uint8_t *p, int vp7)
Motion vector coding, 17.1.
Definition: vp8.c:769
static const uint8_t subpel_idx[3][8]
Definition: vp8.c:1691
int uvlinesize
Definition: vp8.h:154
static void update_refs(VP8Context *s)
Definition: vp8.c:414
static av_always_inline int vp8_rac_get_coeff(VP56RangeCoder *c, const uint8_t *prob)
Definition: vp56.h:389
static const uint8_t vp8_coeff_band[16]
Definition: vp8data.h:325
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:117
static const uint16_t vp8_ac_qlookup[VP8_MAX_QUANT+1]
Definition: vp8data.h:730
static const uint8_t vp8_pred16x16_prob_intra[4]
Definition: vp8data.h:161
uint8_t score
Definition: vp8data.h:65
static av_always_inline void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int mb_x, int keyframe, int layout)
Definition: vp8.c:1117
static int vp8_rac_get_uint(VP56RangeCoder *c, int bits)
Definition: vp56.h:320
#define DC_127_PRED8x8
Definition: h264pred.h:85
void ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size)
Definition: vp56rac.c:40
void(* vp8_luma_dc_wht)(int16_t block[4][4][16], int16_t dc[16])
Definition: vp8dsp.h:38
Definition: vp56.h:65
av_cold int ff_vp8_decode_init(AVCodecContext *avctx)
Definition: vp8.c:2754
int update_altref
Definition: vp8.h:254
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
VP8intmv mv_max
Definition: vp8.h:161
uint8_t feature_enabled[4]
Macroblock features (VP7)
Definition: vp8.h:305
int8_t mode[VP8_MVMODE_SPLIT+1]
filter strength adjustment for the following macroblock modes: [0-3] - i16x16 (always zero) [4] - i4x...
Definition: vp8.h:214
2 8x16 blocks (horizontal)
Definition: vp8.h:74
av_cold int ff_vp8_decode_free(AVCodecContext *avctx)
Definition: vp8.c:2687
static av_always_inline void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr, int linesize, int uvlinesize, int simple)
Definition: vp8.c:1452
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
uint8_t pred8x8c[3]
Definition: vp8.h:244
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:522
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:522
discard all non reference
Definition: avcodec.h:665
static av_always_inline void vp78_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe, VP8Frame *prev_frame, int is_vp7)
Definition: vp8.c:2184
uint8_t partitioning
Definition: vp8.h:92
void(* decode_mb_row_no_filter)(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr)
Definition: vp8.h:285
#define AV_ZERO64(d)
Definition: intreadwrite.h:618
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static av_always_inline void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment, uint8_t *ref, int layout, int is_vp7)
Definition: vp8.c:1152
void(* vp8_v_loop_filter_simple)(uint8_t *dst, ptrdiff_t stride, int flim)
Definition: vp8dsp.h:69
int16_t x
Definition: vp56.h:66
#define CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:870
common internal api header.
static void vp8_get_quants(VP8Context *s)
Definition: vp8.c:304
#define CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:866
#define LOCAL_ALIGNED(a, t, v,...)
Definition: internal.h:109
#define AV_COPY128(d, s)
Definition: intreadwrite.h:594
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
Definition: vp3.c:1915
int wait_mb_pos
Definition: vp8.h:125
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
Definition: vp8.c:68
uint8_t chroma_pred_mode
Definition: vp8.h:93
static double c[64]
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:115
#define DC_129_PRED8x8
Definition: h264pred.h:86
enum AVDiscard skip_loop_filter
Skip loop filtering for selected frames.
Definition: avcodec.h:2933
static av_always_inline int vp8_rac_get(VP56RangeCoder *c)
Definition: vp56.h:304
int invisible
Definition: vp8.h:251
static av_always_inline int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout, int is_vp7)
Split motion vector prediction, 16.4.
Definition: vp8.c:825
static const SiprModeParam modes[MODE_COUNT]
Definition: sipr.c:69
int ref_count[3]
Definition: vp8.h:164
void * priv_data
Definition: avcodec.h:1283
int(* update_thread_context)(AVCodecContext *dst, const AVCodecContext *src)
Copy necessary context variables from a previous thread context to the current one.
Definition: avcodec.h:3238
static av_always_inline int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y, int vp7)
Definition: vp8.c:1532
void(* vp8_h_loop_filter16y)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:50
#define MODE_I4x4
Definition: vp8.h:64
static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width, int xoffset, int yoffset, int boundary, int *edge_x, int *edge_y)
The vp7 reference decoder uses a padding macroblock column (added to right edge of the frame) to guar...
Definition: vp8.c:907
#define XCHG(a, b, xchg)
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Definition: avcodec.h:2813
#define update_pos(td, mb_y, mb_x)
Definition: vp8.c:2265
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1291
#define HOR_VP8_PRED
unaveraged version of HOR_PRED, see
Definition: h264pred.h:63
VP8DSPContext vp8dsp
Definition: vp8.h:269
static av_always_inline int update_dimensions(VP8Context *s, int width, int height, int is_vp7)
Definition: vp8.c:149
int thread_nr
Definition: vp8.h:119
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
static av_always_inline int vp78_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata, int jobnr, int threadnr, int is_vp7)
Definition: vp8.c:2473
static const double coeff[2][5]
Definition: vf_owdenoise.c:71
void(* vp8_idct_dc_add)(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
Definition: vp8dsp.h:41
uint64_t layout
AVDiscard
Definition: avcodec.h:660
static av_unused int vp8_rac_get_nn(VP56RangeCoder *c)
Definition: vp56.h:354
void(* vp8_v_loop_filter16y_inner)(uint8_t *dst, ptrdiff_t stride, int flim_E, int flim_I, int hev_thresh)
Definition: vp8dsp.h:58
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
#define av_uninit(x)
Definition: attributes.h:141
static av_always_inline void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst, ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off, int block_w, int block_h, int width, int height, ptrdiff_t linesize, vp8_mc_func mc_func[3][3])
luma MC function
Definition: vp8.c:1715
static const uint8_t vp8_token_update_probs[4][8][3][NUM_DCT_TOKENS-1]
Definition: vp8data.h:540
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y, int is_vp7)
Definition: vp8.c:2063
#define av_freep(p)
#define IS_VP7
Definition: vp8dsp.h:102
static int init_thread_copy(AVCodecContext *avctx)
Definition: alac.c:640
#define av_always_inline
Definition: attributes.h:37
int8_t filter_level[4]
base loop filter level
Definition: vp8.h:176
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static const int vp8_mode_contexts[6][4]
Definition: vp8data.h:118
static const uint8_t vp8_dct_cat1_prob[]
Definition: vp8data.h:342
#define FFSWAP(type, a, b)
Definition: common.h:69
uint8_t intra
Definition: vp8.h:240
static av_always_inline void vp8_decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
Definition: vp8.c:1017
uint8_t non_zero_count_cache[6][4]
This is the index plus one of the last non-zero coeff for each of the blocks in the current macrobloc...
Definition: vp8.h:111
uint8_t skip
Definition: vp8.h:87
void ff_vp8dsp_init(VP8DSPContext *c)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:85
static void vp78_reset_probability_tables(VP8Context *s)
Definition: vp8.c:366
This structure stores compressed data.
Definition: avcodec.h:1139
#define VP8_MVC_SIZE
Definition: vp8.c:392
static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
Definition: vp8.c:489
uint8_t profile
Definition: vp8.h:159
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:969
const uint8_t *const ff_vp8_dct_cat_prob[]
Definition: vp8data.h:362
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
VP8ThreadData * thread_data
Definition: vp8.h:144
Predicted.
Definition: avutil.h:268
int thread_mb_pos
Definition: vp8.h:124
2x2 blocks of 8x8px each
Definition: vp8.h:75
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
Definition: vp8.c:2150
static const VP7MVPred vp7_mv_pred[VP7_MV_PRED_COUNT]
Definition: vp8data.h:69
static const uint16_t vp7_y2ac_qlookup[]
Definition: vp8data.h:824
static const uint8_t vp7_submv_prob[3]
Definition: vp8data.h:149
static av_always_inline int vp78_decode_init(AVCodecContext *avctx, int is_vp7)
Definition: vp8.c:2711
#define AV_WN64(p, v)
Definition: intreadwrite.h:380
uint8_t filter_level
Definition: vp8.h:81
static int width
static int16_t block[64]
Definition: dct-test.c:110