FFmpeg
vp3.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <string.h>
36 
37 #include "libavutil/imgutils.h"
38 
39 #include "avcodec.h"
40 #include "get_bits.h"
41 #include "hpeldsp.h"
42 #include "internal.h"
43 #include "mathops.h"
44 #include "thread.h"
45 #include "videodsp.h"
46 #include "vp3data.h"
47 #include "vp4data.h"
48 #include "vp3dsp.h"
49 #include "xiph.h"
50 
51 #define FRAGMENT_PIXELS 8
52 
53 // FIXME split things out into their own arrays
54 typedef struct Vp3Fragment {
55  int16_t dc;
58 } Vp3Fragment;
59 
60 #define SB_NOT_CODED 0
61 #define SB_PARTIALLY_CODED 1
62 #define SB_FULLY_CODED 2
63 
64 // This is the maximum length of a single long bit run that can be encoded
65 // for superblock coding or block qps. Theora special-cases this to read a
66 // bit instead of flipping the current bit to allow for runs longer than 4129.
67 #define MAXIMUM_LONG_BIT_RUN 4129
68 
69 #define MODE_INTER_NO_MV 0
70 #define MODE_INTRA 1
71 #define MODE_INTER_PLUS_MV 2
72 #define MODE_INTER_LAST_MV 3
73 #define MODE_INTER_PRIOR_LAST 4
74 #define MODE_USING_GOLDEN 5
75 #define MODE_GOLDEN_MV 6
76 #define MODE_INTER_FOURMV 7
77 #define CODING_MODE_COUNT 8
78 
79 /* special internal mode */
80 #define MODE_COPY 8
81 
82 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
83 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
84 
85 
86 /* There are 6 preset schemes, plus a free-form scheme */
87 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
88  /* scheme 1: Last motion vector dominates */
93 
94  /* scheme 2 */
99 
100  /* scheme 3 */
105 
106  /* scheme 4 */
111 
112  /* scheme 5: No motion vector dominates */
117 
118  /* scheme 6 */
123 };
124 
/* (x, y) offsets of the 16 fragments inside a 4x4-fragment superblock,
 * listed in Hilbert-curve traversal order; used when mapping superblock
 * indices to fragment indices (see the superblock_fragments setup loop). */
125 static const uint8_t hilbert_offset[16][2] = {
126  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
127  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
128  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
129  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
130 };
131 
132 enum {
138 };
139 
140 static const uint8_t vp4_pred_block_type_map[8] = {
149 };
150 
151 typedef struct {
152  int dc;
153  int type;
154 } VP4Predictor;
155 
156 #define MIN_DEQUANT_VAL 2
157 
158 typedef struct Vp3DecodeContext {
161  int version;
162  int width, height;
167  int keyframe;
173  DECLARE_ALIGNED(16, int16_t, block)[64];
177 
178  int qps[3];
179  int nqps;
180  int last_qps[3];
181 
191  unsigned char *superblock_coding;
192 
193  int macroblock_count; /* y macroblock count */
199  int yuv_macroblock_count; /* y+u+v macroblock count */
200 
204 
207  int data_offset[3];
211 
212  int8_t (*motion_val[2])[2];
213 
214  /* tables */
215  uint16_t coded_dc_scale_factor[2][64];
216  uint32_t coded_ac_scale_factor[64];
219  uint8_t qr_size[2][3][64];
220  uint16_t qr_base[2][3][64];
221 
222  /**
223  * This is a list of all tokens in bitstream order. Reordering takes place
224  * by pulling from each level during IDCT. As a consequence, IDCT must be
225  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
226  * otherwise. The 32 different tokens with up to 12 bits of extradata are
227  * collapsed into 3 types, packed as follows:
228  * (from the low to high bits)
229  *
230  * 2 bits: type (0,1,2)
231  * 0: EOB run, 14 bits for run length (12 needed)
232  * 1: zero run, 7 bits for run length
233  * 7 bits for the next coefficient (3 needed)
234  * 2: coefficient, 14 bits (11 needed)
235  *
236  * Coefficients are signed, so are packed in the highest bits for automatic
237  * sign extension.
238  */
239  int16_t *dct_tokens[3][64];
240  int16_t *dct_tokens_base;
241 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
242 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
243 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
244 
245  /**
246  * number of blocks that contain DCT coefficients at
247  * the given level or higher
248  */
249  int num_coded_frags[3][64];
251 
252  /* this is a list of indexes into the all_fragments array indicating
253  * which of the fragments are coded */
255 
259 
260  VLC dc_vlc[16];
265 
266  VLC superblock_run_length_vlc; /* version < 2 */
267  VLC fragment_run_length_vlc; /* version < 2 */
268  VLC block_pattern_vlc[2]; /* version >= 2*/
270  VLC motion_vector_vlc; /* version < 2 */
271  VLC vp4_mv_vlc[2][7]; /* version >=2 */
272 
273  /* these arrays need to be on 16-byte boundaries since SSE2 operations
274  * index into them */
275  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
276 
277  /* This table contains superblock_count * 16 entries. Each set of 16
278  * numbers corresponds to the fragment indexes 0..15 of the superblock.
279  * An entry will be -1 to indicate that no entry corresponds to that
280  * index. */
282 
283  /* This is an array that indicates how a particular macroblock
284  * is coded. */
285  unsigned char *macroblock_coding;
286 
288 
289  /* Huffman decode */
290  int hti;
291  unsigned int hbits;
292  int entries;
294  uint32_t huffman_table[80][32][2];
295 
298 
299  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
301 
302 /************************************************************************
303  * VP3 specific functions
304  ************************************************************************/
305 
306 static av_cold void free_tables(AVCodecContext *avctx)
307 {
308  Vp3DecodeContext *s = avctx->priv_data;
309 
310  av_freep(&s->superblock_coding);
311  av_freep(&s->all_fragments);
312  av_freep(&s->nkf_coded_fragment_list);
313  av_freep(&s->kf_coded_fragment_list);
314  av_freep(&s->dct_tokens_base);
315  av_freep(&s->superblock_fragments);
316  av_freep(&s->macroblock_coding);
317  av_freep(&s->dc_pred_row);
318  av_freep(&s->motion_val[0]);
319  av_freep(&s->motion_val[1]);
320 }
321 
322 static void vp3_decode_flush(AVCodecContext *avctx)
323 {
324  Vp3DecodeContext *s = avctx->priv_data;
325 
326  if (s->golden_frame.f)
327  ff_thread_release_buffer(avctx, &s->golden_frame);
328  if (s->last_frame.f)
329  ff_thread_release_buffer(avctx, &s->last_frame);
330  if (s->current_frame.f)
331  ff_thread_release_buffer(avctx, &s->current_frame);
332 }
333 
335 {
336  Vp3DecodeContext *s = avctx->priv_data;
337  int i, j;
338 
339  free_tables(avctx);
340  av_freep(&s->edge_emu_buffer);
341 
342  s->theora_tables = 0;
343 
344  /* release all frames */
345  vp3_decode_flush(avctx);
346  av_frame_free(&s->current_frame.f);
347  av_frame_free(&s->last_frame.f);
348  av_frame_free(&s->golden_frame.f);
349 
350  for (i = 0; i < 16; i++) {
351  ff_free_vlc(&s->dc_vlc[i]);
352  ff_free_vlc(&s->ac_vlc_1[i]);
353  ff_free_vlc(&s->ac_vlc_2[i]);
354  ff_free_vlc(&s->ac_vlc_3[i]);
355  ff_free_vlc(&s->ac_vlc_4[i]);
356  }
357 
358  ff_free_vlc(&s->superblock_run_length_vlc);
359  ff_free_vlc(&s->fragment_run_length_vlc);
360  ff_free_vlc(&s->mode_code_vlc);
361  ff_free_vlc(&s->motion_vector_vlc);
362 
363  for (j = 0; j < 2; j++)
364  for (i = 0; i < 7; i++)
365  ff_free_vlc(&s->vp4_mv_vlc[j][i]);
366 
367  for (i = 0; i < 2; i++)
368  ff_free_vlc(&s->block_pattern_vlc[i]);
369  return 0;
370 }
371 
372 /**
373  * This function sets up all of the various blocks mappings:
374  * superblocks <-> fragments, macroblocks <-> fragments,
375  * superblocks <-> macroblocks
376  *
377  * @return 0 is successful; returns 1 if *anything* went wrong.
378  */
380 {
381  int sb_x, sb_y, plane;
382  int x, y, i, j = 0;
383 
384  for (plane = 0; plane < 3; plane++) {
385  int sb_width = plane ? s->c_superblock_width
386  : s->y_superblock_width;
387  int sb_height = plane ? s->c_superblock_height
388  : s->y_superblock_height;
389  int frag_width = s->fragment_width[!!plane];
390  int frag_height = s->fragment_height[!!plane];
391 
392  for (sb_y = 0; sb_y < sb_height; sb_y++)
393  for (sb_x = 0; sb_x < sb_width; sb_x++)
394  for (i = 0; i < 16; i++) {
395  x = 4 * sb_x + hilbert_offset[i][0];
396  y = 4 * sb_y + hilbert_offset[i][1];
397 
398  if (x < frag_width && y < frag_height)
399  s->superblock_fragments[j++] = s->fragment_start[plane] +
400  y * frag_width + x;
401  else
402  s->superblock_fragments[j++] = -1;
403  }
404  }
405 
406  return 0; /* successful path out */
407 }
408 
409 /*
410  * This function sets up the dequantization tables used for a particular
411  * frame.
412  */
413 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
414 {
415  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
416  int i, plane, inter, qri, bmi, bmj, qistart;
417 
 /* Build a dequant matrix for every (inter, plane) pair at quality
  * index s->qps[qpi]. */
418  for (inter = 0; inter < 2; inter++) {
419  for (plane = 0; plane < 3; plane++) {
420  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
 /* Locate the quant-range interval containing this qp: on exit,
  * `sum` is the interval's upper bound and `qri` its index. */
421  int sum = 0;
422  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
423  sum += s->qr_size[inter][plane][qri];
424  if (s->qps[qpi] <= sum)
425  break;
426  }
 /* qistart = lower bound of the interval; bmi/bmj = indices of the
  * base matrices bounding it. */
427  qistart = sum - s->qr_size[inter][plane][qri];
428  bmi = s->qr_base[inter][plane][qri];
429  bmj = s->qr_base[inter][plane][qri + 1];
430  for (i = 0; i < 64; i++) {
 /* Linear interpolation between the two bounding base matrices,
  * rounded to nearest (the added qr_size is the rounding term). */
431  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
432  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
433  s->qr_size[inter][plane][qri]) /
434  (2 * s->qr_size[inter][plane][qri]);
435 
436  int qmin = 8 << (inter + !i);
437  int qscale = i ? ac_scale_factor : dc_scale_factor;
438  int qbias = (1 + inter) * 3;
 /* VP3/Theora (version < 2) and all DC terms: scale and clamp to
  * [qmin, 4096]. VP4 AC terms instead apply a +/- qbias offset
  * around the scaling, with no clamping. */
439  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
440  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
441  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
442  }
443  /* all DC coefficients use the same quant so as not to interfere
444  * with DC prediction */
445  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
446  }
447  }
448 }
449 
450 /*
451  * This function initializes the loop filter boundary limits if the frame's
452  * quality index is different from the previous frame's.
453  *
454  * The filter_limit_values may not be larger than 127.
455  */
457 {
458  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
459 }
460 
461 /*
462  * This function unpacks all of the superblock/macroblock/fragment coding
463  * information from the bitstream.
464  */
466 {
467  int superblock_starts[3] = {
468  0, s->u_superblock_start, s->v_superblock_start
469  };
470  int bit = 0;
471  int current_superblock = 0;
472  int current_run = 0;
473  int num_partial_superblocks = 0;
474 
475  int i, j;
476  int current_fragment;
477  int plane;
478  int plane0_num_coded_frags = 0;
479 
480  if (s->keyframe) {
481  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
482  } else {
483  /* unpack the list of partially-coded superblocks */
484  bit = get_bits1(gb) ^ 1;
485  current_run = 0;
486 
487  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
488  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
489  bit = get_bits1(gb);
490  else
491  bit ^= 1;
492 
493  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
494  6, 2) + 1;
495  if (current_run == 34)
496  current_run += get_bits(gb, 12);
497 
498  if (current_run > s->superblock_count - current_superblock) {
499  av_log(s->avctx, AV_LOG_ERROR,
500  "Invalid partially coded superblock run length\n");
501  return -1;
502  }
503 
504  memset(s->superblock_coding + current_superblock, bit, current_run);
505 
506  current_superblock += current_run;
507  if (bit)
508  num_partial_superblocks += current_run;
509  }
510 
511  /* unpack the list of fully coded superblocks if any of the blocks were
512  * not marked as partially coded in the previous step */
513  if (num_partial_superblocks < s->superblock_count) {
514  int superblocks_decoded = 0;
515 
516  current_superblock = 0;
517  bit = get_bits1(gb) ^ 1;
518  current_run = 0;
519 
520  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
521  get_bits_left(gb) > 0) {
522  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
523  bit = get_bits1(gb);
524  else
525  bit ^= 1;
526 
527  current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
528  6, 2) + 1;
529  if (current_run == 34)
530  current_run += get_bits(gb, 12);
531 
532  for (j = 0; j < current_run; current_superblock++) {
533  if (current_superblock >= s->superblock_count) {
534  av_log(s->avctx, AV_LOG_ERROR,
535  "Invalid fully coded superblock run length\n");
536  return -1;
537  }
538 
539  /* skip any superblocks already marked as partially coded */
540  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
541  s->superblock_coding[current_superblock] = 2 * bit;
542  j++;
543  }
544  }
545  superblocks_decoded += current_run;
546  }
547  }
548 
549  /* if there were partial blocks, initialize bitstream for
550  * unpacking fragment codings */
551  if (num_partial_superblocks) {
552  current_run = 0;
553  bit = get_bits1(gb);
554  /* toggle the bit because as soon as the first run length is
555  * fetched the bit will be toggled again */
556  bit ^= 1;
557  }
558  }
559 
560  /* figure out which fragments are coded; iterate through each
561  * superblock (all planes) */
562  s->total_num_coded_frags = 0;
563  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
564 
565  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
566  : s->nkf_coded_fragment_list;
567 
568  for (plane = 0; plane < 3; plane++) {
569  int sb_start = superblock_starts[plane];
570  int sb_end = sb_start + (plane ? s->c_superblock_count
571  : s->y_superblock_count);
572  int num_coded_frags = 0;
573 
574  if (s->keyframe) {
575  if (s->num_kf_coded_fragment[plane] == -1) {
576  for (i = sb_start; i < sb_end; i++) {
577  /* iterate through all 16 fragments in a superblock */
578  for (j = 0; j < 16; j++) {
579  /* if the fragment is in bounds, check its coding status */
580  current_fragment = s->superblock_fragments[i * 16 + j];
581  if (current_fragment != -1) {
582  s->coded_fragment_list[plane][num_coded_frags++] =
583  current_fragment;
584  }
585  }
586  }
587  s->num_kf_coded_fragment[plane] = num_coded_frags;
588  } else
589  num_coded_frags = s->num_kf_coded_fragment[plane];
590  } else {
591  for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
592  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
593  return AVERROR_INVALIDDATA;
594  }
595  /* iterate through all 16 fragments in a superblock */
596  for (j = 0; j < 16; j++) {
597  /* if the fragment is in bounds, check its coding status */
598  current_fragment = s->superblock_fragments[i * 16 + j];
599  if (current_fragment != -1) {
600  int coded = s->superblock_coding[i];
601 
602  if (coded == SB_PARTIALLY_CODED) {
603  /* fragment may or may not be coded; this is the case
604  * that cares about the fragment coding runs */
605  if (current_run-- == 0) {
606  bit ^= 1;
607  current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
608  }
609  coded = bit;
610  }
611 
612  if (coded) {
613  /* default mode; actual mode will be decoded in
614  * the next phase */
615  s->all_fragments[current_fragment].coding_method =
617  s->coded_fragment_list[plane][num_coded_frags++] =
618  current_fragment;
619  } else {
620  /* not coded; copy this fragment from the prior frame */
621  s->all_fragments[current_fragment].coding_method =
622  MODE_COPY;
623  }
624  }
625  }
626  }
627  }
628  if (!plane)
629  plane0_num_coded_frags = num_coded_frags;
630  s->total_num_coded_frags += num_coded_frags;
631  for (i = 0; i < 64; i++)
632  s->num_coded_frags[plane][i] = num_coded_frags;
633  if (plane < 2)
634  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
635  num_coded_frags;
636  }
637  return 0;
638 }
639 
640 #define BLOCK_X (2 * mb_x + (k & 1))
641 #define BLOCK_Y (2 * mb_y + (k >> 1))
642 
643 #if CONFIG_VP4_DECODER
644 /**
645  * @return number of blocks, or > yuv_macroblock_count on error.
646  * return value is always >= 1.
647  */
648 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
649 {
650  int v = 1;
651  int bits;
652  while ((bits = show_bits(gb, 9)) == 0x1ff) {
653  skip_bits(gb, 9);
654  v += 256;
655  if (v > s->yuv_macroblock_count) {
656  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
657  return v;
658  }
659  }
660 #define body(n) { \
661  skip_bits(gb, 2 + n); \
662  v += (1 << n) + get_bits(gb, n); }
663 #define thresh(n) (0x200 - (0x80 >> n))
664 #define else_if(n) else if (bits < thresh(n)) body(n)
665  if (bits < 0x100) {
666  skip_bits(gb, 1);
667  } else if (bits < thresh(0)) {
668  skip_bits(gb, 2);
669  v += 1;
670  }
671  else_if(1)
672  else_if(2)
673  else_if(3)
674  else_if(4)
675  else_if(5)
676  else_if(6)
677  else body(7)
678 #undef body
679 #undef thresh
680 #undef else_if
681  return v;
682 }
683 
684 static int vp4_get_block_pattern(Vp3DecodeContext *s, GetBitContext *gb, int *next_block_pattern_table)
685 {
686  int v = get_vlc2(gb, s->block_pattern_vlc[*next_block_pattern_table].table, 3, 2);
687  if (v == -1) {
688  av_log(s->avctx, AV_LOG_ERROR, "Invalid block pattern\n");
689  *next_block_pattern_table = 0;
690  return 0;
691  }
692  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
693  return v + 1;
694 }
695 
/* Unpack the VP4 per-macroblock coding information: which macroblocks are
 * fully/partially/not coded, and for partial ones which of the 4 luma (or
 * chroma) fragments are coded. Returns 0 on success, negative on error.
 * NOTE: s->superblock_coding is reused here as a per-macroblock flag array
 * (indexed up to yuv_macroblock_count), not per superblock. */
696 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
697 {
698  int plane, i, j, k, fragment;
699  int next_block_pattern_table;
700  int bit, current_run, has_partial;
701 
702  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
703 
 /* keyframes have every fragment coded; nothing to read */
704  if (s->keyframe)
705  return 0;
706 
 /* pass 1: alternating runs of fully coded (2*bit) / other macroblocks */
707  has_partial = 0;
708  bit = get_bits1(gb);
709  for (i = 0; i < s->yuv_macroblock_count; i += current_run) {
710  if (get_bits_left(gb) <= 0)
711  return AVERROR_INVALIDDATA;
712  current_run = vp4_get_mb_count(s, gb);
713  if (current_run > s->yuv_macroblock_count - i)
714  return -1;
715  memset(s->superblock_coding + i, 2 * bit, current_run);
716  bit ^= 1;
717  has_partial |= bit;
718  }
719 
 /* pass 2: among the macroblocks left at 0, alternating runs decide
  * SB_PARTIALLY_CODED (1) vs SB_NOT_CODED (0) */
720  if (has_partial) {
721  if (get_bits_left(gb) <= 0)
722  return AVERROR_INVALIDDATA;
723  bit = get_bits1(gb);
724  current_run = vp4_get_mb_count(s, gb);
725  for (i = 0; i < s->yuv_macroblock_count; i++) {
726  if (!s->superblock_coding[i]) {
727  if (!current_run) {
728  bit ^= 1;
729  current_run = vp4_get_mb_count(s, gb);
730  }
731  s->superblock_coding[i] = bit;
732  current_run--;
733  }
734  }
735  if (current_run) /* handle situation when vp4_get_mb_count() fails */
736  return -1;
737  }
738 
 /* pass 3: walk superblocks/macroblocks in decode order and stamp each
  * fragment's coding_method from the (full/partial/none) pattern */
739  next_block_pattern_table = 0;
740  i = 0;
741  for (plane = 0; plane < 3; plane++) {
742  int sb_x, sb_y;
743  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
744  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
745  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
746  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
747  int fragment_width = s->fragment_width[!!plane];
748  int fragment_height = s->fragment_height[!!plane];
749 
750  for (sb_y = 0; sb_y < sb_height; sb_y++) {
751  for (sb_x = 0; sb_x < sb_width; sb_x++) {
752  for (j = 0; j < 4; j++) {
753  int mb_x = 2 * sb_x + (j >> 1);
 /* '+' binds tighter than '^' in C, so this parses as
  * (2 * sb_y + (j >> 1)) ^ (j & 1), giving the mb_y
  * sequence 0,1,1,0 for j = 0..3 (zigzag within the SB) */
754  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
755  int mb_coded, pattern, coded;
756 
757  if (mb_x >= mb_width || mb_y >= mb_height)
758  continue;
759 
760  mb_coded = s->superblock_coding[i++];
761 
762  if (mb_coded == SB_FULLY_CODED)
763  pattern = 0xF;
764  else if (mb_coded == SB_PARTIALLY_CODED)
765  pattern = vp4_get_block_pattern(s, gb, &next_block_pattern_table);
766  else
767  pattern = 0;
768 
769  for (k = 0; k < 4; k++) {
770  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
771  continue;
772  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
773  coded = pattern & (8 >> k);
774  /* MODE_INTER_NO_MV is the default for coded fragments.
775  the actual method is decoded in the next phase. */
776  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
777  }
778  }
779  }
780  }
781  }
782  return 0;
783 }
784 #endif
785 
786 /*
787  * This function unpacks all the coding mode data for individual macroblocks
788  * from the bitstream.
789  */
791 {
792  int i, j, k, sb_x, sb_y;
793  int scheme;
794  int current_macroblock;
795  int current_fragment;
796  int coding_mode;
797  int custom_mode_alphabet[CODING_MODE_COUNT];
798  const int *alphabet;
799  Vp3Fragment *frag;
800 
801  if (s->keyframe) {
802  for (i = 0; i < s->fragment_count; i++)
803  s->all_fragments[i].coding_method = MODE_INTRA;
804  } else {
805  /* fetch the mode coding scheme for this frame */
806  scheme = get_bits(gb, 3);
807 
808  /* is it a custom coding scheme? */
809  if (scheme == 0) {
810  for (i = 0; i < 8; i++)
811  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
812  for (i = 0; i < 8; i++)
813  custom_mode_alphabet[get_bits(gb, 3)] = i;
814  alphabet = custom_mode_alphabet;
815  } else
816  alphabet = ModeAlphabet[scheme - 1];
817 
818  /* iterate through all of the macroblocks that contain 1 or more
819  * coded fragments */
820  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
821  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
822  if (get_bits_left(gb) <= 0)
823  return -1;
824 
825  for (j = 0; j < 4; j++) {
826  int mb_x = 2 * sb_x + (j >> 1);
827  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
828  current_macroblock = mb_y * s->macroblock_width + mb_x;
829 
830  if (mb_x >= s->macroblock_width ||
831  mb_y >= s->macroblock_height)
832  continue;
833 
834  /* coding modes are only stored if the macroblock has
835  * at least one luma block coded, otherwise it must be
836  * INTER_NO_MV */
837  for (k = 0; k < 4; k++) {
838  current_fragment = BLOCK_Y *
839  s->fragment_width[0] + BLOCK_X;
840  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
841  break;
842  }
843  if (k == 4) {
844  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
845  continue;
846  }
847 
848  /* mode 7 means get 3 bits for each coding mode */
849  if (scheme == 7)
850  coding_mode = get_bits(gb, 3);
851  else
852  coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
853 
854  s->macroblock_coding[current_macroblock] = coding_mode;
855  for (k = 0; k < 4; k++) {
856  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
857  if (frag->coding_method != MODE_COPY)
858  frag->coding_method = coding_mode;
859  }
860 
861 #define SET_CHROMA_MODES \
862  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
863  frag[s->fragment_start[1]].coding_method = coding_mode; \
864  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
865  frag[s->fragment_start[2]].coding_method = coding_mode;
866 
867  if (s->chroma_y_shift) {
868  frag = s->all_fragments + mb_y *
869  s->fragment_width[1] + mb_x;
871  } else if (s->chroma_x_shift) {
872  frag = s->all_fragments +
873  2 * mb_y * s->fragment_width[1] + mb_x;
874  for (k = 0; k < 2; k++) {
876  frag += s->fragment_width[1];
877  }
878  } else {
879  for (k = 0; k < 4; k++) {
880  frag = s->all_fragments +
881  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
883  }
884  }
885  }
886  }
887  }
888  }
889 
890  return 0;
891 }
892 
893 static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
894 {
895  int v = get_vlc2(gb, s->vp4_mv_vlc[axis][vp4_mv_table_selector[FFABS(last_motion)]].table, 6, 2) - 31;
896  return last_motion < 0 ? -v : v;
897 }
898 
899 /*
900  * This function unpacks all the motion vectors for the individual
901  * macroblocks from the bitstream.
902  */
904 {
905  int j, k, sb_x, sb_y;
906  int coding_mode;
907  int motion_x[4];
908  int motion_y[4];
909  int last_motion_x = 0;
910  int last_motion_y = 0;
911  int prior_last_motion_x = 0;
912  int prior_last_motion_y = 0;
913  int last_gold_motion_x = 0;
914  int last_gold_motion_y = 0;
915  int current_macroblock;
916  int current_fragment;
917  int frag;
918 
919  if (s->keyframe)
920  return 0;
921 
922  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
923  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
924 
925  /* iterate through all of the macroblocks that contain 1 or more
926  * coded fragments */
927  for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
928  for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
929  if (get_bits_left(gb) <= 0)
930  return -1;
931 
932  for (j = 0; j < 4; j++) {
933  int mb_x = 2 * sb_x + (j >> 1);
934  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
935  current_macroblock = mb_y * s->macroblock_width + mb_x;
936 
937  if (mb_x >= s->macroblock_width ||
938  mb_y >= s->macroblock_height ||
939  s->macroblock_coding[current_macroblock] == MODE_COPY)
940  continue;
941 
942  switch (s->macroblock_coding[current_macroblock]) {
943  case MODE_GOLDEN_MV:
944  if (coding_mode == 2) { /* VP4 */
945  last_gold_motion_x = motion_x[0] = vp4_get_mv(s, gb, 0, last_gold_motion_x);
946  last_gold_motion_y = motion_y[0] = vp4_get_mv(s, gb, 1, last_gold_motion_y);
947  break;
948  } /* otherwise fall through */
949  case MODE_INTER_PLUS_MV:
950  /* all 6 fragments use the same motion vector */
951  if (coding_mode == 0) {
952  motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
953  motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
954  } else if (coding_mode == 1) {
955  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
956  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
957  } else { /* VP4 */
958  motion_x[0] = vp4_get_mv(s, gb, 0, last_motion_x);
959  motion_y[0] = vp4_get_mv(s, gb, 1, last_motion_y);
960  }
961 
962  /* vector maintenance, only on MODE_INTER_PLUS_MV */
963  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
964  prior_last_motion_x = last_motion_x;
965  prior_last_motion_y = last_motion_y;
966  last_motion_x = motion_x[0];
967  last_motion_y = motion_y[0];
968  }
969  break;
970 
971  case MODE_INTER_FOURMV:
972  /* vector maintenance */
973  prior_last_motion_x = last_motion_x;
974  prior_last_motion_y = last_motion_y;
975 
976  /* fetch 4 vectors from the bitstream, one for each
977  * Y fragment, then average for the C fragment vectors */
978  for (k = 0; k < 4; k++) {
979  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
980  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
981  if (coding_mode == 0) {
982  motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
983  motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
984  } else if (coding_mode == 1) {
985  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
986  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
987  } else { /* VP4 */
988  motion_x[k] = vp4_get_mv(s, gb, 0, prior_last_motion_x);
989  motion_y[k] = vp4_get_mv(s, gb, 1, prior_last_motion_y);
990  }
991  last_motion_x = motion_x[k];
992  last_motion_y = motion_y[k];
993  } else {
994  motion_x[k] = 0;
995  motion_y[k] = 0;
996  }
997  }
998  break;
999 
1000  case MODE_INTER_LAST_MV:
1001  /* all 6 fragments use the last motion vector */
1002  motion_x[0] = last_motion_x;
1003  motion_y[0] = last_motion_y;
1004 
1005  /* no vector maintenance (last vector remains the
1006  * last vector) */
1007  break;
1008 
1009  case MODE_INTER_PRIOR_LAST:
1010  /* all 6 fragments use the motion vector prior to the
1011  * last motion vector */
1012  motion_x[0] = prior_last_motion_x;
1013  motion_y[0] = prior_last_motion_y;
1014 
1015  /* vector maintenance */
1016  prior_last_motion_x = last_motion_x;
1017  prior_last_motion_y = last_motion_y;
1018  last_motion_x = motion_x[0];
1019  last_motion_y = motion_y[0];
1020  break;
1021 
1022  default:
1023  /* covers intra, inter without MV, golden without MV */
1024  motion_x[0] = 0;
1025  motion_y[0] = 0;
1026 
1027  /* no vector maintenance */
1028  break;
1029  }
1030 
1031  /* assign the motion vectors to the correct fragments */
1032  for (k = 0; k < 4; k++) {
1033  current_fragment =
1034  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1035  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1036  s->motion_val[0][current_fragment][0] = motion_x[k];
1037  s->motion_val[0][current_fragment][1] = motion_y[k];
1038  } else {
1039  s->motion_val[0][current_fragment][0] = motion_x[0];
1040  s->motion_val[0][current_fragment][1] = motion_y[0];
1041  }
1042  }
1043 
1044  if (s->chroma_y_shift) {
1045  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1046  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1047  motion_x[2] + motion_x[3], 2);
1048  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1049  motion_y[2] + motion_y[3], 2);
1050  }
1051  if (s->version <= 2) {
1052  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1053  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1054  }
1055  frag = mb_y * s->fragment_width[1] + mb_x;
1056  s->motion_val[1][frag][0] = motion_x[0];
1057  s->motion_val[1][frag][1] = motion_y[0];
1058  } else if (s->chroma_x_shift) {
1059  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1060  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1061  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1062  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1063  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1064  } else {
1065  motion_x[1] = motion_x[0];
1066  motion_y[1] = motion_y[0];
1067  }
1068  if (s->version <= 2) {
1069  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1070  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1071  }
1072  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1073  for (k = 0; k < 2; k++) {
1074  s->motion_val[1][frag][0] = motion_x[k];
1075  s->motion_val[1][frag][1] = motion_y[k];
1076  frag += s->fragment_width[1];
1077  }
1078  } else {
1079  for (k = 0; k < 4; k++) {
1080  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1081  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1082  s->motion_val[1][frag][0] = motion_x[k];
1083  s->motion_val[1][frag][1] = motion_y[k];
1084  } else {
1085  s->motion_val[1][frag][0] = motion_x[0];
1086  s->motion_val[1][frag][1] = motion_y[0];
1087  }
1088  }
1089  }
1090  }
1091  }
1092  }
1093 
1094  return 0;
1095 }
1096 
{
    int qpi, i, j, bit, run_length, blocks_decoded, num_blocks_at_qpi;
    int num_blocks = s->total_num_coded_frags;

    /* qp index 0 is implicit; each additional qp level is signalled as a
     * bit-run partition over the blocks still sitting at the previous
     * level, so one pass is made per extra qp in use. */
    for (qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
        i = blocks_decoded = num_blocks_at_qpi = 0;

        /* first bit is stored inverted; it is flipped back on the first
         * pass through the loop below */
        bit        = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            /* after a maximum-length run the next bit is coded explicitly
             * instead of being the toggle of the previous one */
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, s->superblock_run_length_vlc.table, 6, 2) + 1;
            if (run_length == 34)
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            /* walk the coded fragment list, counting only fragments still
             * at this qpi; j tracks how many of the run we have consumed */
            for (j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}
1138 
1139 static inline int get_eob_run(GetBitContext *gb, int token)
1140 {
1141  int v = eob_run_table[token].base;
1142  if (eob_run_table[token].bits)
1143  v += get_bits(gb, eob_run_table[token].bits);
1144  return v;
1145 }
1146 
1147 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1148 {
1149  int bits_to_get, zero_run;
1150 
1151  bits_to_get = coeff_get_bits[token];
1152  if (bits_to_get)
1153  bits_to_get = get_bits(gb, bits_to_get);
1154  *coeff = coeff_tables[token][bits_to_get];
1155 
1156  zero_run = zero_run_base[token];
1157  if (zero_run_get_bits[token])
1158  zero_run += get_bits(gb, zero_run_get_bits[token]);
1159 
1160  return zero_run;
1161 }
1162 
/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
1176  VLC *table, int coeff_index,
1177  int plane,
1178  int eob_run)
{
    int i, j = 0;
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;
    VLC_TYPE(*vlc_table)[2] = table->table;

    if (num_coeffs < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid number of coefficients at level %d\n", coeff_index);
        return AVERROR_INVALIDDATA;
    }

    /* consume as much of the incoming EOB run as this plane/level has
     * blocks; any remainder spills into the next call */
    if (eob_run > num_coeffs) {
        coeff_i      =
        blocks_ended = num_coeffs;
        eob_run     -= num_coeffs;
    } else {
        coeff_i      =
        blocks_ended = eob_run;
        eob_run      = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            eob_run = get_eob_run(gb, token);
            /* an EOB run of 0 in the stream means "until the end" */
            if (!eob_run)
                eob_run = INT_MAX;

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run         = 0;
            }
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "Invalid zero run of %d with %d coeffs left\n",
                       zero_run, 64 - coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (i = coeff_index + 1; i <= coeff_index + zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (i = coeff_index + 1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;

    return eob_run;
}
1287 
1289  int first_fragment,
1290  int fragment_width,
1291  int fragment_height);
1292 /*
1293  * This function unpacks all of the DCT coefficient data from the
1294  * bitstream.
1295  */
{
    int i;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    VLC *y_tables[64];
    VLC *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
                                   0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                   1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
                                   2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        reverse_dc_prediction(s, s->fragment_start[1],
                              s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
                              s->fragment_width[1], s->fragment_height[1]);
    }

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;
    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables; the 63 AC levels are split into
     * 4 groups, each group sharing one set of VLC tables */
    for (i = 1; i <= 5; i++) {
        y_tables[i] = &s->ac_vlc_1[ac_y_table];
        c_tables[i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        y_tables[i] = &s->ac_vlc_2[ac_y_table];
        c_tables[i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        y_tables[i] = &s->ac_vlc_3[ac_y_table];
        c_tables[i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        y_tables[i] = &s->ac_vlc_4[ac_y_table];
        c_tables[i] = &s->ac_vlc_4[ac_c_table];
    }

    /* decode all AC coefficients, level by level, Y plane then both
     * C planes; the residual EOB run carries across calls */
    for (i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                       0, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       1, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       2, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
    }

    return 0;
}
1388 
1389 #if CONFIG_VP4_DECODER
/**
 * Instead of encoding TOKEN_EOB(value), eob_tracker[] carries the remaining
 * EOB run per coefficient level; a dummy TOKEN_EOB(0) value is written so
 * that vp3_dequant still works.
 *
 * @return < 0 on error
 */
static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                           VLC *vlc_tables[64],
                           int plane, int eob_tracker[64], int fragment)
{
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int coeff_i = 0;
    int eob_run;

    /* decode tokens for this fragment until either all 64 levels are
     * covered or a pending EOB run swallows the remaining levels */
    while (!eob_tracker[coeff_i]) {
        if (get_bits_left(gb) < 1)
            return AVERROR_INVALIDDATA;

        token = get_vlc2(gb, vlc_tables[coeff_i]->table, 11, 3);

        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            eob_run = get_eob_run(gb, token);
            /* emit a dummy EOB token and remember the rest of the run for
             * the following fragments at this coefficient level */
            *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
            eob_tracker[coeff_i] = eob_run - 1;
            return 0;
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                if (coeff_i + zero_run > 64) {
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "Invalid zero run of %d with %d coeffs left\n",
                           zero_run, 64 - coeff_i);
                    zero_run = 64 - coeff_i;
                }
                *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
                coeff_i += zero_run;
            } else {
                /* DC is kept in the fragment; prediction is added later
                 * by the caller */
                if (!coeff_i)
                    s->all_fragments[fragment].dc = coeff;

                *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
            }
            coeff_i++;
            if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
                return 0; /* stop */
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }
    /* an EOB run started by a previous fragment covers this level */
    *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
    eob_tracker[coeff_i]--;
    return 0;
}
1448 
1449 static void vp4_dc_predictor_reset(VP4Predictor *p)
1450 {
1451  p->dc = 0;
1452  p->type = VP4_DC_UNDEFINED;
1453 }
1454 
1455 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1456 {
1457  int i, j;
1458 
1459  for (i = 0; i < 4; i++)
1460  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1461 
1462  for (j = 1; j < 5; j++)
1463  for (i = 0; i < 4; i++)
1464  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1465 }
1466 
1467 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1468 {
1469  int i;
1470 
1471  for (i = 0; i < 4; i++)
1472  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1473 
1474  for (i = 1; i < 5; i++)
1475  dc_pred[i][0] = dc_pred[i][4];
1476 }
1477 
1478 /* note: dc_pred points to the current block */
1479 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1480 {
1481  int count = 0;
1482  int dc = 0;
1483 
1484  if (dc_pred[-6].type == type) {
1485  dc += dc_pred[-6].dc;
1486  count++;
1487  }
1488 
1489  if (dc_pred[6].type == type) {
1490  dc += dc_pred[6].dc;
1491  count++;
1492  }
1493 
1494  if (count != 2 && dc_pred[-1].type == type) {
1495  dc += dc_pred[-1].dc;
1496  count++;
1497  }
1498 
1499  if (count != 2 && dc_pred[1].type == type) {
1500  dc += dc_pred[1].dc;
1501  count++;
1502  }
1503 
1504  /* using division instead of shift to correctly handle negative values */
1505  return count == 2 ? dc / 2 : last_dc[type];
1506 }
1507 
1508 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1509 {
1510  int plane, i;
1511  int16_t *base = s->dct_tokens_base;
1512  for (plane = 0; plane < 3; plane++) {
1513  for (i = 0; i < 64; i++) {
1514  s->dct_tokens[plane][i] = base;
1515  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1516  }
1517  }
1518 }
1519 
static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    int i, j;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    VLC *tables[2][64];
    int plane, sb_y, sb_x;
    int eob_tracker[64];
    VP4Predictor dc_pred[6][6];
    int last_dc[NB_VP4_DC_TYPES];

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of DC/AC VLC tables */

    tables[0][0] = &s->dc_vlc[dc_y_table];
    tables[1][0] = &s->dc_vlc[dc_c_table];
    for (i = 1; i <= 5; i++) {
        tables[0][i] = &s->ac_vlc_1[ac_y_table];
        tables[1][i] = &s->ac_vlc_1[ac_c_table];
    }
    for (i = 6; i <= 14; i++) {
        tables[0][i] = &s->ac_vlc_2[ac_y_table];
        tables[1][i] = &s->ac_vlc_2[ac_c_table];
    }
    for (i = 15; i <= 27; i++) {
        tables[0][i] = &s->ac_vlc_3[ac_y_table];
        tables[1][i] = &s->ac_vlc_3[ac_c_table];
    }
    for (i = 28; i <= 63; i++) {
        tables[0][i] = &s->ac_vlc_4[ac_y_table];
        tables[1][i] = &s->ac_vlc_4[ac_c_table];
    }

    vp4_set_tokens_base(s);

    memset(last_dc, 0, sizeof(last_dc));

    for (plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
        memset(eob_tracker, 0, sizeof(eob_tracker));

        /* initialise dc prediction */
        for (i = 0; i < s->fragment_width[!!plane]; i++)
            vp4_dc_predictor_reset(&s->dc_pred_row[i]);

        for (j = 0; j < 6; j++)
            for (i = 0; i < 6; i++)
                vp4_dc_predictor_reset(&dc_pred[j][i]);

        /* superblocks in raster order; the 16 fragments inside each
         * superblock in Hilbert-curve order */
        for (sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
            for (sb_x = 0; sb_x * 4 < s->fragment_width[!!plane]; sb_x++) {
                vp4_dc_pred_before(s, dc_pred, sb_x);
                for (j = 0; j < 16; j++) {
                    int hx = hilbert_offset[j][0];
                    int hy = hilbert_offset[j][1];
                    int x = 4 * sb_x + hx;
                    int y = 4 * sb_y + hy;
                    VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
                    int fragment, dc_block_type;

                    /* superblocks may overhang the plane edge */
                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
                        continue;

                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;

                    if (s->all_fragments[fragment].coding_method == MODE_COPY)
                        continue;

                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
                        return -1;

                    dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];

                    /* add the neighbour-based DC prediction to the
                     * decoded residual */
                    s->all_fragments[fragment].dc +=
                        vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);

                    this_dc_pred->type = dc_block_type,
                    this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
                }
                vp4_dc_pred_after(s, dc_pred, sb_x);
            }
        }
    }

    vp4_set_tokens_base(s);

    return 0;
}
1618 #endif
1619 
1620 /*
1621  * This function reverses the DC prediction for each coded fragment in
1622  * the frame. Much of this function is adapted directly from the original
1623  * VP3 source code.
1624  */
1625 #define COMPATIBLE_FRAME(x) \
1626  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1627 #define DC_COEFF(u) s->all_fragments[u].dc
1628 
1630  int first_fragment,
1631  int fragment_width,
1632  int fragment_height)
{
#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int x, y;
    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     * 0: up-left multiplier
     * 1: up multiplier
     * 2: up-right multiplier
     * 3: left multiplier
     * (fixed-point weights scaled by 128; the row is selected by the
     * bitmask of available, compatible neighbours)
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0 },
        {    0,   0,   0, 128 }, // PL
        {    0,   0, 128,   0 }, // PUR
        {    0,   0,  53,  75 }, // PUR|PL
        {    0, 128,   0,   0 }, // PU
        {    0,  64,   0,  64 }, // PU |PL
        {    0, 128,   0,   0 }, // PU |PUR
        {    0,   0,  53,  75 }, // PU |PUR|PL
        {  128,   0,   0,   0 }, // PUL
        {    0,   0,   0, 128 }, // PUL|PL
        {   64,   0,  64,   0 }, // PUL|PUR
        {    0,   0,  53,  75 }, // PUL|PUR|PL
        {    0, 128,   0,   0 }, // PUL|PU
        { -104, 116,   0, 116 }, // PUL|PU |PL
        {   24,  80,  24,   0 }, // PUL|PU |PUR
        { -104, 116,   0, 116 }  // PUL|PU |PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with 1 of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1, /* MODE_INTER_NO_MV */
        0, /* MODE_INTRA */
        1, /* MODE_INTER_PLUS_MV */
        1, /* MODE_INTER_LAST_MV */
        1, /* MODE_INTER_PRIOR_MV */
        2, /* MODE_USING_GOLDEN */
        2, /* MODE_GOLDEN_MV */
        1, /* MODE_INTER_FOUR_MV */
        3  /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul =
    vu  =
    vur =
    vl  = 0;
    last_dc[0] =
    last_dc[1] =
    last_dc[2] = 0;

    /* for each fragment row... */
    for (y = 0; y < fragment_height; y++) {
        /* for each fragment in a row... */
        for (x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {
                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                /* gather the DC values of the neighbours that exist and
                 * were coded against the same reference frame */
                transform = 0;
                if (x) {
                    l  = i - 1;
                    vl = DC_COEFF(l);
                    if (COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if (y) {
                    u  = i - fragment_width;
                    vu = DC_COEFF(u);
                    if (COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if (x) {
                        ul  = i - fragment_width - 1;
                        vul = DC_COEFF(ul);
                        if (COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if (x + 1 < fragment_width) {
                        ur  = i - fragment_width + 1;
                        vur = DC_COEFF(ur);
                        if (COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {
                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {
                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    /* 128 is the fixed-point scale of the weight table */
                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}
1778 
1779 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1780  int ystart, int yend)
1781 {
1782  int x, y;
1783  int *bounding_values = s->bounding_values_array + 127;
1784 
1785  int width = s->fragment_width[!!plane];
1786  int height = s->fragment_height[!!plane];
1787  int fragment = s->fragment_start[plane] + ystart * width;
1788  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1789  uint8_t *plane_data = s->current_frame.f->data[plane];
1790  if (!s->flipped_image)
1791  stride = -stride;
1792  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1793 
1794  for (y = ystart; y < yend; y++) {
1795  for (x = 0; x < width; x++) {
1796  /* This code basically just deblocks on the edges of coded blocks.
1797  * However, it has to be much more complicated because of the
1798  * brain damaged deblock ordering used in VP3/Theora. Order matters
1799  * because some pixels get filtered twice. */
1800  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1801  /* do not perform left edge filter for left columns frags */
1802  if (x > 0) {
1803  s->vp3dsp.h_loop_filter(
1804  plane_data + 8 * x,
1805  stride, bounding_values);
1806  }
1807 
1808  /* do not perform top edge filter for top row fragments */
1809  if (y > 0) {
1810  s->vp3dsp.v_loop_filter(
1811  plane_data + 8 * x,
1812  stride, bounding_values);
1813  }
1814 
1815  /* do not perform right edge filter for right column
1816  * fragments or if right fragment neighbor is also coded
1817  * in this frame (it will be filtered in next iteration) */
1818  if ((x < width - 1) &&
1819  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1820  s->vp3dsp.h_loop_filter(
1821  plane_data + 8 * x + 8,
1822  stride, bounding_values);
1823  }
1824 
1825  /* do not perform bottom edge filter for bottom row
1826  * fragments or if bottom fragment neighbor is also coded
1827  * in this frame (it will be filtered in the next row) */
1828  if ((y < height - 1) &&
1829  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1830  s->vp3dsp.v_loop_filter(
1831  plane_data + 8 * x + 8 * stride,
1832  stride, bounding_values);
1833  }
1834  }
1835 
1836  fragment++;
1837  }
1838  plane_data += 8 * stride;
1839  }
1840 }
1841 
1842 /**
1843  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1844  * for the next block in coding order
1845  */
static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
                              int plane, int inter, int16_t block[64])
{
    int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    uint8_t *perm = s->idct_scantable;
    int i = 0;

    do {
        /* token layout: the low 2 bits select the token type, the
         * remaining bits carry the payload (see the TOKEN_* macros) */
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3; // store decremented run
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f; // run length in bits 2-8
            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
                return i;
            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]]; // coeff in bits 9+
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
1887 
1888 /**
1889  * called when all pixels up to row y are complete
1890  */
1892 {
1893  int h, cy, i;
1895 
1896  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1897  int y_flipped = s->flipped_image ? s->height - y : y;
1898 
1899  /* At the end of the frame, report INT_MAX instead of the height of
1900  * the frame. This makes the other threads' ff_thread_await_progress()
1901  * calls cheaper, because they don't have to clip their values. */
1902  ff_thread_report_progress(&s->current_frame,
1903  y_flipped == s->height ? INT_MAX
1904  : y_flipped - 1,
1905  0);
1906  }
1907 
1908  if (!s->avctx->draw_horiz_band)
1909  return;
1910 
1911  h = y - s->last_slice_end;
1912  s->last_slice_end = y;
1913  y -= h;
1914 
1915  if (!s->flipped_image)
1916  y = s->height - y - h;
1917 
1918  cy = y >> s->chroma_y_shift;
1919  offset[0] = s->current_frame.f->linesize[0] * y;
1920  offset[1] = s->current_frame.f->linesize[1] * cy;
1921  offset[2] = s->current_frame.f->linesize[2] * cy;
1922  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
1923  offset[i] = 0;
1924 
1925  emms_c();
1926  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1927 }
1928 
1929 /**
1930  * Wait for the reference frame of the current fragment.
1931  * The progress value is in luma pixel rows.
1932  */
1934  int motion_y, int y)
{
    ThreadFrame *ref_frame;
    int ref_row;
    /* half-pel vertical motion needs one extra row of reference pixels */
    int border = motion_y & 1;

    /* golden-frame coding modes reference the golden frame, everything
     * else references the previous frame */
    if (fragment->coding_method == MODE_USING_GOLDEN ||
        fragment->coding_method == MODE_GOLDEN_MV)
        ref_frame = &s->golden_frame;
    else
        ref_frame = &s->last_frame;

    ref_row = y + (motion_y >> 1);
    /* wait for 8 rows of the referenced block plus the border; FFABS
     * guards against negative rows from upward motion */
    ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);

    ff_thread_await_progress(ref_frame, ref_row, 0);
}
1951 
1952 #if CONFIG_VP4_DECODER
1953 /**
1954  * @return non-zero if temp (edge_emu_buffer) was populated
1955  */
static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
                              uint8_t * motion_source, int stride, int src_x, int src_y, uint8_t *temp)
{
    /* chroma motion vectors are in finer units than luma */
    int motion_shift = plane ? 4 : 2;
    int subpel_mask = plane ? 3 : 1;
    int *bounding_values = s->bounding_values_array + 127;

    int i;
    int x, y;
    int x2, y2;
    int x_subpel, y_subpel;
    int x_offset, y_offset;

    int block_width = plane ? 8 : 16;
    int plane_width = s->width >> (plane && s->chroma_x_shift);
    int plane_height = s->height >> (plane && s->chroma_y_shift);

/* 10x10 source pixels plus a 1-pixel border on each side */
#define loop_stride 12
    uint8_t loop[12 * loop_stride];

    /* using division instead of shift to correctly handle negative values */
    x = 8 * bx + motion_x / motion_shift;
    y = 8 * by + motion_y / motion_shift;

    x_subpel = motion_x & subpel_mask;
    y_subpel = motion_y & subpel_mask;

    if (x_subpel || y_subpel) {
        x--;
        y--;

        if (x_subpel)
            x = FFMIN(x, x + FFSIGN(motion_x));

        if (y_subpel)
            y = FFMIN(y, y + FFSIGN(motion_y));

        x2 = x + block_width;
        y2 = y + block_width;

        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
            return 0;

        x_offset = (-(x + 2) & 7) + 2;
        y_offset = (-(y + 2) & 7) + 2;

        /* NOTE(review): presumably no filter edge intersects the
         * referenced area in this case — confirm against VP4 reference */
        if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

        if (x_offset <= 8 + x_subpel)
            ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);

        if (y_offset <= 8 + y_subpel)
            ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);

    } else {

        x_offset = -x & 7;
        y_offset = -y & 7;

        /* fullpel motion aligned to the 8-pixel grid: nothing to filter */
        if (!x_offset && !y_offset)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

/* pick the unaligned dsp variant when the pointer is not 8-byte aligned */
#define safe_loop_filter(name, ptr, stride, bounding_values) \
    if ((uintptr_t)(ptr) & 7) \
        s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
    else \
        s->vp3dsp.name(ptr, stride, bounding_values);

        if (x_offset)
            safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);

        if (y_offset)
            safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
    }

    /* copy the filtered 9x9 area into temp for the caller to use as the
     * motion-compensation source */
    for (i = 0; i < 9; i++)
        memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);

    return 1;
}
2049 #endif
2050 
2051 /*
2052  * Perform the final rendering for a particular slice of data.
2053  * The slice number ranges from 0..(c_superblock_height - 1).
2054  */
2055 static void render_slice(Vp3DecodeContext *s, int slice)
2056 {
2057  int x, y, i, j, fragment;
2058  int16_t *block = s->block;
2059  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2060  int motion_halfpel_index;
2061  uint8_t *motion_source;
2062  int plane, first_pixel;
2063 
2064  if (slice >= s->c_superblock_height)
2065  return;
2066 
2067  for (plane = 0; plane < 3; plane++) {
2068  uint8_t *output_plane = s->current_frame.f->data[plane] +
2069  s->data_offset[plane];
2070  uint8_t *last_plane = s->last_frame.f->data[plane] +
2071  s->data_offset[plane];
2072  uint8_t *golden_plane = s->golden_frame.f->data[plane] +
2073  s->data_offset[plane];
2074  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2075  int plane_width = s->width >> (plane && s->chroma_x_shift);
2076  int plane_height = s->height >> (plane && s->chroma_y_shift);
2077  int8_t(*motion_val)[2] = s->motion_val[!!plane];
2078 
2079  int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
2080  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2081  int slice_width = plane ? s->c_superblock_width
2082  : s->y_superblock_width;
2083 
2084  int fragment_width = s->fragment_width[!!plane];
2085  int fragment_height = s->fragment_height[!!plane];
2086  int fragment_start = s->fragment_start[plane];
2087 
2088  int do_await = !plane && HAVE_THREADS &&
2089  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2090 
2091  if (!s->flipped_image)
2092  stride = -stride;
2093  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2094  continue;
2095 
2096  /* for each superblock row in the slice (both of them)... */
2097  for (; sb_y < slice_height; sb_y++) {
2098  /* for each superblock in a row... */
2099  for (sb_x = 0; sb_x < slice_width; sb_x++) {
2100  /* for each block in a superblock... */
2101  for (j = 0; j < 16; j++) {
2102  x = 4 * sb_x + hilbert_offset[j][0];
2103  y = 4 * sb_y + hilbert_offset[j][1];
2104  fragment = y * fragment_width + x;
2105 
2106  i = fragment_start + fragment;
2107 
2108  // bounds check
2109  if (x >= fragment_width || y >= fragment_height)
2110  continue;
2111 
2112  first_pixel = 8 * y * stride + 8 * x;
2113 
2114  if (do_await &&
2115  s->all_fragments[i].coding_method != MODE_INTRA)
2116  await_reference_row(s, &s->all_fragments[i],
2117  motion_val[fragment][1],
2118  (16 * y) >> s->chroma_y_shift);
2119 
2120  /* transform if this block was coded */
2121  if (s->all_fragments[i].coding_method != MODE_COPY) {
2122  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2123  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2124  motion_source = golden_plane;
2125  else
2126  motion_source = last_plane;
2127 
2128  motion_source += first_pixel;
2129  motion_halfpel_index = 0;
2130 
2131  /* sort out the motion vector if this fragment is coded
2132  * using a motion vector method */
2133  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2134  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2135  int src_x, src_y;
2136  int standard_mc = 1;
2137  motion_x = motion_val[fragment][0];
2138  motion_y = motion_val[fragment][1];
2139 #if CONFIG_VP4_DECODER
2140  if (plane && s->version >= 2) {
2141  motion_x = (motion_x >> 1) | (motion_x & 1);
2142  motion_y = (motion_y >> 1) | (motion_y & 1);
2143  }
2144 #endif
2145 
2146  src_x = (motion_x >> 1) + 8 * x;
2147  src_y = (motion_y >> 1) + 8 * y;
2148 
2149  motion_halfpel_index = motion_x & 0x01;
2150  motion_source += (motion_x >> 1);
2151 
2152  motion_halfpel_index |= (motion_y & 0x01) << 1;
2153  motion_source += ((motion_y >> 1) * stride);
2154 
2155 #if CONFIG_VP4_DECODER
2156  if (s->version >= 2) {
2157  uint8_t *temp = s->edge_emu_buffer;
2158  if (stride < 0)
2159  temp -= 8 * stride;
2160  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2161  motion_source = temp;
2162  standard_mc = 0;
2163  }
2164  }
2165 #endif
2166 
2167  if (standard_mc && (
2168  src_x < 0 || src_y < 0 ||
2169  src_x + 9 >= plane_width ||
2170  src_y + 9 >= plane_height)) {
2171  uint8_t *temp = s->edge_emu_buffer;
2172  if (stride < 0)
2173  temp -= 8 * stride;
2174 
2175  s->vdsp.emulated_edge_mc(temp, motion_source,
2176  stride, stride,
2177  9, 9, src_x, src_y,
2178  plane_width,
2179  plane_height);
2180  motion_source = temp;
2181  }
2182  }
2183 
2184  /* first, take care of copying a block from either the
2185  * previous or the golden frame */
2186  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2187  /* Note, it is possible to implement all MC cases
2188  * with put_no_rnd_pixels_l2 which would look more
2189  * like the VP3 source but this would be slower as
2190  * put_no_rnd_pixels_tab is better optimized */
2191  if (motion_halfpel_index != 3) {
2192  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2193  output_plane + first_pixel,
2194  motion_source, stride, 8);
2195  } else {
2196  /* d is 0 if motion_x and _y have the same sign,
2197  * else -1 */
2198  int d = (motion_x ^ motion_y) >> 31;
2199  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2200  motion_source - d,
2201  motion_source + stride + 1 + d,
2202  stride, 8);
2203  }
2204  }
2205 
2206  /* invert DCT and place (or add) in final output */
2207 
2208  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2209  vp3_dequant(s, s->all_fragments + i,
2210  plane, 0, block);
2211  s->vp3dsp.idct_put(output_plane + first_pixel,
2212  stride,
2213  block);
2214  } else {
2215  if (vp3_dequant(s, s->all_fragments + i,
2216  plane, 1, block)) {
2217  s->vp3dsp.idct_add(output_plane + first_pixel,
2218  stride,
2219  block);
2220  } else {
2221  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2222  stride, block);
2223  }
2224  }
2225  } else {
2226  /* copy directly from the previous frame */
2227  s->hdsp.put_pixels_tab[1][0](
2228  output_plane + first_pixel,
2229  last_plane + first_pixel,
2230  stride, 8);
2231  }
2232  }
2233  }
2234 
2235  // Filter up to the last row in the superblock row
2236  if (s->version < 2 && !s->skip_loop_filter)
2237  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2238  FFMIN(4 * sb_y + 3, fragment_height - 1));
2239  }
2240  }
2241 
2242  /* this looks like a good place for slice dispatch... */
2243  /* algorithm:
2244  * if (slice == s->macroblock_height - 1)
2245  * dispatch (both last slice & 2nd-to-last slice);
2246  * else if (slice > 0)
2247  * dispatch (slice - 1);
2248  */
2249 
2250  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2251  s->height - 16));
2252 }
2253 
2254 /// Allocate tables for per-frame data in Vp3DecodeContext
2256 {
2257  Vp3DecodeContext *s = avctx->priv_data;
2258  int y_fragment_count, c_fragment_count;
2259 
2260  free_tables(avctx);
2261 
2262  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2263  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2264 
2265  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2266  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2267  s->all_fragments = av_mallocz_array(s->fragment_count, sizeof(Vp3Fragment));
2268 
2269  s-> kf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int));
2270  s->nkf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int));
2271  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2272 
2273  s->dct_tokens_base = av_mallocz_array(s->fragment_count,
2274  64 * sizeof(*s->dct_tokens_base));
2275  s->motion_val[0] = av_mallocz_array(y_fragment_count, sizeof(*s->motion_val[0]));
2276  s->motion_val[1] = av_mallocz_array(c_fragment_count, sizeof(*s->motion_val[1]));
2277 
2278  /* work out the block mapping tables */
2279  s->superblock_fragments = av_mallocz_array(s->superblock_count, 16 * sizeof(int));
2280  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2281 
2282  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2283 
2284  if (!s->superblock_coding || !s->all_fragments ||
2285  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2286  !s->nkf_coded_fragment_list ||
2287  !s->superblock_fragments || !s->macroblock_coding ||
2288  !s->dc_pred_row ||
2289  !s->motion_val[0] || !s->motion_val[1]) {
2290  return -1;
2291  }
2292 
2294 
2295  return 0;
2296 }
2297 
2299 {
2300  s->current_frame.f = av_frame_alloc();
2301  s->last_frame.f = av_frame_alloc();
2302  s->golden_frame.f = av_frame_alloc();
2303 
2304  if (!s->current_frame.f || !s->last_frame.f || !s->golden_frame.f)
2305  return AVERROR(ENOMEM);
2306 
2307  return 0;
2308 }
2309 
2311 {
2312  Vp3DecodeContext *s = avctx->priv_data;
2313  int i, inter, plane, ret;
2314  int c_width;
2315  int c_height;
2316  int y_fragment_count, c_fragment_count;
2317 #if CONFIG_VP4_DECODER
2318  int j;
2319 #endif
2320 
2321  ret = init_frames(s);
2322  if (ret < 0)
2323  return ret;
2324 
2325  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0'))
2326  s->version = 3;
2327  else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2328  s->version = 0;
2329  else
2330  s->version = 1;
2331 
2332  s->avctx = avctx;
2333  s->width = FFALIGN(avctx->coded_width, 16);
2334  s->height = FFALIGN(avctx->coded_height, 16);
2335  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2336  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2338  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2339  ff_videodsp_init(&s->vdsp, 8);
2340  ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
2341 
2342  for (i = 0; i < 64; i++) {
2343 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2344  s->idct_permutation[i] = TRANSPOSE(i);
2345  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2346 #undef TRANSPOSE
2347  }
2348 
2349  /* initialize to an impossible value which will force a recalculation
2350  * in the first frame decode */
2351  for (i = 0; i < 3; i++)
2352  s->qps[i] = -1;
2353 
2354  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2355  if (ret)
2356  return ret;
2357 
2358  s->y_superblock_width = (s->width + 31) / 32;
2359  s->y_superblock_height = (s->height + 31) / 32;
2360  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2361 
2362  /* work out the dimensions for the C planes */
2363  c_width = s->width >> s->chroma_x_shift;
2364  c_height = s->height >> s->chroma_y_shift;
2365  s->c_superblock_width = (c_width + 31) / 32;
2366  s->c_superblock_height = (c_height + 31) / 32;
2367  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2368 
2369  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2370  s->u_superblock_start = s->y_superblock_count;
2371  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2372 
2373  s->macroblock_width = (s->width + 15) / 16;
2374  s->macroblock_height = (s->height + 15) / 16;
2375  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2376  s->c_macroblock_width = (c_width + 15) / 16;
2377  s->c_macroblock_height = (c_height + 15) / 16;
2378  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2379  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2380 
2381  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2382  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2383  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2384  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2385 
2386  /* fragment count covers all 8x8 blocks for all 3 planes */
2387  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2388  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2389  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2390  s->fragment_start[1] = y_fragment_count;
2391  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2392 
2393  if (!s->theora_tables) {
2394  for (i = 0; i < 64; i++) {
2395  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2396  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2397  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2398  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2399  s->base_matrix[1][i] = s->version < 2 ? vp31_intra_c_dequant[i] : vp4_generic_dequant[i];
2400  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2401  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2402  }
2403 
2404  for (inter = 0; inter < 2; inter++) {
2405  for (plane = 0; plane < 3; plane++) {
2406  s->qr_count[inter][plane] = 1;
2407  s->qr_size[inter][plane][0] = 63;
2408  s->qr_base[inter][plane][0] =
2409  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2410  }
2411  }
2412 
2413  /* init VLC tables */
2414  if (s->version < 2) {
2415  for (i = 0; i < 16; i++) {
2416  /* DC histograms */
2417  if ((ret = init_vlc(&s->dc_vlc[i], 11, 32,
2418  &dc_bias[i][0][1], 4, 2,
2419  &dc_bias[i][0][0], 4, 2, 0)) < 0)
2420  return ret;
2421 
2422  /* group 1 AC histograms */
2423  if ((ret = init_vlc(&s->ac_vlc_1[i], 11, 32,
2424  &ac_bias_0[i][0][1], 4, 2,
2425  &ac_bias_0[i][0][0], 4, 2, 0)) < 0)
2426  return ret;
2427 
2428  /* group 2 AC histograms */
2429  if ((ret = init_vlc(&s->ac_vlc_2[i], 11, 32,
2430  &ac_bias_1[i][0][1], 4, 2,
2431  &ac_bias_1[i][0][0], 4, 2, 0)) < 0)
2432  return ret;
2433 
2434  /* group 3 AC histograms */
2435  if ((ret = init_vlc(&s->ac_vlc_3[i], 11, 32,
2436  &ac_bias_2[i][0][1], 4, 2,
2437  &ac_bias_2[i][0][0], 4, 2, 0)) < 0)
2438  return ret;
2439 
2440  /* group 4 AC histograms */
2441  if ((ret = init_vlc(&s->ac_vlc_4[i], 11, 32,
2442  &ac_bias_3[i][0][1], 4, 2,
2443  &ac_bias_3[i][0][0], 4, 2, 0)) < 0)
2444  return ret;
2445  }
2446 #if CONFIG_VP4_DECODER
2447  } else { /* version >= 2 */
2448  for (i = 0; i < 16; i++) {
2449  /* DC histograms */
2450  if ((ret = init_vlc(&s->dc_vlc[i], 11, 32,
2451  &vp4_dc_bias[i][0][1], 4, 2,
2452  &vp4_dc_bias[i][0][0], 4, 2, 0)) < 0)
2453  return ret;
2454 
2455  /* group 1 AC histograms */
2456  if ((ret = init_vlc(&s->ac_vlc_1[i], 11, 32,
2457  &vp4_ac_bias_0[i][0][1], 4, 2,
2458  &vp4_ac_bias_0[i][0][0], 4, 2, 0)) < 0)
2459  return ret;
2460 
2461  /* group 2 AC histograms */
2462  if ((ret = init_vlc(&s->ac_vlc_2[i], 11, 32,
2463  &vp4_ac_bias_1[i][0][1], 4, 2,
2464  &vp4_ac_bias_1[i][0][0], 4, 2, 0)) < 0)
2465  return ret;
2466 
2467  /* group 3 AC histograms */
2468  if ((ret = init_vlc(&s->ac_vlc_3[i], 11, 32,
2469  &vp4_ac_bias_2[i][0][1], 4, 2,
2470  &vp4_ac_bias_2[i][0][0], 4, 2, 0)) < 0)
2471  return ret;
2472 
2473  /* group 4 AC histograms */
2474  if ((ret = init_vlc(&s->ac_vlc_4[i], 11, 32,
2475  &vp4_ac_bias_3[i][0][1], 4, 2,
2476  &vp4_ac_bias_3[i][0][0], 4, 2, 0)) < 0)
2477  return ret;
2478  }
2479 #endif
2480  }
2481  } else {
2482  for (i = 0; i < 16; i++) {
2483  /* DC histograms */
2484  if (init_vlc(&s->dc_vlc[i], 11, 32,
2485  &s->huffman_table[i][0][1], 8, 4,
2486  &s->huffman_table[i][0][0], 8, 4, 0) < 0)
2487  goto vlc_fail;
2488 
2489  /* group 1 AC histograms */
2490  if (init_vlc(&s->ac_vlc_1[i], 11, 32,
2491  &s->huffman_table[i + 16][0][1], 8, 4,
2492  &s->huffman_table[i + 16][0][0], 8, 4, 0) < 0)
2493  goto vlc_fail;
2494 
2495  /* group 2 AC histograms */
2496  if (init_vlc(&s->ac_vlc_2[i], 11, 32,
2497  &s->huffman_table[i + 16 * 2][0][1], 8, 4,
2498  &s->huffman_table[i + 16 * 2][0][0], 8, 4, 0) < 0)
2499  goto vlc_fail;
2500 
2501  /* group 3 AC histograms */
2502  if (init_vlc(&s->ac_vlc_3[i], 11, 32,
2503  &s->huffman_table[i + 16 * 3][0][1], 8, 4,
2504  &s->huffman_table[i + 16 * 3][0][0], 8, 4, 0) < 0)
2505  goto vlc_fail;
2506 
2507  /* group 4 AC histograms */
2508  if (init_vlc(&s->ac_vlc_4[i], 11, 32,
2509  &s->huffman_table[i + 16 * 4][0][1], 8, 4,
2510  &s->huffman_table[i + 16 * 4][0][0], 8, 4, 0) < 0)
2511  goto vlc_fail;
2512  }
2513  }
2514 
2515  if ((ret = init_vlc(&s->superblock_run_length_vlc, 6, 34,
2516  &superblock_run_length_vlc_table[0][1], 4, 2,
2517  &superblock_run_length_vlc_table[0][0], 4, 2, 0)) < 0)
2518  return ret;
2519 
2520  if ((ret = init_vlc(&s->fragment_run_length_vlc, 5, 30,
2521  &fragment_run_length_vlc_table[0][1], 4, 2,
2522  &fragment_run_length_vlc_table[0][0], 4, 2, 0)) < 0)
2523  return ret;
2524 
2525  if ((ret = init_vlc(&s->mode_code_vlc, 3, 8,
2526  &mode_code_vlc_table[0][1], 2, 1,
2527  &mode_code_vlc_table[0][0], 2, 1, 0)) < 0)
2528  return ret;
2529 
2530  if ((ret = init_vlc(&s->motion_vector_vlc, 6, 63,
2531  &motion_vector_vlc_table[0][1], 2, 1,
2532  &motion_vector_vlc_table[0][0], 2, 1, 0)) < 0)
2533  return ret;
2534 
2535 #if CONFIG_VP4_DECODER
2536  for (j = 0; j < 2; j++)
2537  for (i = 0; i < 7; i++)
2538  if ((ret = init_vlc(&s->vp4_mv_vlc[j][i], 6, 63,
2539  &vp4_mv_vlc[j][i][0][1], 4, 2,
2540  &vp4_mv_vlc[j][i][0][0], 4, 2, 0)) < 0)
2541  return ret;
2542 
2543  /* version >= 2 */
2544  for (i = 0; i < 2; i++)
2545  if ((ret = init_vlc(&s->block_pattern_vlc[i], 3, 14,
2546  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2547  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0)) < 0)
2548  return ret;
2549 #endif
2550 
2551  return allocate_tables(avctx);
2552 
2553 vlc_fail:
2554  av_log(avctx, AV_LOG_FATAL, "Invalid huffman table\n");
2555  return -1;
2556 }
2557 
2558 /// Release and shuffle frames after decode finishes
2559 static int update_frames(AVCodecContext *avctx)
2560 {
2561  Vp3DecodeContext *s = avctx->priv_data;
2562  int ret = 0;
2563 
2564  /* shuffle frames (last = current) */
2565  ff_thread_release_buffer(avctx, &s->last_frame);
2566  ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
2567  if (ret < 0)
2568  goto fail;
2569 
2570  if (s->keyframe) {
2571  ff_thread_release_buffer(avctx, &s->golden_frame);
2572  ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
2573  }
2574 
2575 fail:
2576  ff_thread_release_buffer(avctx, &s->current_frame);
2577  return ret;
2578 }
2579 
2580 #if HAVE_THREADS
2581 static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
2582 {
2583  ff_thread_release_buffer(s->avctx, dst);
2584  if (src->f->data[0])
2585  return ff_thread_ref_frame(dst, src);
2586  return 0;
2587 }
2588 
2589 static int ref_frames(Vp3DecodeContext *dst, Vp3DecodeContext *src)
2590 {
2591  int ret;
2592  if ((ret = ref_frame(dst, &dst->current_frame, &src->current_frame)) < 0 ||
2593  (ret = ref_frame(dst, &dst->golden_frame, &src->golden_frame)) < 0 ||
2594  (ret = ref_frame(dst, &dst->last_frame, &src->last_frame)) < 0)
2595  return ret;
2596  return 0;
2597 }
2598 
2599 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2600 {
2601  Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
2602  int qps_changed = 0, i, err;
2603 
2604  if (!s1->current_frame.f->data[0] ||
2605  s->width != s1->width || s->height != s1->height) {
2606  if (s != s1)
2607  ref_frames(s, s1);
2608  return -1;
2609  }
2610 
2611  if (s != s1) {
2612  // copy previous frame data
2613  if ((err = ref_frames(s, s1)) < 0)
2614  return err;
2615 
2616  s->keyframe = s1->keyframe;
2617 
2618  // copy qscale data if necessary
2619  for (i = 0; i < 3; i++) {
2620  if (s->qps[i] != s1->qps[1]) {
2621  qps_changed = 1;
2622  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2623  }
2624  }
2625 
2626  if (s->qps[0] != s1->qps[0])
2627  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2628  sizeof(s->bounding_values_array));
2629 
2630  if (qps_changed) {
2631  memcpy(s->qps, s1->qps, sizeof(s->qps));
2632  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2633  s->nqps = s1->nqps;
2634  }
2635  }
2636 
2637  return update_frames(dst);
2638 }
2639 #endif
2640 
2642  void *data, int *got_frame,
2643  AVPacket *avpkt)
2644 {
2645  AVFrame *frame = data;
2646  const uint8_t *buf = avpkt->data;
2647  int buf_size = avpkt->size;
2648  Vp3DecodeContext *s = avctx->priv_data;
2649  GetBitContext gb;
2650  int i, ret;
2651 
2652  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2653  return ret;
2654 
2655 #if CONFIG_THEORA_DECODER
2656  if (s->theora && get_bits1(&gb)) {
2657  int type = get_bits(&gb, 7);
2658  skip_bits_long(&gb, 6*8); /* "theora" */
2659 
2660  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2661  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2662  return AVERROR_PATCHWELCOME;
2663  }
2664  if (type == 0) {
2665  vp3_decode_end(avctx);
2666  ret = theora_decode_header(avctx, &gb);
2667 
2668  if (ret >= 0)
2669  ret = vp3_decode_init(avctx);
2670  if (ret < 0) {
2671  vp3_decode_end(avctx);
2672  return ret;
2673  }
2674  return buf_size;
2675  } else if (type == 2) {
2676  vp3_decode_end(avctx);
2677  ret = theora_decode_tables(avctx, &gb);
2678  if (ret >= 0)
2679  ret = vp3_decode_init(avctx);
2680  if (ret < 0) {
2681  vp3_decode_end(avctx);
2682  return ret;
2683  }
2684  return buf_size;
2685  }
2686 
2687  av_log(avctx, AV_LOG_ERROR,
2688  "Header packet passed to frame decoder, skipping\n");
2689  return -1;
2690  }
2691 #endif
2692 
2693  s->keyframe = !get_bits1(&gb);
2694  if (!s->all_fragments) {
2695  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2696  return -1;
2697  }
2698  if (!s->theora)
2699  skip_bits(&gb, 1);
2700  for (i = 0; i < 3; i++)
2701  s->last_qps[i] = s->qps[i];
2702 
2703  s->nqps = 0;
2704  do {
2705  s->qps[s->nqps++] = get_bits(&gb, 6);
2706  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2707  for (i = s->nqps; i < 3; i++)
2708  s->qps[i] = -1;
2709 
2710  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2711  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
2712  s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);
2713 
2714  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2715  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2716  : AVDISCARD_NONKEY);
2717 
2718  if (s->qps[0] != s->last_qps[0])
2720 
2721  for (i = 0; i < s->nqps; i++)
2722  // reinit all dequantizers if the first one changed, because
2723  // the DC of the first quantizer must be used for all matrices
2724  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2725  init_dequantizer(s, i);
2726 
2727  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2728  return buf_size;
2729 
2730  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2732  s->current_frame.f->key_frame = s->keyframe;
2733  if ((ret = ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF)) < 0)
2734  goto error;
2735 
2736  if (!s->edge_emu_buffer)
2737  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2738 
2739  if (s->keyframe) {
2740  if (!s->theora) {
2741  skip_bits(&gb, 4); /* width code */
2742  skip_bits(&gb, 4); /* height code */
2743  if (s->version) {
2744  int version = get_bits(&gb, 5);
2745 #if !CONFIG_VP4_DECODER
2746  if (version >= 2) {
2747  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2749  }
2750 #endif
2751  s->version = version;
2752  if (avctx->frame_number == 0)
2753  av_log(s->avctx, AV_LOG_DEBUG,
2754  "VP version: %d\n", s->version);
2755  }
2756  }
2757  if (s->version || s->theora) {
2758  if (get_bits1(&gb))
2759  av_log(s->avctx, AV_LOG_ERROR,
2760  "Warning, unsupported keyframe coding type?!\n");
2761  skip_bits(&gb, 2); /* reserved? */
2762 
2763 #if CONFIG_VP4_DECODER
2764  if (s->version >= 2) {
2765  int mb_height, mb_width;
2766  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2767 
2768  mb_height = get_bits(&gb, 8);
2769  mb_width = get_bits(&gb, 8);
2770  if (mb_height != s->macroblock_height ||
2771  mb_width != s->macroblock_width)
2772  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2773 
2774  mb_width_mul = get_bits(&gb, 5);
2775  mb_width_div = get_bits(&gb, 3);
2776  mb_height_mul = get_bits(&gb, 5);
2777  mb_height_div = get_bits(&gb, 3);
2778  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2779  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multipler/divider");
2780 
2781  if (get_bits(&gb, 2))
2782  avpriv_request_sample(s->avctx, "unknown bits");
2783  }
2784 #endif
2785  }
2786  } else {
2787  if (!s->golden_frame.f->data[0]) {
2788  av_log(s->avctx, AV_LOG_WARNING,
2789  "vp3: first frame not a keyframe\n");
2790 
2791  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2792  if ((ret = ff_thread_get_buffer(avctx, &s->golden_frame,
2793  AV_GET_BUFFER_FLAG_REF)) < 0)
2794  goto error;
2795  ff_thread_release_buffer(avctx, &s->last_frame);
2796  if ((ret = ff_thread_ref_frame(&s->last_frame,
2797  &s->golden_frame)) < 0)
2798  goto error;
2799  ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
2800  }
2801  }
2802 
2803  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2804  ff_thread_finish_setup(avctx);
2805 
2806  if (s->version < 2) {
2807  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2808  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2809  goto error;
2810  }
2811 #if CONFIG_VP4_DECODER
2812  } else {
2813  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2814  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2815  goto error;
2816  }
2817 #endif
2818  }
2819  if ((ret = unpack_modes(s, &gb)) < 0) {
2820  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2821  goto error;
2822  }
2823  if (ret = unpack_vectors(s, &gb)) {
2824  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2825  goto error;
2826  }
2827  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2828  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2829  goto error;
2830  }
2831 
2832  if (s->version < 2) {
2833  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2834  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2835  goto error;
2836  }
2837 #if CONFIG_VP4_DECODER
2838  } else {
2839  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2840  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2841  goto error;
2842  }
2843 #endif
2844  }
2845 
2846  for (i = 0; i < 3; i++) {
2847  int height = s->height >> (i && s->chroma_y_shift);
2848  if (s->flipped_image)
2849  s->data_offset[i] = 0;
2850  else
2851  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2852  }
2853 
2854  s->last_slice_end = 0;
2855  for (i = 0; i < s->c_superblock_height; i++)
2856  render_slice(s, i);
2857 
2858  // filter the last row
2859  if (s->version < 2)
2860  for (i = 0; i < 3; i++) {
2861  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2862  apply_loop_filter(s, i, row, row + 1);
2863  }
2864  vp3_draw_horiz_band(s, s->height);
2865 
2866  /* output frame, offset as needed */
2867  if ((ret = av_frame_ref(data, s->current_frame.f)) < 0)
2868  return ret;
2869 
2870  frame->crop_left = s->offset_x;
2871  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2872  frame->crop_top = s->offset_y;
2873  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2874 
2875  *got_frame = 1;
2876 
2877  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME)) {
2878  ret = update_frames(avctx);
2879  if (ret < 0)
2880  return ret;
2881  }
2882 
2883  return buf_size;
2884 
2885 error:
2886  ff_thread_report_progress(&s->current_frame, INT_MAX, 0);
2887 
2888  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2889  av_frame_unref(s->current_frame.f);
2890 
2891  return ret;
2892 }
2893 
2895 {
2896  Vp3DecodeContext *s = avctx->priv_data;
2897 
2898  if (get_bits1(gb)) {
2899  int token;
2900  if (s->entries >= 32) { /* overflow */
2901  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2902  return -1;
2903  }
2904  token = get_bits(gb, 5);
2905  ff_dlog(avctx, "hti %d hbits %x token %d entry : %d size %d\n",
2906  s->hti, s->hbits, token, s->entries, s->huff_code_size);
2907  s->huffman_table[s->hti][token][0] = s->hbits;
2908  s->huffman_table[s->hti][token][1] = s->huff_code_size;
2909  s->entries++;
2910  } else {
2911  if (s->huff_code_size >= 32) { /* overflow */
2912  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2913  return -1;
2914  }
2915  s->huff_code_size++;
2916  s->hbits <<= 1;
2917  if (read_huffman_tree(avctx, gb))
2918  return -1;
2919  s->hbits |= 1;
2920  if (read_huffman_tree(avctx, gb))
2921  return -1;
2922  s->hbits >>= 1;
2923  s->huff_code_size--;
2924  }
2925  return 0;
2926 }
2927 
2928 #if CONFIG_THEORA_DECODER
2929 static const enum AVPixelFormat theora_pix_fmts[4] = {
2931 };
2932 
2933 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2934 {
2935  Vp3DecodeContext *s = avctx->priv_data;
2936  int visible_width, visible_height, colorspace;
2937  uint8_t offset_x = 0, offset_y = 0;
2938  int ret;
2939  AVRational fps, aspect;
2940 
2941  if (get_bits_left(gb) < 206)
2942  return AVERROR_INVALIDDATA;
2943 
2944  s->theora_header = 0;
2945  s->theora = get_bits(gb, 24);
2946  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2947  if (!s->theora) {
2948  s->theora = 1;
2949  avpriv_request_sample(s->avctx, "theora 0");
2950  }
2951 
2952  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2953  * but previous versions have the image flipped relative to vp3 */
2954  if (s->theora < 0x030200) {
2955  s->flipped_image = 1;
2956  av_log(avctx, AV_LOG_DEBUG,
2957  "Old (<alpha3) Theora bitstream, flipped image\n");
2958  }
2959 
2960  visible_width =
2961  s->width = get_bits(gb, 16) << 4;
2962  visible_height =
2963  s->height = get_bits(gb, 16) << 4;
2964 
2965  if (s->theora >= 0x030200) {
2966  visible_width = get_bits(gb, 24);
2967  visible_height = get_bits(gb, 24);
2968 
2969  offset_x = get_bits(gb, 8); /* offset x */
2970  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2971  }
2972 
2973  /* sanity check */
2974  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2975  visible_width + offset_x > s->width ||
2976  visible_height + offset_y > s->height) {
2977  av_log(avctx, AV_LOG_ERROR,
2978  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2979  visible_width, visible_height, offset_x, offset_y,
2980  s->width, s->height);
2981  return AVERROR_INVALIDDATA;
2982  }
2983 
2984  fps.num = get_bits_long(gb, 32);
2985  fps.den = get_bits_long(gb, 32);
2986  if (fps.num && fps.den) {
2987  if (fps.num < 0 || fps.den < 0) {
2988  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2989  return AVERROR_INVALIDDATA;
2990  }
2991  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2992  fps.den, fps.num, 1 << 30);
2993  }
2994 
2995  aspect.num = get_bits(gb, 24);
2996  aspect.den = get_bits(gb, 24);
2997  if (aspect.num && aspect.den) {
2999  &avctx->sample_aspect_ratio.den,
3000  aspect.num, aspect.den, 1 << 30);
3001  ff_set_sar(avctx, avctx->sample_aspect_ratio);
3002  }
3003 
3004  if (s->theora < 0x030200)
3005  skip_bits(gb, 5); /* keyframe frequency force */
3006  colorspace = get_bits(gb, 8);
3007  skip_bits(gb, 24); /* bitrate */
3008 
3009  skip_bits(gb, 6); /* quality hint */
3010 
3011  if (s->theora >= 0x030200) {
3012  skip_bits(gb, 5); /* keyframe frequency force */
3013  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
3014  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
3015  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
3016  return AVERROR_INVALIDDATA;
3017  }
3018  skip_bits(gb, 3); /* reserved */
3019  } else
3020  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3021 
3022  ret = ff_set_dimensions(avctx, s->width, s->height);
3023  if (ret < 0)
3024  return ret;
3025  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
3026  avctx->width = visible_width;
3027  avctx->height = visible_height;
3028  // translate offsets from theora axis ([0,0] lower left)
3029  // to normal axis ([0,0] upper left)
3030  s->offset_x = offset_x;
3031  s->offset_y = s->height - visible_height - offset_y;
3032  }
3033 
3034  if (colorspace == 1)
3036  else if (colorspace == 2)
3038 
3039  if (colorspace == 1 || colorspace == 2) {
3040  avctx->colorspace = AVCOL_SPC_BT470BG;
3041  avctx->color_trc = AVCOL_TRC_BT709;
3042  }
3043 
3044  s->theora_header = 1;
3045  return 0;
3046 }
3047 
3048 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
3049 {
3050  Vp3DecodeContext *s = avctx->priv_data;
3051  int i, n, matrices, inter, plane;
3052 
3053  if (!s->theora_header)
3054  return AVERROR_INVALIDDATA;
3055 
3056  if (s->theora >= 0x030200) {
3057  n = get_bits(gb, 3);
3058  /* loop filter limit values table */
3059  if (n)
3060  for (i = 0; i < 64; i++)
3061  s->filter_limit_values[i] = get_bits(gb, n);
3062  }
3063 
3064  if (s->theora >= 0x030200)
3065  n = get_bits(gb, 4) + 1;
3066  else
3067  n = 16;
3068  /* quality threshold table */
3069  for (i = 0; i < 64; i++)
3070  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3071 
3072  if (s->theora >= 0x030200)
3073  n = get_bits(gb, 4) + 1;
3074  else
3075  n = 16;
3076  /* dc scale factor table */
3077  for (i = 0; i < 64; i++)
3078  s->coded_dc_scale_factor[0][i] =
3079  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3080 
3081  if (s->theora >= 0x030200)
3082  matrices = get_bits(gb, 9) + 1;
3083  else
3084  matrices = 3;
3085 
3086  if (matrices > 384) {
3087  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
3088  return -1;
3089  }
3090 
3091  for (n = 0; n < matrices; n++)
3092  for (i = 0; i < 64; i++)
3093  s->base_matrix[n][i] = get_bits(gb, 8);
3094 
3095  for (inter = 0; inter <= 1; inter++) {
3096  for (plane = 0; plane <= 2; plane++) {
3097  int newqr = 1;
3098  if (inter || plane > 0)
3099  newqr = get_bits1(gb);
3100  if (!newqr) {
3101  int qtj, plj;
3102  if (inter && get_bits1(gb)) {
3103  qtj = 0;
3104  plj = plane;
3105  } else {
3106  qtj = (3 * inter + plane - 1) / 3;
3107  plj = (plane + 2) % 3;
3108  }
3109  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3110  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3111  sizeof(s->qr_size[0][0]));
3112  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3113  sizeof(s->qr_base[0][0]));
3114  } else {
3115  int qri = 0;
3116  int qi = 0;
3117 
3118  for (;;) {
3119  i = get_bits(gb, av_log2(matrices - 1) + 1);
3120  if (i >= matrices) {
3121  av_log(avctx, AV_LOG_ERROR,
3122  "invalid base matrix index\n");
3123  return -1;
3124  }
3125  s->qr_base[inter][plane][qri] = i;
3126  if (qi >= 63)
3127  break;
3128  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3129  s->qr_size[inter][plane][qri++] = i;
3130  qi += i;
3131  }
3132 
3133  if (qi > 63) {
3134  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3135  return -1;
3136  }
3137  s->qr_count[inter][plane] = qri;
3138  }
3139  }
3140  }
3141 
3142  /* Huffman tables */
3143  for (s->hti = 0; s->hti < 80; s->hti++) {
3144  s->entries = 0;
3145  s->huff_code_size = 1;
3146  if (!get_bits1(gb)) {
3147  s->hbits = 0;
3148  if (read_huffman_tree(avctx, gb))
3149  return -1;
3150  s->hbits = 1;
3151  if (read_huffman_tree(avctx, gb))
3152  return -1;
3153  }
3154  }
3155 
3156  s->theora_tables = 1;
3157 
3158  return 0;
3159 }
3160 
3161 static av_cold int theora_decode_init(AVCodecContext *avctx)
3162 {
3163  Vp3DecodeContext *s = avctx->priv_data;
3164  GetBitContext gb;
3165  int ptype;
3166  const uint8_t *header_start[3];
3167  int header_len[3];
3168  int i;
3169  int ret;
3170 
3171  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3172 
3173  s->theora = 1;
3174 
3175  if (!avctx->extradata_size) {
3176  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3177  return -1;
3178  }
3179 
3181  42, header_start, header_len) < 0) {
3182  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3183  return -1;
3184  }
3185 
3186  for (i = 0; i < 3; i++) {
3187  if (header_len[i] <= 0)
3188  continue;
3189  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3190  if (ret < 0)
3191  return ret;
3192 
3193  ptype = get_bits(&gb, 8);
3194 
3195  if (!(ptype & 0x80)) {
3196  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3197 // return -1;
3198  }
3199 
3200  // FIXME: Check for this as well.
3201  skip_bits_long(&gb, 6 * 8); /* "theora" */
3202 
3203  switch (ptype) {
3204  case 0x80:
3205  if (theora_decode_header(avctx, &gb) < 0)
3206  return -1;
3207  break;
3208  case 0x81:
3209 // FIXME: is this needed? it breaks sometimes
3210 // theora_decode_comments(avctx, gb);
3211  break;
3212  case 0x82:
3213  if (theora_decode_tables(avctx, &gb))
3214  return -1;
3215  break;
3216  default:
3217  av_log(avctx, AV_LOG_ERROR,
3218  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3219  break;
3220  }
3221  if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb))
3222  av_log(avctx, AV_LOG_WARNING,
3223  "%d bits left in packet %X\n",
3224  8 * header_len[i] - get_bits_count(&gb), ptype);
3225  if (s->theora < 0x030200)
3226  break;
3227  }
3228 
3229  return vp3_decode_init(avctx);
3230 }
3231 
3233  .name = "theora",
3234  .long_name = NULL_IF_CONFIG_SMALL("Theora"),
3235  .type = AVMEDIA_TYPE_VIDEO,
3236  .id = AV_CODEC_ID_THEORA,
3237  .priv_data_size = sizeof(Vp3DecodeContext),
3238  .init = theora_decode_init,
3239  .close = vp3_decode_end,
3244  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3247 };
3248 #endif
3249 
3251  .name = "vp3",
3252  .long_name = NULL_IF_CONFIG_SMALL("On2 VP3"),
3253  .type = AVMEDIA_TYPE_VIDEO,
3254  .id = AV_CODEC_ID_VP3,
3255  .priv_data_size = sizeof(Vp3DecodeContext),
3256  .init = vp3_decode_init,
3257  .close = vp3_decode_end,
3262  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3264 };
3265 
3266 #if CONFIG_VP4_DECODER
3268  .name = "vp4",
3269  .long_name = NULL_IF_CONFIG_SMALL("On2 VP4"),
3270  .type = AVMEDIA_TYPE_VIDEO,
3271  .id = AV_CODEC_ID_VP4,
3272  .priv_data_size = sizeof(Vp3DecodeContext),
3273  .init = vp3_decode_init,
3274  .close = vp3_decode_end,
3279  .update_thread_context = ONLY_IF_THREADS_ENABLED(vp3_update_thread_context),
3281 };
3282 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:29
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
vp4data.h
AVCodec
AVCodec.
Definition: codec.h:190
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2255
stride
int stride
Definition: mace.c:144
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:55
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:208
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:136
Vp3DecodeContext::mode_code_vlc
VLC mode_code_vlc
Definition: vp3.c:269
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
VP3DSPContext
Definition: vp3dsp.h:25
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp3_decode_flush
static void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:322
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:1154
ac_bias_3
static const uint16_t ac_bias_3[16][32][2]
Definition: vp3data.h:2633
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:198
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:207
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:73
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:134
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
VP4Predictor
Definition: vp3.c:151
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:169
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:135
MKTAG
#define MKTAG(a, b, c, d)
Definition: common.h:406
VP4Predictor::dc
int dc
Definition: vp3.c:152
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:546
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
PUR
#define PUR
vp3dsp.h
Vp3DecodeContext::motion_vector_vlc
VLC motion_vector_vlc
Definition: vp3.c:270
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:1147
Vp3DecodeContext::superblock_run_length_vlc
VLC superblock_run_length_vlc
Definition: vp3.c:266
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:473
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:38
table
static const uint16_t table[]
Definition: prosumer.c:206
data
const char data[16]
Definition: mxf.c:91
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:205
vp4_ac_bias_0
static const uint16_t vp4_ac_bias_0[16][32][2]
Definition: vp4data.h:534
fragment_run_length_vlc_table
static const uint16_t fragment_run_length_vlc_table[30][2]
Definition: vp3data.h:119
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:296
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:190
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE(*table)[2], int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:797
base
uint8_t base
Definition: vp3data.h:202
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:56
Vp3DecodeContext::ac_vlc_1
VLC ac_vlc_1[16]
Definition: vp3.c:261
body
static void body(uint32_t ABCD[4], const uint8_t *src, int nblocks)
Definition: md5.c:101
vp4_dc_bias
static const uint16_t vp4_dc_bias[16][32][2]
Definition: vp4data.h:371
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:465
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2055
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1612
Vp3DecodeContext::height
int height
Definition: vp3.c:162
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1846
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:371
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
fragment
Definition: dashdec.c:35
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:185
Vp3DecodeContext::ac_vlc_4
VLC ac_vlc_4[16]
Definition: vp3.c:264
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:58
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+2]
Definition: vp3.c:297
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:2069
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:281
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
Vp3DecodeContext::hti
int hti
Definition: vp3.c:290
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1147
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:218
VLC_TYPE
#define VLC_TYPE
Definition: vlc.h:24
U
#define U(x)
Definition: vp56_arith.h:37
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:137
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:170
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:641
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:183
fail
#define fail()
Definition: checkasm.h:123
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:77
dc_bias
static const uint16_t dc_bias[16][32][2]
Definition: vp3data.h:445
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
Vp3DecodeContext::ac_vlc_2
VLC ac_vlc_2[16]
Definition: vp3.c:262
perm
perm
Definition: f_perms.c:74
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:72
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:184
vp31_intra_c_dequant
static const uint8_t vp31_intra_c_dequant[64]
Definition: vp3data.h:42
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:209
Vp3DecodeContext::theora
int theora
Definition: vp3.c:160
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:714
eob_run_table
static const struct @167 eob_run_table[7]
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:341
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:258
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:133
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:242
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:140
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2641
Vp3DecodeContext::hbits
unsigned int hbits
Definition: vp3.c:291
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:1140
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:151
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_cold
#define av_cold
Definition: attributes.h:90
Vp3DecodeContext::huff_code_size
int huff_code_size
Definition: vp3.c:293
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:677
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:125
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:568
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:628
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:203
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:456
Vp3DecodeContext::ac_vlc_3
VLC ac_vlc_3[16]
Definition: vp3.c:263
Vp3DecodeContext::fragment_run_length_vlc
VLC fragment_run_length_vlc
Definition: vp3.c:267
Vp3DecodeContext::vp4_mv_vlc
VLC vp4_mv_vlc[2][7]
Definition: vp3.c:271
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
s1
#define s1
Definition: regdef.h:38
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:379
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:61
bits
uint8_t bits
Definition: vp3data.h:202
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:60
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:57
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1629
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1296
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:87
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: bitstream.c:359
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:54
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:462
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:74
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:194
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:168
if
if(ret)
Definition: filter_design.txt:179
ac_bias_1
static const uint16_t ac_bias_1[16][32][2]
Definition: vp3data.h:1539
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:413
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:76
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:106
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:186
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:407
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:210
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
Vp3DecodeContext::block_pattern_vlc
VLC block_pattern_vlc[2]
Definition: vp3.c:268
init_frames
static av_cold int init_frames(Vp3DecodeContext *s)
Definition: vp3.c:2298
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
PU
#define PU
mode_code_vlc_table
static const uint8_t mode_code_vlc_table[8][2]
Definition: vp3data.h:144
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:790
transform
static const int8_t transform[32][32]
Definition: hevcdsp.c:27
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, VLC *table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1175
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:182
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
src
#define src
Definition: vp8dsp.c:254
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:160
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:243
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
ONLY_IF_THREADS_ENABLED
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:227
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:176
update_frames
static int update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2559
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:180
motion_vector_table
static const int8_t motion_vector_table[63]
Definition: vp3data.h:179
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:290
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:76
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:219
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1627
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:172
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:174
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_thread_release_buffer
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() frame-for multithreaded codecs.
Definition: pthread_frame.c:1006
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:202
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:250
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:62
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:235
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:356
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:189
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
vp4_ac_bias_3
static const uint16_t vp4_ac_bias_3[16][32][2]
Definition: vp4data.h:1023
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:903
vp4_get_mv
static int vp4_get_mv(Vp3DecodeContext *s, GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:893
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
vlc_tables
static VLC_TYPE vlc_tables[VLC_TABLES_SIZE][2]
Definition: imc.c:120
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:301
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:239
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:215
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:178
Vp3DecodeContext::current_frame
ThreadFrame current_frame
Definition: vp3.c:166
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:173
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:163
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:207
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:285
version
version
Definition: libkvazaar.c:292
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:482
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1796
ff_vp3_decoder
AVCodec ff_vp3_decoder
Definition: vp3.c:3250
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:159
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:78
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:257
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:167
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:70
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1779
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:195
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Definition: vp3dsp.c:445
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:199
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:287
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
Vp3DecodeContext::entries
int entries
Definition: vp3.c:292
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:196
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:79
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
superblock_run_length_vlc_table
static const uint16_t superblock_run_length_vlc_table[34][2]
Definition: vp3data.h:98
ac_bias_2
static const uint16_t ac_bias_2[16][32][2]
Definition: vp3data.h:2086
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:193
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
update_thread_context
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread. Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little speed gain at this point but it should work. If there are inter-frame dependencies
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:190
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:187
AVCodecContext::height
int height
Definition: avcodec.h:699
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
ff_theora_decoder
AVCodec ff_theora_decoder
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:254
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:188
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:460
ret
ret
Definition: filter_design.txt:187
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:160
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:306
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:71
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:249
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
Vp3DecodeContext::golden_frame
ThreadFrame golden_frame
Definition: vp3.c:164
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:163
BLOCK_X
#define BLOCK_X
Definition: vp3.c:640
MODE_COPY
#define MODE_COPY
Definition: vp3.c:80
Vp3DecodeContext
Definition: vp3.c:158
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:119
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:75
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:51
AVCodecContext
main external API structure.
Definition: avcodec.h:526
ac_bias_0
static const uint16_t ac_bias_0[16][32][2]
Definition: vp3data.h:992
ThreadFrame
Definition: thread.h:34
Vp3DecodeContext::huffman_table
uint32_t huffman_table[80][32][2]
Definition: vp3.c:294
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1891
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:557
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:214
AVRational::den
int den
Denominator.
Definition: rational.h:60
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1933
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VLC
Definition: vlc.h:26
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:216
Vp3DecodeContext::dc_vlc
VLC dc_vlc[16]
Definition: vp3.c:260
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1027
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:54
temp
else temp
Definition: vf_mcdeint.c:256
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:24
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:69
VideoDSPContext
Definition: videodsp.h:41
vp4_ac_bias_2
static const uint16_t vp4_ac_bias_2[16][32][2]
Definition: vp4data.h:860
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:191
ff_thread_ref_frame
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1885
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1625
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:52
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:714
Vp3DecodeContext::last_frame
ThreadFrame last_frame
Definition: vp3.c:165
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:206
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
vp4_ac_bias_1
static const uint16_t vp4_ac_bias_1[16][32][2]
Definition: vp4data.h:697
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
FF_CODEC_CAP_ALLOCATE_PROGRESS
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
Definition: internal.h:75
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1139
ff_vp4_decoder
AVCodec ff_vp4_decoder
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:551
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:87
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
VP4Predictor::type
int type
Definition: vp3.c:153
read_huffman_tree
static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
Definition: vp3.c:2894
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2310
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:217
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:201
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:65
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:162
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:256
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1097
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:220
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:334
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
h
h
Definition: vp9dsp_template.c:2038
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:67
Vp3DecodeContext::version
int version
Definition: vp3.c:161
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:212
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:175
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:299
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:189
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:222
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:240
Vp3Fragment
Definition: vp3.c:54
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:179
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:275
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:171
vp4_mv_vlc
static const uint16_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:241
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:197