FFmpeg
vp3.c
Go to the documentation of this file.
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/emms.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/mem.h"
42 #include "libavutil/mem_internal.h"
43 #include "libavutil/thread.h"
44 
45 #include "avcodec.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 #include "get_bits.h"
49 #include "hpeldsp.h"
50 #include "jpegquanttables.h"
51 #include "mathops.h"
52 #include "progressframe.h"
53 #include "libavutil/refstruct.h"
54 #include "thread.h"
55 #include "videodsp.h"
56 #include "vp3data.h"
57 #include "vp4data.h"
58 #include "vp3dsp.h"
59 #include "xiph.h"
60 
61 #define VP3_MV_VLC_BITS 6
62 #define VP4_MV_VLC_BITS 6
63 #define SUPERBLOCK_VLC_BITS 6
64 
65 #define FRAGMENT_PIXELS 8
66 
67 // FIXME split things out into their own arrays
/* Per-fragment (one 8x8 block) decode state. */
68 typedef struct Vp3Fragment {
69  int16_t dc;            // DC coefficient (used for DC prediction)
70  uint8_t coding_method; // one of the MODE_* values defined above
71  uint8_t qpi;           // index into s->qps[] selecting this fragment's quantizer
72 } Vp3Fragment;
73 
74 #define SB_NOT_CODED 0
75 #define SB_PARTIALLY_CODED 1
76 #define SB_FULLY_CODED 2
77 
78 // This is the maximum length of a single long bit run that can be encoded
79 // for superblock coding or block qps. Theora special-cases this to read a
80 // bit instead of flipping the current bit to allow for runs longer than 4129.
81 #define MAXIMUM_LONG_BIT_RUN 4129
82 
83 #define MODE_INTER_NO_MV 0
84 #define MODE_INTRA 1
85 #define MODE_INTER_PLUS_MV 2
86 #define MODE_INTER_LAST_MV 3
87 #define MODE_INTER_PRIOR_LAST 4
88 #define MODE_USING_GOLDEN 5
89 #define MODE_GOLDEN_MV 6
90 #define MODE_INTER_FOURMV 7
91 #define CODING_MODE_COUNT 8
92 
93 /* special internal mode */
94 #define MODE_COPY 8
95 
96 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
97 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
98 
99 
100 /* There are 6 preset schemes, plus a free-form scheme */
101 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
102  /* scheme 1: Last motion vector dominates */
107 
108  /* scheme 2 */
113 
114  /* scheme 3 */
119 
120  /* scheme 4 */
125 
126  /* scheme 5: No motion vector dominates */
131 
132  /* scheme 6 */
137 };
138 
/* (x, y) fragment offsets, in Hilbert-curve order, of the 16 fragments
 * within a 4x4-fragment superblock; indexed by fragment number 0..15
 * (see init_block_mapping()). */
139 static const uint8_t hilbert_offset[16][2] = {
140  { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
141  { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
142  { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
143  { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
144 };
145 
146 enum {
152 };
153 
154 static const uint8_t vp4_pred_block_type_map[8] = {
163 };
164 
165 static VLCElem superblock_run_length_vlc[88]; /* version < 2 */
166 static VLCElem fragment_run_length_vlc[56]; /* version < 2 */
167 static VLCElem motion_vector_vlc[112]; /* version < 2 */
168 
169 // The VP4 tables reuse this vlc.
170 static VLCElem mode_code_vlc[24 + 2108 * CONFIG_VP4_DECODER];
171 
172 #if CONFIG_VP4_DECODER
173 static const VLCElem *vp4_mv_vlc_table[2][7]; /* version >= 2 */
174 static const VLCElem *block_pattern_vlc[2]; /* version >= 2 */
175 #endif
176 
/* One entry of the VP4 DC-prediction row (see dc_pred_row below). */
177 typedef struct {
178  int dc;   // DC value available for prediction
179  int type; // predictor block type; presumably maps via vp4_pred_block_type_map -- TODO confirm
180 } VP4Predictor;
181 
182 #define MIN_DEQUANT_VAL 2
183 
/* One Huffman code: bit length and decoded symbol. */
184 typedef struct HuffEntry {
185  uint8_t len, sym;
186 } HuffEntry;
187 
188 typedef struct HuffTable {
190  uint8_t nb_entries;
191 } HuffTable;
192 
/* The 80 DCT-coefficient VLCs (16 DC tables plus four groups of 16 AC
 * tables); shared between threads via a RefStruct reference. */
193 typedef struct CoeffVLCs {
194  const VLCElem *vlc_tabs[80];
195  VLC vlcs[80];
196 } CoeffVLCs;
197 
198 typedef struct Vp3DecodeContext {
201  int version;
202  int width, height;
207  int keyframe;
208  uint8_t idct_permutation[64];
209  uint8_t idct_scantable[64];
213  DECLARE_ALIGNED(16, int16_t, block)[64];
217 
218  int qps[3];
219  int nqps;
220 
230  unsigned char *superblock_coding;
231 
232  int macroblock_count; /* y macroblock count */
238  int yuv_macroblock_count; /* y+u+v macroblock count */
239 
243 
246  int data_offset[3];
247  uint8_t offset_x;
248  uint8_t offset_y;
250 
251  int8_t (*motion_val[2])[2];
252 
253  /* tables */
254  uint16_t coded_dc_scale_factor[2][64];
255  uint32_t coded_ac_scale_factor[64];
256  uint8_t base_matrix[384][64];
257  uint8_t qr_count[2][3];
258  uint8_t qr_size[2][3][64];
259  uint16_t qr_base[2][3][64];
260 
261  /**
262  * This is a list of all tokens in bitstream order. Reordering takes place
263  * by pulling from each level during IDCT. As a consequence, IDCT must be
264  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
265  * otherwise. The 32 different tokens with up to 12 bits of extradata are
266  * collapsed into 3 types, packed as follows:
267  * (from the low to high bits)
268  *
269  * 2 bits: type (0,1,2)
270  * 0: EOB run, 14 bits for run length (12 needed)
271  * 1: zero run, 7 bits for run length
272  * 7 bits for the next coefficient (3 needed)
273  * 2: coefficient, 14 bits (11 needed)
274  *
275  * Coefficients are signed, so are packed in the highest bits for automatic
276  * sign extension.
277  */
278  int16_t *dct_tokens[3][64];
279  int16_t *dct_tokens_base;
280 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
281 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
282 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
283 
284  /**
285  * number of blocks that contain DCT coefficients at
286  * the given level or higher
287  */
288  int num_coded_frags[3][64];
290 
291  /* this is a list of indexes into the all_fragments array indicating
292  * which of the fragments are coded */
294 
298 
299  /**
300  * The first 16 of the following VLCs are for the dc coefficients;
301  * the others are four groups of 16 VLCs each for ac coefficients.
302  * This is a RefStruct reference to share these VLCs between threads.
303  */
305 
306  /* these arrays need to be on 16-byte boundaries since SSE2 operations
307  * index into them */
308  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
309 
310  /* This table contains superblock_count * 16 entries. Each set of 16
311  * numbers corresponds to the fragment indexes 0..15 of the superblock.
312  * An entry will be -1 to indicate that no entry corresponds to that
313  * index. */
315 
316  /* This is an array that indicates how a particular macroblock
317  * is coded. */
318  unsigned char *macroblock_coding;
319 
320  uint8_t *edge_emu_buffer;
321 
322  /* Huffman decode */
324 
325  uint8_t filter_limit_values[64];
327 
328  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
330 
331 /************************************************************************
332  * VP3 specific functions
333  ************************************************************************/
334 
/**
 * Free all geometry-dependent tables of the decoder context.
 * Safe to call more than once: av_freep() NULLs each pointer.
 */
335 static av_cold void free_tables(AVCodecContext *avctx)
336 {
337  Vp3DecodeContext *s = avctx->priv_data;
338 
339  av_freep(&s->superblock_coding);
340  av_freep(&s->all_fragments);
341  av_freep(&s->nkf_coded_fragment_list);
342  av_freep(&s->kf_coded_fragment_list);
343  av_freep(&s->dct_tokens_base);
344  av_freep(&s->superblock_fragments);
345  av_freep(&s->macroblock_coding);
346  av_freep(&s->dc_pred_row);
347  av_freep(&s->motion_val[0]);
348  av_freep(&s->motion_val[1]);
349 }
350 
/* NOTE(review): the signature line was lost in extraction; from the call
 * in vp3_decode_end() this is vp3_decode_flush(AVCodecContext *avctx) --
 * confirm against the repository. Drops the references to all three
 * reference frames (golden, last, current). */
352 {
353  Vp3DecodeContext *s = avctx->priv_data;
354 
355  ff_progress_frame_unref(&s->golden_frame);
356  ff_progress_frame_unref(&s->last_frame);
357  ff_progress_frame_unref(&s->current_frame);
358 }
359 
/* NOTE(review): signature line lost in extraction -- this is the decoder
 * close callback (vp3_decode_end). Releases every resource: geometry
 * tables, the edge-emulation buffer, all frames, and the shared
 * coefficient-VLC reference. Always returns 0. */
361 {
362  Vp3DecodeContext *s = avctx->priv_data;
363 
364  free_tables(avctx);
365  av_freep(&s->edge_emu_buffer);
366 
367  s->theora_tables = 0;
368 
369  /* release all frames */
370  vp3_decode_flush(avctx);
371 
372  av_refstruct_unref(&s->coeff_vlc);
373 
374  return 0;
375 }
376 
377 /**
378  * This function sets up all of the various blocks mappings:
379  * superblocks <-> fragments, macroblocks <-> fragments,
380  * superblocks <-> macroblocks
381  *
382  * @return 0 if successful; returns 1 if *anything* went wrong.
383  */
/* NOTE(review): signature line lost in extraction (per the doc comment
 * above, this is init_block_mapping()). Fills s->superblock_fragments:
 * 16 entries per superblock, each either a fragment index (in Hilbert
 * order within the 4x4 superblock) or -1 for positions outside the
 * plane's fragment grid. Despite the doc comment, it only returns 0. */
385 {
386  int j = 0;
387 
388  for (int plane = 0; plane < 3; plane++) {
389  int sb_width = plane ? s->c_superblock_width
390  : s->y_superblock_width;
391  int sb_height = plane ? s->c_superblock_height
392  : s->y_superblock_height;
393  int frag_width = s->fragment_width[!!plane];
394  int frag_height = s->fragment_height[!!plane];
395 
396  for (int sb_y = 0; sb_y < sb_height; sb_y++)
397  for (int sb_x = 0; sb_x < sb_width; sb_x++)
398  for (int i = 0; i < 16; i++) {
399  int x = 4 * sb_x + hilbert_offset[i][0];
400  int y = 4 * sb_y + hilbert_offset[i][1];
401 
402  if (x < frag_width && y < frag_height)
403  s->superblock_fragments[j++] = s->fragment_start[plane] +
404  y * frag_width + x;
405  else
406  s->superblock_fragments[j++] = -1;
407  }
408  }
409 
410  return 0; /* successful path out */
411 }
412 
413 /*
414  * This function sets up the dequantization tables used for a particular
415  * frame.
416  */
/*
 * Build the dequantization matrices s->qmat[qpi][inter][plane][] for the
 * quality index s->qps[qpi]. For each (inter, plane) pair the base matrix
 * is linearly interpolated between the two qr_base matrices bracketing the
 * quality index, then scaled by the DC/AC scale factors; results are stored
 * in IDCT-permuted order.
 */
417 static void init_dequantizer(Vp3DecodeContext *s, int qpi)
418 {
419  int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];
420 
421  for (int inter = 0; inter < 2; inter++) {
422  for (int plane = 0; plane < 3; plane++) {
423  int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
424  int sum = 0, bmi, bmj, qistart, qri;
                /* find the quant-range interval containing s->qps[qpi] */
425  for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
426  sum += s->qr_size[inter][plane][qri];
427  if (s->qps[qpi] <= sum)
428  break;
429  }
430  qistart = sum - s->qr_size[inter][plane][qri];
431  bmi = s->qr_base[inter][plane][qri];
432  bmj = s->qr_base[inter][plane][qri + 1];
433  for (int i = 0; i < 64; i++) {
                    /* rounded linear interpolation between base matrices bmi and bmj */
434  int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
435  2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
436  s->qr_size[inter][plane][qri]) /
437  (2 * s->qr_size[inter][plane][qri]);
438 
439  int qmin = 8 << (inter + !i);
440  int qscale = i ? ac_scale_factor : dc_scale_factor;
441  int qbias = (1 + inter) * 3;
                    /* VP4 (version >= 2) uses a biased, unclamped formula for AC terms */
442  s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
443  (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
444  : (qscale * (coeff - qbias) / 100 + qbias) * 4;
445  }
446  /* all DC coefficients use the same quant so as not to interfere
447  * with DC prediction */
448  s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
449  }
450  }
451 }
452 
453 /*
454  * This function initializes the loop filter boundary limits if the frame's
455  * quality index is different from the previous frame's.
456  *
457  * The filter_limit_values may not be larger than 127.
458  */
/* NOTE(review): signature line lost in extraction (per the comment above,
 * this is init_loop_filter()). Recomputes the loop-filter bounding values
 * from the filter limit for the frame's primary quality index. */
460 {
461  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
462 }
463 
464 /*
465  * This function unpacks all of the superblock/macroblock/fragment coding
466  * information from the bitstream.
467  */
/* NOTE(review): signature line (468) lost in extraction; per the comment
 * above this is the superblock/fragment coding unpacker (unpack_superblocks).
 * Phase 1: decode the run-length coded partially/fully coded superblock
 * lists. Phase 2: walk every superblock of every plane and build the
 * per-plane coded-fragment lists, reading per-fragment coded bits for
 * partially coded superblocks. Returns 0 on success, a negative value on
 * invalid bitstream data. */
469 {
470  const int superblock_starts[3] = {
471  0, s->u_superblock_start, s->v_superblock_start
472  };
473  int bit = 0;
474  int current_superblock = 0;
475  int current_run = 0;
476  int num_partial_superblocks = 0;
477 
478  int current_fragment;
479  int plane0_num_coded_frags = 0;
480 
481  if (s->keyframe) {
        /* keyframes code every fragment; no lists in the bitstream */
482  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
483  } else {
484  /* unpack the list of partially-coded superblocks */
485  bit = get_bits1(gb) ^ 1;
486  current_run = 0;
487 
488  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
            /* Theora reads an explicit bit after a maximum-length run
             * instead of toggling, to allow runs longer than 4129 */
489  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
490  bit = get_bits1(gb);
491  else
492  bit ^= 1;
493 
            /* NOTE(review): line 495 (the VLC bit-width/depth arguments) was
             * lost in extraction; compare with the identical call in
             * unpack_block_qpis(): SUPERBLOCK_VLC_BITS, 2 */
494  current_run = get_vlc2(gb, superblock_run_length_vlc,
496  if (current_run == 34)
497  current_run += get_bits(gb, 12);
498 
499  if (current_run > s->superblock_count - current_superblock) {
500  av_log(s->avctx, AV_LOG_ERROR,
501  "Invalid partially coded superblock run length\n");
502  return -1;
503  }
504 
505  memset(s->superblock_coding + current_superblock, bit, current_run);
506 
507  current_superblock += current_run;
508  if (bit)
509  num_partial_superblocks += current_run;
510  }
511 
512  /* unpack the list of fully coded superblocks if any of the blocks were
513  * not marked as partially coded in the previous step */
514  if (num_partial_superblocks < s->superblock_count) {
515  int superblocks_decoded = 0;
516 
517  current_superblock = 0;
518  bit = get_bits1(gb) ^ 1;
519  current_run = 0;
520 
521  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
522  get_bits_left(gb) > 0) {
523  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
524  bit = get_bits1(gb);
525  else
526  bit ^= 1;
527 
                /* NOTE(review): line 529 (VLC arguments) lost in extraction,
                 * same call shape as above */
528  current_run = get_vlc2(gb, superblock_run_length_vlc,
530  if (current_run == 34)
531  current_run += get_bits(gb, 12);
532 
533  for (int j = 0; j < current_run; current_superblock++) {
534  if (current_superblock >= s->superblock_count) {
535  av_log(s->avctx, AV_LOG_ERROR,
536  "Invalid fully coded superblock run length\n");
537  return -1;
538  }
539 
540  /* skip any superblocks already marked as partially coded */
541  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
542  s->superblock_coding[current_superblock] = 2 * bit;
543  j++;
544  }
545  }
546  superblocks_decoded += current_run;
547  }
548  }
549 
550  /* if there were partial blocks, initialize bitstream for
551  * unpacking fragment codings */
552  if (num_partial_superblocks) {
553  current_run = 0;
554  bit = get_bits1(gb);
555  /* toggle the bit because as soon as the first run length is
556  * fetched the bit will be toggled again */
557  bit ^= 1;
558  }
559  }
560 
561  /* figure out which fragments are coded; iterate through each
562  * superblock (all planes) */
563  s->total_num_coded_frags = 0;
564  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
565 
566  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
567  : s->nkf_coded_fragment_list;
568 
569  for (int plane = 0; plane < 3; plane++) {
570  int sb_start = superblock_starts[plane];
571  int sb_end = sb_start + (plane ? s->c_superblock_count
572  : s->y_superblock_count);
573  int num_coded_frags = 0;
574 
575  if (s->keyframe) {
            /* the keyframe list depends only on geometry, so it is built
             * once and cached (num_kf_coded_fragment starts at -1) */
576  if (s->num_kf_coded_fragment[plane] == -1) {
577  for (int i = sb_start; i < sb_end; i++) {
578  /* iterate through all 16 fragments in a superblock */
579  for (int j = 0; j < 16; j++) {
580  /* if the fragment is in bounds, check its coding status */
581  current_fragment = s->superblock_fragments[i * 16 + j];
582  if (current_fragment != -1) {
583  s->coded_fragment_list[plane][num_coded_frags++] =
584  current_fragment;
585  }
586  }
587  }
588  s->num_kf_coded_fragment[plane] = num_coded_frags;
589  } else
590  num_coded_frags = s->num_kf_coded_fragment[plane];
591  } else {
592  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
                /* sanity check: remaining input must plausibly cover the
                 * fragments coded so far */
593  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
594  return AVERROR_INVALIDDATA;
595  }
596  /* iterate through all 16 fragments in a superblock */
597  for (int j = 0; j < 16; j++) {
598  /* if the fragment is in bounds, check its coding status */
599  current_fragment = s->superblock_fragments[i * 16 + j];
600  if (current_fragment != -1) {
601  int coded = s->superblock_coding[i];
602 
603  if (coded == SB_PARTIALLY_CODED) {
604  /* fragment may or may not be coded; this is the case
605  * that cares about the fragment coding runs */
606  if (current_run-- == 0) {
607  bit ^= 1;
608  current_run = get_vlc2(gb, fragment_run_length_vlc, 5, 2);
609  }
610  coded = bit;
611  }
612 
613  if (coded) {
614  /* default mode; actual mode will be decoded in
615  * the next phase */
                            /* NOTE(review): line 617 (the assigned mode,
                             * presumably MODE_INTER_NO_MV) lost in extraction */
616  s->all_fragments[current_fragment].coding_method =
618  s->coded_fragment_list[plane][num_coded_frags++] =
619  current_fragment;
620  } else {
621  /* not coded; copy this fragment from the prior frame */
622  s->all_fragments[current_fragment].coding_method =
623  MODE_COPY;
624  }
625  }
626  }
627  }
628  }
629  if (!plane)
630  plane0_num_coded_frags = num_coded_frags;
631  s->total_num_coded_frags += num_coded_frags;
632  for (int i = 0; i < 64; i++)
633  s->num_coded_frags[plane][i] = num_coded_frags;
        /* each plane's list is a sub-array of the shared list */
634  if (plane < 2)
635  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
636  num_coded_frags;
637  }
638  return 0;
639 }
640 
641 #define BLOCK_X (2 * mb_x + (k & 1))
642 #define BLOCK_Y (2 * mb_y + (k >> 1))
643 
644 #if CONFIG_VP4_DECODER
645 /**
646  * @return number of blocks, or > yuv_macroblock_count on error.
647  * return value is always >= 1.
648  */
/**
 * Decode a VP4 macroblock run length.
 * The escape code 0x1ff (9 bits) adds 256 per occurrence; the remaining
 * prefix codes select how many extra magnitude bits follow (macro-generated
 * ladder below).
 * @return number of blocks, always >= 1; a value > yuv_macroblock_count
 *         signals an error to the caller.
 */
649 static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
650 {
651  int v = 1;
652  int bits;
    /* consume leading 0x1ff escapes, each worth 256 blocks */
653  while ((bits = show_bits(gb, 9)) == 0x1ff) {
654  skip_bits(gb, 9);
655  v += 256;
656  if (v > s->yuv_macroblock_count) {
657  av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
658  return v;
659  }
660  }
/* body(n): skip the (2 + n)-bit prefix, then read n magnitude bits */
661 #define body(n) { \
662  skip_bits(gb, 2 + n); \
663  v += (1 << n) + get_bits(gb, n); }
664 #define thresh(n) (0x200 - (0x80 >> n))
665 #define else_if(n) else if (bits < thresh(n)) body(n)
666  if (bits < 0x100) {
667  skip_bits(gb, 1);
668  } else if (bits < thresh(0)) {
669  skip_bits(gb, 2);
670  v += 1;
671  }
672  else_if(1)
673  else_if(2)
674  else_if(3)
675  else_if(4)
676  else_if(5)
677  else_if(6)
678  else body(7)
679 #undef body
680 #undef thresh
681 #undef else_if
682  return v;
683 }
684 
685 static int vp4_get_block_pattern(GetBitContext *gb, int *next_block_pattern_table)
686 {
687  int v = get_vlc2(gb, block_pattern_vlc[*next_block_pattern_table], 5, 1);
688  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
689  return v + 1;
690 }
691 
/**
 * Unpack the VP4 macroblock coding information: first run-length decode
 * which macroblocks are fully/partially coded (reusing superblock_coding
 * as per-macroblock storage), then resolve the not-yet-decided macroblocks,
 * and finally assign per-fragment coding methods from 4-bit block patterns.
 * @return 0 on success, negative on bitstream error.
 */
692 static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
693 {
694  int fragment;
695  int next_block_pattern_table;
696  int bit, current_run, has_partial;
697 
698  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
699 
700  if (s->keyframe)
701  return 0;
702 
    /* pass 1: runs of fully-coded (2*bit) vs undecided macroblocks */
703  has_partial = 0;
704  bit = get_bits1(gb);
705  for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
706  if (get_bits_left(gb) <= 0)
707  return AVERROR_INVALIDDATA;
708  current_run = vp4_get_mb_count(s, gb);
709  if (current_run > s->yuv_macroblock_count - i)
710  return -1;
711  memset(s->superblock_coding + i, 2 * bit, current_run);
712  bit ^= 1;
713  has_partial |= bit;
714  }
715 
    /* pass 2: resolve the macroblocks left at 0 to partial/not-coded */
716  if (has_partial) {
717  if (get_bits_left(gb) <= 0)
718  return AVERROR_INVALIDDATA;
719  bit = get_bits1(gb);
720  current_run = vp4_get_mb_count(s, gb);
721  for (int i = 0; i < s->yuv_macroblock_count; i++) {
722  if (!s->superblock_coding[i]) {
723  if (!current_run) {
724  bit ^= 1;
725  current_run = vp4_get_mb_count(s, gb);
726  }
727  s->superblock_coding[i] = bit;
728  current_run--;
729  }
730  }
731  if (current_run) /* handle situation when vp4_get_mb_count() fails */
732  return -1;
733  }
734 
    /* pass 3: per-macroblock block patterns -> per-fragment coding method */
735  next_block_pattern_table = 0;
736  for (int plane = 0, i = 0; plane < 3; plane++) {
737  int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
738  int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
739  int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
740  int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
741  int fragment_width = s->fragment_width[!!plane];
742  int fragment_height = s->fragment_height[!!plane];
743 
744  for (int sb_y = 0; sb_y < sb_height; sb_y++) {
745  for (int sb_x = 0; sb_x < sb_width; sb_x++) {
746  for (int j = 0; j < 4; j++) {
747  int mb_x = 2 * sb_x + (j >> 1);
748  int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
749  int mb_coded, pattern, coded;
750 
751  if (mb_x >= mb_width || mb_y >= mb_height)
752  continue;
753 
754  mb_coded = s->superblock_coding[i++];
755 
756  if (mb_coded == SB_FULLY_CODED)
757  pattern = 0xF;
758  else if (mb_coded == SB_PARTIALLY_CODED)
759  pattern = vp4_get_block_pattern(gb, &next_block_pattern_table);
760  else
761  pattern = 0;
762 
763  for (int k = 0; k < 4; k++) {
764  if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
765  continue;
766  fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
767  coded = pattern & (8 >> k);
768  /* MODE_INTER_NO_MV is the default for coded fragments.
769  the actual method is decoded in the next phase. */
770  s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
771  }
772  }
773  }
774  }
775  }
776  return 0;
777 }
778 #endif
779 
780 /*
781  * This function unpacks all the coding mode data for individual macroblocks
782  * from the bitstream.
783  */
/* NOTE(review): signature line (784) lost in extraction; per the comment
 * above this is the macroblock coding-mode unpacker (unpack_modes).
 * Keyframes force MODE_INTRA for every fragment; otherwise a mode alphabet
 * (preset or custom) translates VLC symbols into coding modes, which are
 * then propagated to the luma fragments and the co-located chroma
 * fragments of each coded macroblock. */
785 {
786  int scheme;
787  int current_macroblock;
788  int current_fragment;
789  int coding_mode;
790  int custom_mode_alphabet[CODING_MODE_COUNT];
791  const int *alphabet;
792  Vp3Fragment *frag;
793 
794  if (s->keyframe) {
795  for (int i = 0; i < s->fragment_count; i++)
796  s->all_fragments[i].coding_method = MODE_INTRA;
797  } else {
798  /* fetch the mode coding scheme for this frame */
799  scheme = get_bits(gb, 3);
800 
801  /* is it a custom coding scheme? */
802  if (scheme == 0) {
803  for (int i = 0; i < 8; i++)
804  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
805  for (int i = 0; i < 8; i++)
806  custom_mode_alphabet[get_bits(gb, 3)] = i;
807  alphabet = custom_mode_alphabet;
808  } else
809  alphabet = ModeAlphabet[scheme - 1];
810 
811  /* iterate through all of the macroblocks that contain 1 or more
812  * coded fragments */
813  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
814  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
815  if (get_bits_left(gb) <= 0)
816  return -1;
817 
818  for (int j = 0; j < 4; j++) {
819  int k;
820  int mb_x = 2 * sb_x + (j >> 1);
821  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
822  current_macroblock = mb_y * s->macroblock_width + mb_x;
823 
824  if (mb_x >= s->macroblock_width ||
825  mb_y >= s->macroblock_height)
826  continue;
827 
828  /* coding modes are only stored if the macroblock has
829  * at least one luma block coded, otherwise it must be
830  * INTER_NO_MV */
831  for (k = 0; k < 4; k++) {
832  current_fragment = BLOCK_Y *
833  s->fragment_width[0] + BLOCK_X;
834  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
835  break;
836  }
837  if (k == 4) {
838  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
839  continue;
840  }
841 
842  /* mode 7 means get 3 bits for each coding mode */
843  if (scheme == 7)
844  coding_mode = get_bits(gb, 3);
845  else
846  coding_mode = alphabet[get_vlc2(gb, mode_code_vlc, 4, 2)];
847 
848  s->macroblock_coding[current_macroblock] = coding_mode;
849  for (k = 0; k < 4; k++) {
850  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
851  if (frag->coding_method != MODE_COPY)
852  frag->coding_method = coding_mode;
853  }
854 
855 #define SET_CHROMA_MODES \
856  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
857  frag[s->fragment_start[1]].coding_method = coding_mode; \
858  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
859  frag[s->fragment_start[2]].coding_method = coding_mode;
860 
861  if (s->chroma_y_shift) {
862  frag = s->all_fragments + mb_y *
863  s->fragment_width[1] + mb_x;
                        /* NOTE(review): line 864 (a SET_CHROMA_MODES
                         * invocation) lost in extraction */
865  } else if (s->chroma_x_shift) {
866  frag = s->all_fragments +
867  2 * mb_y * s->fragment_width[1] + mb_x;
868  for (k = 0; k < 2; k++) {
                            /* NOTE(review): line 869 (SET_CHROMA_MODES)
                             * lost in extraction */
870  frag += s->fragment_width[1];
871  }
872  } else {
873  for (k = 0; k < 4; k++) {
874  frag = s->all_fragments +
875  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
                            /* NOTE(review): line 876 (SET_CHROMA_MODES)
                             * lost in extraction */
877  }
878  }
879  }
880  }
881  }
882  }
883 
884  return 0;
885 }
886 
887 static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
888 {
889 #if CONFIG_VP4_DECODER
890  int v = get_vlc2(gb, vp4_mv_vlc_table[axis][vp4_mv_table_selector[FFABS(last_motion)]],
891  VP4_MV_VLC_BITS, 2);
892  return last_motion < 0 ? -v : v;
893 #else
894  return 0;
895 #endif
896 }
897 
898 /*
899  * This function unpacks all the motion vectors for the individual
900  * macroblocks from the bitstream.
901  */
/* NOTE(review): signature line (902) lost in extraction; per the comment
 * above this is the motion-vector unpacker (unpack_vectors). Decodes one
 * MV per macroblock (or four for MODE_INTER_FOURMV), maintaining last /
 * prior-last / golden MV state, then fans the vectors out to the luma and
 * (subsampling-adjusted) chroma fragments. */
903 {
904  int coding_mode;
905  int motion_x[4];
906  int motion_y[4];
907  int last_motion_x = 0;
908  int last_motion_y = 0;
909  int prior_last_motion_x = 0;
910  int prior_last_motion_y = 0;
911  int last_gold_motion_x = 0;
912  int last_gold_motion_y = 0;
913  int current_macroblock;
914  int current_fragment;
915  int frag;
916 
917  if (s->keyframe)
918  return 0;
919 
920  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
921  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
922 
923  /* iterate through all of the macroblocks that contain 1 or more
924  * coded fragments */
925  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
926  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
927  if (get_bits_left(gb) <= 0)
928  return -1;
929 
930  for (int j = 0; j < 4; j++) {
931  int mb_x = 2 * sb_x + (j >> 1);
932  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
933  current_macroblock = mb_y * s->macroblock_width + mb_x;
934 
935  if (mb_x >= s->macroblock_width ||
936  mb_y >= s->macroblock_height ||
937  s->macroblock_coding[current_macroblock] == MODE_COPY)
938  continue;
939 
940  switch (s->macroblock_coding[current_macroblock]) {
941  case MODE_GOLDEN_MV:
942  if (coding_mode == 2) { /* VP4 */
                        /* VP4 tracks the golden-frame MV separately */
943  last_gold_motion_x = motion_x[0] = vp4_get_mv(gb, 0, last_gold_motion_x);
944  last_gold_motion_y = motion_y[0] = vp4_get_mv(gb, 1, last_gold_motion_y);
945  break;
946  } /* otherwise fall through */
947  case MODE_INTER_PLUS_MV:
948  /* all 6 fragments use the same motion vector */
949  if (coding_mode == 0) {
950  motion_x[0] = get_vlc2(gb, motion_vector_vlc,
951  VP3_MV_VLC_BITS, 2);
952  motion_y[0] = get_vlc2(gb, motion_vector_vlc,
953  VP3_MV_VLC_BITS, 2);
954  } else if (coding_mode == 1) {
955  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
956  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
957  } else { /* VP4 */
958  motion_x[0] = vp4_get_mv(gb, 0, last_motion_x);
959  motion_y[0] = vp4_get_mv(gb, 1, last_motion_y);
960  }
961 
962  /* vector maintenance, only on MODE_INTER_PLUS_MV */
963  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
964  prior_last_motion_x = last_motion_x;
965  prior_last_motion_y = last_motion_y;
966  last_motion_x = motion_x[0];
967  last_motion_y = motion_y[0];
968  }
969  break;
970 
971  case MODE_INTER_FOURMV:
972  /* vector maintenance */
973  prior_last_motion_x = last_motion_x;
974  prior_last_motion_y = last_motion_y;
975 
976  /* fetch 4 vectors from the bitstream, one for each
977  * Y fragment, then average for the C fragment vectors */
978  for (int k = 0; k < 4; k++) {
979  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
980  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
981  if (coding_mode == 0) {
982  motion_x[k] = get_vlc2(gb, motion_vector_vlc,
983  VP3_MV_VLC_BITS, 2);
984  motion_y[k] = get_vlc2(gb, motion_vector_vlc,
985  VP3_MV_VLC_BITS, 2);
986  } else if (coding_mode == 1) {
987  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
988  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
989  } else { /* VP4 */
990  motion_x[k] = vp4_get_mv(gb, 0, prior_last_motion_x);
991  motion_y[k] = vp4_get_mv(gb, 1, prior_last_motion_y);
992  }
993  last_motion_x = motion_x[k];
994  last_motion_y = motion_y[k];
995  } else {
996  motion_x[k] = 0;
997  motion_y[k] = 0;
998  }
999  }
1000  break;
1001 
1002  case MODE_INTER_LAST_MV:
1003  /* all 6 fragments use the last motion vector */
1004  motion_x[0] = last_motion_x;
1005  motion_y[0] = last_motion_y;
1006 
1007  /* no vector maintenance (last vector remains the
1008  * last vector) */
1009  break;
1010 
1011  case MODE_INTER_PRIOR_LAST:
1012  /* all 6 fragments use the motion vector prior to the
1013  * last motion vector */
1014  motion_x[0] = prior_last_motion_x;
1015  motion_y[0] = prior_last_motion_y;
1016 
1017  /* vector maintenance */
1018  prior_last_motion_x = last_motion_x;
1019  prior_last_motion_y = last_motion_y;
1020  last_motion_x = motion_x[0];
1021  last_motion_y = motion_y[0];
1022  break;
1023 
1024  default:
1025  /* covers intra, inter without MV, golden without MV */
1026  motion_x[0] = 0;
1027  motion_y[0] = 0;
1028 
1029  /* no vector maintenance */
1030  break;
1031  }
1032 
1033  /* assign the motion vectors to the correct fragments */
1034  for (int k = 0; k < 4; k++) {
1035  current_fragment =
1036  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1037  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1038  s->motion_val[0][current_fragment][0] = motion_x[k];
1039  s->motion_val[0][current_fragment][1] = motion_y[k];
1040  } else {
1041  s->motion_val[0][current_fragment][0] = motion_x[0];
1042  s->motion_val[0][current_fragment][1] = motion_y[0];
1043  }
1044  }
1045 
                /* chroma: average and scale vectors per subsampling layout */
1046  if (s->chroma_y_shift) {
1047  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1048  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1049  motion_x[2] + motion_x[3], 2);
1050  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1051  motion_y[2] + motion_y[3], 2);
1052  }
1053  if (s->version <= 2) {
1054  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1055  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1056  }
1057  frag = mb_y * s->fragment_width[1] + mb_x;
1058  s->motion_val[1][frag][0] = motion_x[0];
1059  s->motion_val[1][frag][1] = motion_y[0];
1060  } else if (s->chroma_x_shift) {
1061  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1062  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1063  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1064  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1065  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1066  } else {
1067  motion_x[1] = motion_x[0];
1068  motion_y[1] = motion_y[0];
1069  }
1070  if (s->version <= 2) {
1071  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1072  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1073  }
1074  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1075  for (int k = 0; k < 2; k++) {
1076  s->motion_val[1][frag][0] = motion_x[k];
1077  s->motion_val[1][frag][1] = motion_y[k];
1078  frag += s->fragment_width[1];
1079  }
1080  } else {
1081  for (int k = 0; k < 4; k++) {
1082  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1083  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1084  s->motion_val[1][frag][0] = motion_x[k];
1085  s->motion_val[1][frag][1] = motion_y[k];
1086  } else {
1087  s->motion_val[1][frag][0] = motion_x[0];
1088  s->motion_val[1][frag][1] = motion_y[0];
1089  }
1090  }
1091  }
1092  }
1093  }
1094  }
1095 
1096  return 0;
1097 }
1098 
/* NOTE(review): signature line (1099) lost in extraction -- this unpacks
 * the per-block quantizer indices (unpack_block_qpis). For each extra
 * quality index, a run-length coded bit list selects which of the still
 * eligible coded fragments move up to the next qpi. */
1100 {
1101  int num_blocks = s->total_num_coded_frags;
1102 
1103  for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
1104  int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
1105  int bit, run_length;
1106 
1107  bit = get_bits1(gb) ^ 1;
1108  run_length = 0;
1109 
1110  do {
            /* after a maximum-length run an explicit bit is read instead
             * of toggling (same scheme as superblock coding) */
1111  if (run_length == MAXIMUM_LONG_BIT_RUN)
1112  bit = get_bits1(gb);
1113  else
1114  bit ^= 1;
1115 
1116  run_length = get_vlc2(gb, superblock_run_length_vlc,
1117  SUPERBLOCK_VLC_BITS, 2);
1118  if (run_length == 34)
1119  run_length += get_bits(gb, 12);
1120  blocks_decoded += run_length;
1121 
1122  if (!bit)
1123  num_blocks_at_qpi += run_length;
1124 
            /* apply the run only to fragments still at the current qpi */
1125  for (int j = 0; j < run_length; i++) {
1126  if (i >= s->total_num_coded_frags)
1127  return -1;
1128 
1129  if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
1130  s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
1131  j++;
1132  }
1133  }
1134  } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);
1135 
        /* blocks that stayed at this qpi are settled; the rest continue */
1136  num_blocks -= num_blocks_at_qpi;
1137  }
1138 
1139  return 0;
1140 }
1141 
1142 static inline int get_eob_run(GetBitContext *gb, int token)
1143 {
1144  int v = eob_run_table[token].base;
1145  if (eob_run_table[token].bits)
1146  v += get_bits(gb, eob_run_table[token].bits);
1147  return v;
1148 }
1149 
1150 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1151 {
1152  int bits_to_get, zero_run;
1153 
1154  bits_to_get = coeff_get_bits[token];
1155  if (bits_to_get)
1156  bits_to_get = get_bits(gb, bits_to_get);
1157  *coeff = coeff_tables[token][bits_to_get];
1158 
1159  zero_run = zero_run_base[token];
1160  if (zero_run_get_bits[token])
1161  zero_run += get_bits(gb, zero_run_get_bits[token]);
1162 
1163  return zero_run;
1164 }
1165 
1166 /*
1167  * This function is called by unpack_dct_coeffs() to extract the VLCs from
1168  * the bitstream. The VLCs encode tokens which are used to unpack DCT
1169  * data. This function unpacks all the VLCs for either the Y plane or both
1170  * C planes, and is called for DC coefficients or different AC coefficient
1171  * levels (since different coefficient types require different VLC tables.
1172  *
1173  * This function returns a residual eob run. E.g, if a particular token gave
1174  * instructions to EOB the next 5 fragments and there were only 2 fragments
1175  * left in the current fragment range, 3 would be returned so that it could
1176  * be passed into the next call to this same function.
1177  */
                       const VLCElem *vlc_table, int coeff_index,
                       int plane,
                       int eob_run)
{
    int j = 0;
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    const int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;

    if (num_coeffs < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid number of coefficients at level %d\n", coeff_index);
        return AVERROR_INVALIDDATA;
    }

    /* consume the residual EOB run carried over from the previous call;
     * it may end some or all of this range's blocks before any bits are read */
    if (eob_run > num_coeffs) {
        coeff_i      =
        blocks_ended = num_coeffs;
        eob_run     -= num_coeffs;
    } else {
        coeff_i      =
        blocks_ended = eob_run;
        eob_run      = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            eob_run = get_eob_run(gb, token);
            /* an explicit zero-length EOB run means "until further notice" */
            if (!eob_run)
                eob_run = INT_MAX;

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run         = 0;
            }
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "Invalid zero run of %d with %d coeffs left\n",
                       zero_run, 64 - coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (int i = coeff_index + 1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;

    /* non-negative return: the residual EOB run for the caller's next call */
    return eob_run;
}
1289 
1291  int first_fragment,
1292  int fragment_width,
1293  int fragment_height);
1294 /*
1295  * This function unpacks all of the DCT coefficient data from the
1296  * bitstream.
1297  */
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    /* EOB runs spill across planes and coefficient levels; this carries
     * the residue from one unpack_vlcs() call to the next */
    int residual_eob_run = 0;
    const VLCElem *y_tables[64], *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_y_table], 0,
                                   0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        reverse_dc_prediction(s, s->fragment_start[1],
                              s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
                              s->fragment_width[1], s->fragment_height[1]);
    }

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;
    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables; AC levels share one of four table
     * groups depending on the coefficient index (same grouping as the
     * VP4 path in vp4_unpack_dct_coeffs()) */
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        y_tables[i] = coeff_vlc[ac_y_table + 16];
        c_tables[i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        y_tables[i] = coeff_vlc[ac_y_table + 32];
        c_tables[i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        y_tables[i] = coeff_vlc[ac_y_table + 48];
        c_tables[i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        y_tables[i] = coeff_vlc[ac_y_table + 64];
        c_tables[i] = coeff_vlc[ac_c_table + 64];
    }

    /* decode all AC coefficients, one level at a time, Y then Cb then Cr */
    for (int i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                       0, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       1, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       2, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
    }

    return 0;
}
1393 
1394 #if CONFIG_VP4_DECODER
1395 /**
1396  * eob_tracker[] is instead of TOKEN_EOB(value)
1397  * a dummy TOKEN_EOB(0) value is used to make vp3_dequant work
1398  *
1399  * @return < 0 on error
1400  */
1401 static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
1402  const VLCElem *const vlc_tables[64],
1403  int plane, int eob_tracker[64], int fragment)
1404 {
1405  int token;
1406  int zero_run = 0;
1407  int16_t coeff = 0;
1408  int coeff_i = 0;
1409  int eob_run;
1410 
1411  while (!eob_tracker[coeff_i]) {
1412  if (get_bits_left(gb) < 1)
1413  return AVERROR_INVALIDDATA;
1414 
1415  token = get_vlc2(gb, vlc_tables[coeff_i], 11, 3);
1416 
1417  /* use the token to get a zero run, a coefficient, and an eob run */
1418  if ((unsigned) token <= 6U) {
1419  eob_run = get_eob_run(gb, token);
1420  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1421  eob_tracker[coeff_i] = eob_run - 1;
1422  return 0;
1423  } else if (token >= 0) {
1424  zero_run = get_coeff(gb, token, &coeff);
1425 
1426  if (zero_run) {
1427  if (coeff_i + zero_run > 64) {
1428  av_log(s->avctx, AV_LOG_DEBUG,
1429  "Invalid zero run of %d with %d coeffs left\n",
1430  zero_run, 64 - coeff_i);
1431  zero_run = 64 - coeff_i;
1432  }
1433  *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
1434  coeff_i += zero_run;
1435  } else {
1436  if (!coeff_i)
1437  s->all_fragments[fragment].dc = coeff;
1438 
1439  *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
1440  }
1441  coeff_i++;
1442  if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
1443  return 0; /* stop */
1444  } else {
1445  av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
1446  return -1;
1447  }
1448  }
1449  *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
1450  eob_tracker[coeff_i]--;
1451  return 0;
1452 }
1453 
1454 static void vp4_dc_predictor_reset(VP4Predictor *p)
1455 {
1456  p->dc = 0;
1457  p->type = VP4_DC_UNDEFINED;
1458 }
1459 
1460 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1461 {
1462  for (int i = 0; i < 4; i++)
1463  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1464 
1465  for (int j = 1; j < 5; j++)
1466  for (int i = 0; i < 4; i++)
1467  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1468 }
1469 
1470 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1471 {
1472  for (int i = 0; i < 4; i++)
1473  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1474 
1475  for (int i = 1; i < 5; i++)
1476  dc_pred[i][0] = dc_pred[i][4];
1477 }
1478 
1479 /* note: dc_pred points to the current block */
1480 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1481 {
1482  int count = 0;
1483  int dc = 0;
1484 
1485  if (dc_pred[-6].type == type) {
1486  dc += dc_pred[-6].dc;
1487  count++;
1488  }
1489 
1490  if (dc_pred[6].type == type) {
1491  dc += dc_pred[6].dc;
1492  count++;
1493  }
1494 
1495  if (count != 2 && dc_pred[-1].type == type) {
1496  dc += dc_pred[-1].dc;
1497  count++;
1498  }
1499 
1500  if (count != 2 && dc_pred[1].type == type) {
1501  dc += dc_pred[1].dc;
1502  count++;
1503  }
1504 
1505  /* using division instead of shift to correctly handle negative values */
1506  return count == 2 ? dc / 2 : last_dc[type];
1507 }
1508 
1509 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1510 {
1511  int16_t *base = s->dct_tokens_base;
1512  for (int plane = 0; plane < 3; plane++) {
1513  for (int i = 0; i < 64; i++) {
1514  s->dct_tokens[plane][i] = base;
1515  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1516  }
1517  }
1518 }
1519 
/* Unpack all DCT coefficient data for a VP4 frame: blocks are visited in
 * Hilbert order inside each superblock, and DC prediction is interleaved
 * with token decoding (unlike VP3, which predicts DC in a separate pass). */
static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    const VLCElem *tables[2][64];
    int eob_tracker[64];
    VP4Predictor dc_pred[6][6];
    int last_dc[NB_VP4_DC_TYPES];

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of DC/AC VLC tables */

    /* DC table group */
    tables[0][0] = coeff_vlc[dc_y_table];
    tables[1][0] = coeff_vlc[dc_c_table];
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        tables[0][i] = coeff_vlc[ac_y_table + 16];
        tables[1][i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        tables[0][i] = coeff_vlc[ac_y_table + 32];
        tables[1][i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        tables[0][i] = coeff_vlc[ac_y_table + 48];
        tables[1][i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        tables[0][i] = coeff_vlc[ac_y_table + 64];
        tables[1][i] = coeff_vlc[ac_c_table + 64];
    }

    vp4_set_tokens_base(s);

    memset(last_dc, 0, sizeof(last_dc));

    for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
        /* EOB runs do not carry over between planes */
        memset(eob_tracker, 0, sizeof(eob_tracker));

        /* initialise dc prediction */
        for (int i = 0; i < s->fragment_width[!!plane]; i++)
            vp4_dc_predictor_reset(&s->dc_pred_row[i]);

        for (int j = 0; j < 6; j++)
            for (int i = 0; i < 6; i++)
                vp4_dc_predictor_reset(&dc_pred[j][i]);

        for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
            for (int sb_x = 0; sb_x * 4 < s->fragment_width[!!plane]; sb_x++) {
                vp4_dc_pred_before(s, dc_pred, sb_x);
                for (int j = 0; j < 16; j++) {
                    int hx = hilbert_offset[j][0];
                    int hy = hilbert_offset[j][1];
                    int x = 4 * sb_x + hx;
                    int y = 4 * sb_y + hy;
                    /* +1 offsets skip the window's border row/column */
                    VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
                    int fragment, dc_block_type;

                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
                        continue;

                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;

                    if (s->all_fragments[fragment].coding_method == MODE_COPY)
                        continue;

                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
                        return -1;

                    dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];

                    s->all_fragments[fragment].dc +=
                        vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);

                    /* note: comma operator — both assignments form one statement */
                    this_dc_pred->type = dc_block_type,
                    this_dc_pred->dc   = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
                }
                vp4_dc_pred_after(s, dc_pred, sb_x);
            }
        }
    }

    /* rewind the token streams for consumption by vp3_dequant() */
    vp4_set_tokens_base(s);

    return 0;
}
1622 #endif
1623 
1624 /*
1625  * This function reverses the DC prediction for each coded fragment in
1626  * the frame. Much of this function is adapted directly from the original
1627  * VP3 source code.
1628  */
1629 #define COMPATIBLE_FRAME(x) \
1630  (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
1631 #define DC_COEFF(u) s->all_fragments[u].dc
1632 
                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{
/* bit flags naming which neighbors are available for prediction */
#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     *   0: up-left multiplier
     *   1: up multiplier
     *   2: up-right multiplier
     *   3: left multiplier
     * (weights are fixed-point with a denominator of 128, indexed by the
     * PUL|PU|PUR|PL availability mask)
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0 },
        {    0,   0,   0, 128 }, // PL
        {    0,   0, 128,   0 }, // PUR
        {    0,   0,  53,  75 }, // PUR|PL
        {    0, 128,   0,   0 }, // PU
        {    0,  64,   0,  64 }, // PU |PL
        {    0, 128,   0,   0 }, // PU |PUR
        {    0,   0,  53,  75 }, // PU |PUR|PL
        {  128,   0,   0,   0 }, // PUL
        {    0,   0,   0, 128 }, // PUL|PL
        {   64,   0,  64,   0 }, // PUL|PUR
        {    0,   0,  53,  75 }, // PUL|PUR|PL
        {    0, 128,   0,   0 }, // PUL|PU
        { -104, 116,   0, 116 }, // PUL|PU |PL
        {   24,  80,  24,   0 }, // PUL|PU |PUR
        { -104, 116,   0, 116 }  // PUL|PU |PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1, /* MODE_INTER_NO_MV */
        0, /* MODE_INTRA */
        1, /* MODE_INTER_PLUS_MV */
        1, /* MODE_INTER_LAST_MV */
        1, /* MODE_INTER_PRIOR_MV */
        2, /* MODE_USING_GOLDEN */
        2, /* MODE_GOLDEN_MV */
        1, /* MODE_INTER_FOUR_MV */
        3  /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul =
    vu  =
    vur =
    vl  = 0;
    last_dc[0] =
    last_dc[1] =
    last_dc[2] = 0;

    /* for each fragment row... */
    for (int y = 0; y < fragment_height; y++) {
        /* for each fragment in a row... */
        for (int x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {
                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                /* collect the DC of each neighbor that exists and was coded
                 * from a compatible reference frame */
                transform = 0;
                if (x) {
                    l  = i - 1;
                    vl = DC_COEFF(l);
                    if (COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if (y) {
                    u  = i - fragment_width;
                    vu = DC_COEFF(u);
                    if (COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if (x) {
                        ul  = i - fragment_width - 1;
                        vul = DC_COEFF(ul);
                        if (COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if (x + 1 < fragment_width) {
                        ur  = i - fragment_width + 1;
                        vur = DC_COEFF(ur);
                        if (COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {
                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {
                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}
1781 
/*
 * Apply the VP3 loop filter to the edges of all coded blocks of one
 * plane, for fragment rows [ystart, yend). Each coded block filters its
 * own left/top edge, and its right/bottom edge only when the neighbor
 * there is uncoded (MODE_COPY), so shared edges are filtered once in the
 * bitstream-mandated order.
 */
static void apply_loop_filter(Vp3DecodeContext *s, int plane,
                              int ystart, int yend)
{
    /* bounding_values_array is biased so negative differences can index it */
    int *bounding_values = s->bounding_values_array + 127;

    int width           = s->fragment_width[!!plane];
    int height          = s->fragment_height[!!plane];
    int fragment        = s->fragment_start[plane] + ystart * width;
    ptrdiff_t stride    = s->current_frame.f->linesize[plane];
    uint8_t *plane_data = s->current_frame.f->data[plane];
    /* walk the plane bottom-up when the image is not flipped */
    if (!s->flipped_image)
        stride = -stride;
    plane_data += s->data_offset[plane] + 8 * ystart * stride;

    for (int y = ystart; y < yend; y++) {
        for (int x = 0; x < width; x++) {
            /* This code basically just deblocks on the edges of coded blocks.
             * However, it has to be much more complicated because of the
             * brain damaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
            if (s->all_fragments[fragment].coding_method != MODE_COPY) {
                /* do not perform left edge filter for left columns frags */
                if (x > 0) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform top edge filter for top row fragments */
                if (y > 0) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x,
                        stride, bounding_values);
                }

                /* do not perform right edge filter for right column
                 * fragments or if right fragment neighbor is also coded
                 * in this frame (it will be filtered in next iteration) */
                if ((x < width - 1) &&
                    (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
                    s->vp3dsp.h_loop_filter(
                        plane_data + 8 * x + 8,
                        stride, bounding_values);
                }

                /* do not perform bottom edge filter for bottom row
                 * fragments or if bottom fragment neighbor is also coded
                 * in this frame (it will be filtered in the next row) */
                if ((y < height - 1) &&
                    (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
                    s->vp3dsp.v_loop_filter(
                        plane_data + 8 * x + 8 * stride,
                        stride, bounding_values);
                }
            }

            fragment++;
        }
        plane_data += 8 * stride;
    }
}
1843 
1844 /**
1845  * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
1846  * for the next block in coding order
1847  */
static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
                              int plane, int inter, int16_t block[64])
{
    const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    const uint8_t *perm = s->idct_scantable;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        /* the low 2 bits of a token select its type; the payload sits in
         * the upper bits (see the TOKEN_* encoders used during unpacking) */
        switch (token & 3) {
        case 0: // EOB
            /* the EOB payload is a run count; decrement it in place and
             * only advance the stream once the run is exhausted */
            if (--token < 4) // 0-3 are token types so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            /* bits 2-8 hold the run length, bits 9+ the coefficient value */
            i += (token >> 2) & 0x7f;
            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
                return i;
            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
1889 
1890 /**
1891  * called when all pixels up to row y are complete
1892  */
1894 {
1895  int h, cy;
1897 
1898  if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
1899  int y_flipped = s->flipped_image ? s->height - y : y;
1900 
1901  /* At the end of the frame, report INT_MAX instead of the height of
1902  * the frame. This makes the other threads' ff_thread_await_progress()
1903  * calls cheaper, because they don't have to clip their values. */
1904  ff_progress_frame_report(&s->current_frame,
1905  y_flipped == s->height ? INT_MAX
1906  : y_flipped - 1);
1907  }
1908 
1909  if (!s->avctx->draw_horiz_band)
1910  return;
1911 
1912  h = y - s->last_slice_end;
1913  s->last_slice_end = y;
1914  y -= h;
1915 
1916  if (!s->flipped_image)
1917  y = s->height - y - h;
1918 
1919  cy = y >> s->chroma_y_shift;
1920  offset[0] = s->current_frame.f->linesize[0] * y;
1921  offset[1] = s->current_frame.f->linesize[1] * cy;
1922  offset[2] = s->current_frame.f->linesize[2] * cy;
1923  for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
1924  offset[i] = 0;
1925 
1926  emms_c();
1927  s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
1928 }
1929 
1930 /**
1931  * Wait for the reference frame of the current fragment.
1932  * The progress value is in luma pixel rows.
1933  */
1935  int motion_y, int y)
1936 {
1937  const ProgressFrame *ref_frame;
1938  int ref_row;
1939  int border = motion_y & 1;
1940 
1941  if (fragment->coding_method == MODE_USING_GOLDEN ||
1942  fragment->coding_method == MODE_GOLDEN_MV)
1943  ref_frame = &s->golden_frame;
1944  else
1945  ref_frame = &s->last_frame;
1946 
1947  ref_row = y + (motion_y >> 1);
1948  ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);
1949 
1951 }
1952 
1953 #if CONFIG_VP4_DECODER
1954 /**
1955  * @return non-zero if temp (edge_emu_buffer) was populated
1956  */
static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
                              const uint8_t *motion_source, ptrdiff_t stride,
                              int src_x, int src_y, uint8_t *temp)
{
    /* chroma motion vectors are stored at a finer granularity than luma */
    int motion_shift = plane ? 4 : 2;
    int subpel_mask = plane ? 3 : 1;
    int *bounding_values = s->bounding_values_array + 127;

    int x, y;
    int x2, y2;
    int x_subpel, y_subpel;
    int x_offset, y_offset;

    int block_width = plane ? 8 : 16;
    int plane_width = s->width >> (plane && s->chroma_x_shift);
    int plane_height = s->height >> (plane && s->chroma_y_shift);

/* 10x10 work area (8x8 block plus a one-pixel border on each side),
 * padded to 12 rows/columns for the 12-tap edge filters */
#define loop_stride 12
    uint8_t loop[12 * loop_stride];

    /* using division instead of shift to correctly handle negative values */
    x = 8 * bx + motion_x / motion_shift;
    y = 8 * by + motion_y / motion_shift;

    x_subpel = motion_x & subpel_mask;
    y_subpel = motion_y & subpel_mask;

    if (x_subpel || y_subpel) {
        /* sub-pixel motion: widen the source window by one pixel in the
         * direction of the motion */
        x--;
        y--;

        if (x_subpel)
            x = FFMIN(x, x + FFSIGN(motion_x));

        if (y_subpel)
            y = FFMIN(y, y + FFSIGN(motion_y));

        x2 = x + block_width;
        y2 = y + block_width;

        /* fully outside the plane: nothing to filter */
        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
            return 0;

        x_offset = (-(x + 2) & 7) + 2;
        y_offset = (-(y + 2) & 7) + 2;

        av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel));

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

        if (x_offset <= 8 + x_subpel)
            ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);

        if (y_offset <= 8 + y_subpel)
            ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);

    } else {

        /* whole-pixel motion: filter only if the block straddles an
         * 8-pixel block boundary in the reference */
        x_offset = -x & 7;
        y_offset = -y & 7;

        if (!x_offset && !y_offset)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

/* some h/v filter implementations require 8-byte alignment; fall back to
 * the unaligned variant when the pointer is not aligned */
#define safe_loop_filter(name, ptr, stride, bounding_values) \
    if (VP3_LOOP_FILTER_NO_UNALIGNED_SUPPORT && (uintptr_t)(ptr) & 7) \
        s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
    else \
        s->vp3dsp.name(ptr, stride, bounding_values);

        if (x_offset)
            safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);

        if (y_offset)
            safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
    }

    /* copy the filtered 9x9 region back out for the caller to use as the
     * motion-compensation source */
    for (int i = 0; i < 9; i++)
        memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);

    return 1;
}
2049 #endif
2050 
2051 /*
2052  * Perform the final rendering for a particular slice of data.
2053  * The slice number ranges from 0..(c_superblock_height - 1).
2054  */
2055 static void render_slice(Vp3DecodeContext *s, int slice)
2056 {
2057  int16_t *block = s->block;
2058  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2059  /* When decoding keyframes, the earlier frames may not be available,
2060  * so we just use the current frame in this case instead;
2061  * it also avoid using undefined pointer arithmetic. Nothing is
2062  * ever read from these frames in case of a keyframe. */
2063  const AVFrame *last_frame = s->last_frame.f ?
2064  s->last_frame.f : s->current_frame.f;
2065  const AVFrame *golden_frame = s->golden_frame.f ?
2066  s->golden_frame.f : s->current_frame.f;
2067  int motion_halfpel_index;
2068  int first_pixel;
2069 
2070  if (slice >= s->c_superblock_height)
2071  return;
2072 
2073  for (int plane = 0; plane < 3; plane++) {
2074  uint8_t *output_plane = s->current_frame.f->data[plane] +
2075  s->data_offset[plane];
2076  const uint8_t *last_plane = last_frame->data[plane] +
2077  s->data_offset[plane];
2078  const uint8_t *golden_plane = golden_frame->data[plane] +
2079  s->data_offset[plane];
2080  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2081  int plane_width = s->width >> (plane && s->chroma_x_shift);
2082  int plane_height = s->height >> (plane && s->chroma_y_shift);
2083  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2084 
2085  int sb_y = slice << (!plane && s->chroma_y_shift);
2086  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2087  int slice_width = plane ? s->c_superblock_width
2088  : s->y_superblock_width;
2089 
2090  int fragment_width = s->fragment_width[!!plane];
2091  int fragment_height = s->fragment_height[!!plane];
2092  int fragment_start = s->fragment_start[plane];
2093 
2094  int do_await = !plane && HAVE_THREADS &&
2095  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2096 
2097  if (!s->flipped_image)
2098  stride = -stride;
2099  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2100  continue;
2101 
2102  /* for each superblock row in the slice (both of them)... */
2103  for (; sb_y < slice_height; sb_y++) {
2104  /* for each superblock in a row... */
2105  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2106  /* for each block in a superblock... */
2107  for (int j = 0; j < 16; j++) {
2108  int x = 4 * sb_x + hilbert_offset[j][0];
2109  int y = 4 * sb_y + hilbert_offset[j][1];
2110  int fragment = y * fragment_width + x;
2111 
2112  int i = fragment_start + fragment;
2113 
2114  // bounds check
2115  if (x >= fragment_width || y >= fragment_height)
2116  continue;
2117 
2118  first_pixel = 8 * y * stride + 8 * x;
2119 
2120  if (do_await &&
2121  s->all_fragments[i].coding_method != MODE_INTRA)
2122  await_reference_row(s, &s->all_fragments[i],
2123  motion_val[fragment][1],
2124  (16 * y) >> s->chroma_y_shift);
2125 
2126  /* transform if this block was coded */
2127  if (s->all_fragments[i].coding_method != MODE_COPY) {
2128  const uint8_t *motion_source;
2129  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2130  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2131  motion_source = golden_plane;
2132  else
2133  motion_source = last_plane;
2134 
2135  motion_source += first_pixel;
2136  motion_halfpel_index = 0;
2137 
2138  /* sort out the motion vector if this fragment is coded
2139  * using a motion vector method */
2140  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2141  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2142  int src_x, src_y;
2143  int standard_mc = 1;
2144  motion_x = motion_val[fragment][0];
2145  motion_y = motion_val[fragment][1];
2146 #if CONFIG_VP4_DECODER
2147  if (plane && s->version >= 2) {
2148  motion_x = (motion_x >> 1) | (motion_x & 1);
2149  motion_y = (motion_y >> 1) | (motion_y & 1);
2150  }
2151 #endif
2152 
2153  src_x = (motion_x >> 1) + 8 * x;
2154  src_y = (motion_y >> 1) + 8 * y;
2155 
2156  motion_halfpel_index = motion_x & 0x01;
2157  motion_source += (motion_x >> 1);
2158 
2159  motion_halfpel_index |= (motion_y & 0x01) << 1;
2160  motion_source += ((motion_y >> 1) * stride);
2161 
2162 #if CONFIG_VP4_DECODER
2163  if (s->version >= 2) {
2164  uint8_t *temp = s->edge_emu_buffer;
2165  if (stride < 0)
2166  temp -= 8 * stride;
2167  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2168  motion_source = temp;
2169  standard_mc = 0;
2170  }
2171  }
2172 #endif
2173 
2174  if (standard_mc && (
2175  src_x < 0 || src_y < 0 ||
2176  src_x + 9 >= plane_width ||
2177  src_y + 9 >= plane_height)) {
2178  uint8_t *temp = s->edge_emu_buffer;
2179  if (stride < 0)
2180  temp -= 8 * stride;
2181 
2182  s->vdsp.emulated_edge_mc(temp, motion_source,
2183  stride, stride,
2184  9, 9, src_x, src_y,
2185  plane_width,
2186  plane_height);
2187  motion_source = temp;
2188  }
2189  }
2190 
2191  /* first, take care of copying a block from either the
2192  * previous or the golden frame */
2193  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2194  /* Note, it is possible to implement all MC cases
2195  * with put_no_rnd_pixels_l2 which would look more
2196  * like the VP3 source but this would be slower as
2197  * put_no_rnd_pixels_tab is better optimized */
2198  if (motion_halfpel_index != 3) {
2199  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2200  output_plane + first_pixel,
2201  motion_source, stride, 8);
2202  } else {
2203  /* d is 0 if motion_x and _y have the same sign,
2204  * else -1 */
2205  int d = (motion_x ^ motion_y) >> 31;
2206  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2207  motion_source - d,
2208  motion_source + stride + 1 + d,
2209  stride, 8);
2210  }
2211  }
2212 
2213  /* invert DCT and place (or add) in final output */
2214 
2215  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2216  vp3_dequant(s, s->all_fragments + i,
2217  plane, 0, block);
2218  s->vp3dsp.idct_put(output_plane + first_pixel,
2219  stride,
2220  block);
2221  } else {
2222  if (vp3_dequant(s, s->all_fragments + i,
2223  plane, 1, block)) {
2224  s->vp3dsp.idct_add(output_plane + first_pixel,
2225  stride,
2226  block);
2227  } else {
2228  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2229  stride, block);
2230  }
2231  }
2232  } else {
2233  /* copy directly from the previous frame */
2234  s->hdsp.put_pixels_tab[1][0](
2235  output_plane + first_pixel,
2236  last_plane + first_pixel,
2237  stride, 8);
2238  }
2239  }
2240  }
2241 
2242  // Filter up to the last row in the superblock row
2243  if (s->version < 2 && !s->skip_loop_filter)
2244  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2245  FFMIN(4 * sb_y + 3, fragment_height - 1));
2246  }
2247  }
2248 
2249  /* this looks like a good place for slice dispatch... */
2250  /* algorithm:
2251  * if (slice == s->macroblock_height - 1)
2252  * dispatch (both last slice & 2nd-to-last slice);
2253  * else if (slice > 0)
2254  * dispatch (slice - 1);
2255  */
2256 
2257  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2258  s->height - 16));
2259 }
2260 
2261 static av_cold void init_tables_once(void)
2262 {
2264 
2266  SUPERBLOCK_VLC_BITS, 34,
2268  NULL, 0, 0, 1, 0);
2269 
2272  NULL, 0, 0, 0, 0);
2273 
2275  &motion_vector_vlc_table[0][1], 2,
2276  &motion_vector_vlc_table[0][0], 2, 1,
2277  -31, 0);
2278 
2280  mode_code_vlc_len, 1,
2281  NULL, 0, 0, 0, 0);
2282 
2283 #if CONFIG_VP4_DECODER
2284  for (int j = 0; j < 2; j++)
2285  for (int i = 0; i < 7; i++) {
2286  vp4_mv_vlc_table[j][i] =
2288  &vp4_mv_vlc[j][i][0][1], 2,
2289  &vp4_mv_vlc[j][i][0][0], 2, 1,
2290  -31, 0);
2291  }
2292 
2293  /* version >= 2 */
2294  for (int i = 0; i < 2; i++) {
2295  block_pattern_vlc[i] =
2296  ff_vlc_init_tables(&state, 5, 14,
2297  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2298  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0);
2299  }
2300 #endif
2301 }
2302 
2303 /// Allocate tables for per-frame data in Vp3DecodeContext
2305 {
2306  Vp3DecodeContext *s = avctx->priv_data;
2307  int y_fragment_count, c_fragment_count;
2308 
2309  free_tables(avctx);
2310 
2311  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2312  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2313 
2314  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2315  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2316  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2317 
2318  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2319  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2320  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2321 
2322  s->dct_tokens_base = av_calloc(s->fragment_count,
2323  64 * sizeof(*s->dct_tokens_base));
2324  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2325  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2326 
2327  /* work out the block mapping tables */
2328  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2329  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2330 
2331  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2332 
2333  if (!s->superblock_coding || !s->all_fragments ||
2334  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2335  !s->nkf_coded_fragment_list ||
2336  !s->superblock_fragments || !s->macroblock_coding ||
2337  !s->dc_pred_row ||
2338  !s->motion_val[0] || !s->motion_val[1]) {
2339  return -1;
2340  }
2341 
2343 
2344  return 0;
2345 }
2346 
2347 
2348 static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
2349 {
2350  CoeffVLCs *vlcs = obj;
2351 
2352  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++)
2353  ff_vlc_free(&vlcs->vlcs[i]);
2354 }
2355 
2357 {
2358  static AVOnce init_static_once = AV_ONCE_INIT;
2359  Vp3DecodeContext *s = avctx->priv_data;
2360  int ret;
2361  int c_width;
2362  int c_height;
2363  int y_fragment_count, c_fragment_count;
2364 
2365  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2366  s->version = 3;
2367 #if !CONFIG_VP4_DECODER
2368  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2370 #endif
2371  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2372  s->version = 0;
2373  else
2374  s->version = 1;
2375 
2376  s->avctx = avctx;
2377  s->width = FFALIGN(avctx->coded_width, 16);
2378  s->height = FFALIGN(avctx->coded_height, 16);
2379  if (s->width < 18)
2380  return AVERROR_PATCHWELCOME;
2381  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2382  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2384  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2385  ff_videodsp_init(&s->vdsp, 8);
2386  ff_vp3dsp_init(&s->vp3dsp);
2387 
2388  for (int i = 0; i < 64; i++) {
2389 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2390  s->idct_permutation[i] = TRANSPOSE(i);
2391  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2392 #undef TRANSPOSE
2393  }
2394 
2395  /* initialize to an impossible value which will force a recalculation
2396  * in the first frame decode */
2397  for (int i = 0; i < 3; i++)
2398  s->qps[i] = -1;
2399 
2400  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2401  if (ret)
2402  return ret;
2403 
2404  s->y_superblock_width = (s->width + 31) / 32;
2405  s->y_superblock_height = (s->height + 31) / 32;
2406  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2407 
2408  /* work out the dimensions for the C planes */
2409  c_width = s->width >> s->chroma_x_shift;
2410  c_height = s->height >> s->chroma_y_shift;
2411  s->c_superblock_width = (c_width + 31) / 32;
2412  s->c_superblock_height = (c_height + 31) / 32;
2413  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2414 
2415  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2416  s->u_superblock_start = s->y_superblock_count;
2417  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2418 
2419  s->macroblock_width = (s->width + 15) / 16;
2420  s->macroblock_height = (s->height + 15) / 16;
2421  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2422  s->c_macroblock_width = (c_width + 15) / 16;
2423  s->c_macroblock_height = (c_height + 15) / 16;
2424  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2425  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2426 
2427  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2428  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2429  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2430  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2431 
2432  /* fragment count covers all 8x8 blocks for all 3 planes */
2433  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2434  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2435  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2436  s->fragment_start[1] = y_fragment_count;
2437  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2438 
2439  if (!s->theora_tables) {
2440  for (int i = 0; i < 64; i++) {
2441  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2442  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2443  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2444  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2445  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2446  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2447  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2448  }
2449 
2450  for (int inter = 0; inter < 2; inter++) {
2451  for (int plane = 0; plane < 3; plane++) {
2452  s->qr_count[inter][plane] = 1;
2453  s->qr_size[inter][plane][0] = 63;
2454  s->qr_base[inter][plane][0] =
2455  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2456  }
2457  }
2458  }
2459 
2460  if (ff_thread_sync_ref(avctx, offsetof(Vp3DecodeContext, coeff_vlc)) != FF_THREAD_IS_COPY) {
2461  CoeffVLCs *vlcs = av_refstruct_alloc_ext(sizeof(*s->coeff_vlc), 0,
2463  if (!vlcs)
2464  return AVERROR(ENOMEM);
2465 
2466  s->coeff_vlc = vlcs;
2467 
2468  if (!s->theora_tables) {
2469  const uint8_t (*bias_tabs)[32][2];
2470 
2471  /* init VLC tables */
2472  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2473  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2474  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, 32,
2475  &bias_tabs[i][0][1], 2,
2476  &bias_tabs[i][0][0], 2, 1,
2477  0, 0, avctx);
2478  if (ret < 0)
2479  return ret;
2480  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2481  }
2482  } else {
2483  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2484  const HuffTable *tab = &s->huffman_table[i];
2485 
2486  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, tab->nb_entries,
2487  &tab->entries[0].len, sizeof(*tab->entries),
2488  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2489  0, 0, avctx);
2490  if (ret < 0)
2491  return ret;
2492  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2493  }
2494  }
2495  }
2496 
2497  ff_thread_once(&init_static_once, init_tables_once);
2498 
2499  return allocate_tables(avctx);
2500 }
2501 
2502 #if HAVE_THREADS
2503 static void ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
2504 {
2505  ff_progress_frame_replace(&dst->current_frame, &src->current_frame);
2506  ff_progress_frame_replace(&dst->golden_frame, &src->golden_frame);
2507 }
2508 
2509 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2510 {
2511  Vp3DecodeContext *s = dst->priv_data;
2512  const Vp3DecodeContext *s1 = src->priv_data;
2513  int qps_changed = 0;
2514 
2515  // copy previous frame data
2516  ref_frames(s, s1);
2517 
2518  if (s != s1) {
2519  // copy qscale data if necessary
2520  for (int i = 0; i < 3; i++) {
2521  if (s->qps[i] != s1->qps[1]) {
2522  qps_changed = 1;
2523  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2524  }
2525  }
2526 
2527  if (s->qps[0] != s1->qps[0])
2528  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2529  sizeof(s->bounding_values_array));
2530 
2531  if (qps_changed) {
2532  memcpy(s->qps, s1->qps, sizeof(s->qps));
2533  s->nqps = s1->nqps;
2534  }
2535  }
2536  return 0;
2537 }
2538 #endif
2539 
2541  int *got_frame, AVPacket *avpkt)
2542 {
2543  const uint8_t *buf = avpkt->data;
2544  int buf_size = avpkt->size;
2545  Vp3DecodeContext *s = avctx->priv_data;
2546  GetBitContext gb;
2547  int ret;
2548 
2549  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2550  return ret;
2551 
2552 #if CONFIG_THEORA_DECODER
2553  if (s->theora && get_bits1(&gb)) {
2554  int type = get_bits(&gb, 7);
2555  skip_bits_long(&gb, 6*8); /* "theora" */
2556 
2557  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2558  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2559  return AVERROR_PATCHWELCOME;
2560  }
2561  if (type == 0) {
2562  vp3_decode_end(avctx);
2563  ret = theora_decode_header(avctx, &gb);
2564 
2565  if (ret >= 0)
2566  ret = vp3_decode_init(avctx);
2567  if (ret < 0) {
2568  vp3_decode_end(avctx);
2569  return ret;
2570  }
2571  return buf_size;
2572  } else if (type == 2) {
2573  vp3_decode_end(avctx);
2574  ret = theora_decode_tables(avctx, &gb);
2575  if (ret >= 0)
2576  ret = vp3_decode_init(avctx);
2577  if (ret < 0) {
2578  vp3_decode_end(avctx);
2579  return ret;
2580  }
2581  return buf_size;
2582  }
2583 
2584  av_log(avctx, AV_LOG_ERROR,
2585  "Header packet passed to frame decoder, skipping\n");
2586  return -1;
2587  }
2588 #endif
2589 
2590  s->keyframe = !get_bits1(&gb);
2591  if (!s->all_fragments) {
2592  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2593  return -1;
2594  }
2595  if (!s->theora)
2596  skip_bits(&gb, 1);
2597 
2598  int last_qps[3];
2599  for (int i = 0; i < 3; i++)
2600  last_qps[i] = s->qps[i];
2601 
2602  s->nqps = 0;
2603  do {
2604  s->qps[s->nqps++] = get_bits(&gb, 6);
2605  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2606  for (int i = s->nqps; i < 3; i++)
2607  s->qps[i] = -1;
2608 
2609  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2610  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2611  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2612 
2613  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2614  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2615  : AVDISCARD_NONKEY);
2616 
2617  if (s->qps[0] != last_qps[0])
2619 
2620  for (int i = 0; i < s->nqps; i++)
2621  // reinit all dequantizers if the first one changed, because
2622  // the DC of the first quantizer must be used for all matrices
2623  if (s->qps[i] != last_qps[i] || s->qps[0] != last_qps[0])
2624  init_dequantizer(s, i);
2625 
2626  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2627  return buf_size;
2628 
2629  ret = ff_progress_frame_get_buffer(avctx, &s->last_frame,
2631  if (ret < 0) {
2632  // Don't goto error here, as one can't report progress on or
2633  // unref a non-existent frame.
2634  return ret;
2635  }
2636  FFSWAP(ProgressFrame, s->last_frame, s->current_frame);
2637  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2639  if (s->keyframe)
2640  s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
2641  else
2642  s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;
2643 
2644  if (!s->edge_emu_buffer) {
2645  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2646  if (!s->edge_emu_buffer) {
2647  ret = AVERROR(ENOMEM);
2648  goto error;
2649  }
2650  }
2651 
2652  if (s->keyframe) {
2653  if (!s->theora) {
2654  skip_bits(&gb, 4); /* width code */
2655  skip_bits(&gb, 4); /* height code */
2656  if (s->version) {
2657  int version = get_bits(&gb, 5);
2658 #if !CONFIG_VP4_DECODER
2659  if (version >= 2) {
2660  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2662  goto error;
2663  }
2664 #endif
2665  s->version = version;
2666  if (avctx->frame_num == 0)
2667  av_log(s->avctx, AV_LOG_DEBUG,
2668  "VP version: %d\n", s->version);
2669  }
2670  }
2671  if (s->version || s->theora) {
2672  if (get_bits1(&gb))
2673  av_log(s->avctx, AV_LOG_ERROR,
2674  "Warning, unsupported keyframe coding type?!\n");
2675  skip_bits(&gb, 2); /* reserved? */
2676 
2677 #if CONFIG_VP4_DECODER
2678  if (s->version >= 2) {
2679  int mb_height, mb_width;
2680  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2681 
2682  mb_height = get_bits(&gb, 8);
2683  mb_width = get_bits(&gb, 8);
2684  if (mb_height != s->macroblock_height ||
2685  mb_width != s->macroblock_width)
2686  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2687 
2688  mb_width_mul = get_bits(&gb, 5);
2689  mb_width_div = get_bits(&gb, 3);
2690  mb_height_mul = get_bits(&gb, 5);
2691  mb_height_div = get_bits(&gb, 3);
2692  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2693  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multiplier/divider");
2694 
2695  if (get_bits(&gb, 2))
2696  avpriv_request_sample(s->avctx, "unknown bits");
2697  }
2698 #endif
2699  }
2700  ff_progress_frame_replace(&s->golden_frame, &s->current_frame);
2701  } else {
2702  if (!s->golden_frame.f) {
2703  av_log(s->avctx, AV_LOG_WARNING,
2704  "vp3: first frame not a keyframe\n");
2705 
2706  if ((ret = ff_progress_frame_get_buffer(avctx, &s->golden_frame,
2707  AV_GET_BUFFER_FLAG_REF)) < 0)
2708  goto error;
2709  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2710  ff_progress_frame_replace(&s->last_frame, &s->golden_frame);
2711  ff_progress_frame_report(&s->golden_frame, INT_MAX);
2712  }
2713  }
2714  ff_thread_finish_setup(avctx);
2715 
2716  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2717 
2718  if (s->version < 2) {
2719  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2720  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2721  goto error;
2722  }
2723 #if CONFIG_VP4_DECODER
2724  } else {
2725  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2726  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2727  goto error;
2728  }
2729 #endif
2730  }
2731  if ((ret = unpack_modes(s, &gb)) < 0) {
2732  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2733  goto error;
2734  }
2735  if (ret = unpack_vectors(s, &gb)) {
2736  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2737  goto error;
2738  }
2739  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2740  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2741  goto error;
2742  }
2743 
2744  if (s->version < 2) {
2745  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2746  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2747  goto error;
2748  }
2749 #if CONFIG_VP4_DECODER
2750  } else {
2751  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2752  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2753  goto error;
2754  }
2755 #endif
2756  }
2757 
2758  for (int i = 0; i < 3; i++) {
2759  int height = s->height >> (i && s->chroma_y_shift);
2760  if (s->flipped_image)
2761  s->data_offset[i] = 0;
2762  else
2763  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2764  }
2765 
2766  s->last_slice_end = 0;
2767  for (int i = 0; i < s->c_superblock_height; i++)
2768  render_slice(s, i);
2769 
2770  // filter the last row
2771  if (s->version < 2)
2772  for (int i = 0; i < 3; i++) {
2773  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2774  apply_loop_filter(s, i, row, row + 1);
2775  }
2776  vp3_draw_horiz_band(s, s->height);
2777 
2778  ff_progress_frame_unref(&s->last_frame);
2779 
2780  /* output frame, offset as needed */
2781  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2782  return ret;
2783 
2784  frame->crop_left = s->offset_x;
2785  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2786  frame->crop_top = s->offset_y;
2787  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2788 
2789  *got_frame = 1;
2790 
2791  return buf_size;
2792 
2793 error:
2794  ff_progress_frame_report(&s->current_frame, INT_MAX);
2795  ff_progress_frame_unref(&s->last_frame);
2796 
2797  return ret;
2798 }
2799 
2800 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2801  AVCodecContext *avctx)
2802 {
2803  if (get_bits1(gb)) {
2804  int token;
2805  if (huff->nb_entries >= 32) { /* overflow */
2806  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2807  return -1;
2808  }
2809  token = get_bits(gb, 5);
2810  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2811  length, huff->nb_entries, token);
2812  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2813  } else {
2814  /* The following bound follows from the fact that nb_entries <= 32. */
2815  if (length >= 31) { /* overflow */
2816  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2817  return -1;
2818  }
2819  length++;
2820  if (read_huffman_tree(huff, gb, length, avctx))
2821  return -1;
2822  if (read_huffman_tree(huff, gb, length, avctx))
2823  return -1;
2824  }
2825  return 0;
2826 }
2827 
2828 #if CONFIG_THEORA_DECODER
2829 static const enum AVPixelFormat theora_pix_fmts[4] = {
2831 };
2832 
2833 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2834 {
2835  Vp3DecodeContext *s = avctx->priv_data;
2836  int visible_width, visible_height, colorspace;
2837  uint8_t offset_x = 0, offset_y = 0;
2838  int ret;
2839  AVRational fps, aspect;
2840 
2841  if (get_bits_left(gb) < 206)
2842  return AVERROR_INVALIDDATA;
2843 
2844  s->theora_header = 0;
2845  s->theora = get_bits(gb, 24);
2846  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2847  if (!s->theora) {
2848  s->theora = 1;
2849  avpriv_request_sample(s->avctx, "theora 0");
2850  }
2851 
2852  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2853  * but previous versions have the image flipped relative to vp3 */
2854  if (s->theora < 0x030200) {
2855  s->flipped_image = 1;
2856  av_log(avctx, AV_LOG_DEBUG,
2857  "Old (<alpha3) Theora bitstream, flipped image\n");
2858  }
2859 
2860  visible_width =
2861  s->width = get_bits(gb, 16) << 4;
2862  visible_height =
2863  s->height = get_bits(gb, 16) << 4;
2864 
2865  if (s->theora >= 0x030200) {
2866  visible_width = get_bits(gb, 24);
2867  visible_height = get_bits(gb, 24);
2868 
2869  offset_x = get_bits(gb, 8); /* offset x */
2870  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2871  }
2872 
2873  /* sanity check */
2874  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2875  visible_width + offset_x > s->width ||
2876  visible_height + offset_y > s->height ||
2877  visible_width < 18
2878  ) {
2879  av_log(avctx, AV_LOG_ERROR,
2880  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2881  visible_width, visible_height, offset_x, offset_y,
2882  s->width, s->height);
2883  return AVERROR_INVALIDDATA;
2884  }
2885 
2886  fps.num = get_bits_long(gb, 32);
2887  fps.den = get_bits_long(gb, 32);
2888  if (fps.num && fps.den) {
2889  if (fps.num < 0 || fps.den < 0) {
2890  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2891  return AVERROR_INVALIDDATA;
2892  }
2893  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2894  fps.den, fps.num, 1 << 30);
2895  }
2896 
2897  aspect.num = get_bits(gb, 24);
2898  aspect.den = get_bits(gb, 24);
2899  if (aspect.num && aspect.den) {
2901  &avctx->sample_aspect_ratio.den,
2902  aspect.num, aspect.den, 1 << 30);
2903  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2904  }
2905 
2906  if (s->theora < 0x030200)
2907  skip_bits(gb, 5); /* keyframe frequency force */
2908  colorspace = get_bits(gb, 8);
2909  skip_bits(gb, 24); /* bitrate */
2910 
2911  skip_bits(gb, 6); /* quality hint */
2912 
2913  if (s->theora >= 0x030200) {
2914  skip_bits(gb, 5); /* keyframe frequency force */
2915  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2916  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2917  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2918  return AVERROR_INVALIDDATA;
2919  }
2920  skip_bits(gb, 3); /* reserved */
2921  } else
2922  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2923 
2924  if (s->width < 18)
2925  return AVERROR_PATCHWELCOME;
2926  ret = ff_set_dimensions(avctx, s->width, s->height);
2927  if (ret < 0)
2928  return ret;
2929  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2930  avctx->width = visible_width;
2931  avctx->height = visible_height;
2932  // translate offsets from theora axis ([0,0] lower left)
2933  // to normal axis ([0,0] upper left)
2934  s->offset_x = offset_x;
2935  s->offset_y = s->height - visible_height - offset_y;
2936  }
2937 
2938  if (colorspace == 1)
2940  else if (colorspace == 2)
2942 
2943  if (colorspace == 1 || colorspace == 2) {
2944  avctx->colorspace = AVCOL_SPC_BT470BG;
2945  avctx->color_trc = AVCOL_TRC_BT709;
2946  }
2947 
2948  s->theora_header = 1;
2949  return 0;
2950 }
2951 
2952 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2953 {
2954  Vp3DecodeContext *s = avctx->priv_data;
2955  int n, matrices, ret;
2956 
2957  if (!s->theora_header)
2958  return AVERROR_INVALIDDATA;
2959 
2960  if (s->theora >= 0x030200) {
2961  n = get_bits(gb, 3);
2962  /* loop filter limit values table */
2963  if (n)
2964  for (int i = 0; i < 64; i++)
2965  s->filter_limit_values[i] = get_bits(gb, n);
2966  }
2967 
2968  if (s->theora >= 0x030200)
2969  n = get_bits(gb, 4) + 1;
2970  else
2971  n = 16;
2972  /* quality threshold table */
2973  for (int i = 0; i < 64; i++)
2974  s->coded_ac_scale_factor[i] = get_bits(gb, n);
2975 
2976  if (s->theora >= 0x030200)
2977  n = get_bits(gb, 4) + 1;
2978  else
2979  n = 16;
2980  /* dc scale factor table */
2981  for (int i = 0; i < 64; i++)
2982  s->coded_dc_scale_factor[0][i] =
2983  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
2984 
2985  if (s->theora >= 0x030200)
2986  matrices = get_bits(gb, 9) + 1;
2987  else
2988  matrices = 3;
2989 
2990  if (matrices > 384) {
2991  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
2992  return -1;
2993  }
2994 
2995  for (int j = 0; j < matrices; j++)
2996  for (int i = 0; i < 64; i++)
2997  s->base_matrix[j][i] = get_bits(gb, 8);
2998 
2999  for (int inter = 0; inter <= 1; inter++) {
3000  for (int plane = 0; plane <= 2; plane++) {
3001  int newqr = 1;
3002  if (inter || plane > 0)
3003  newqr = get_bits1(gb);
3004  if (!newqr) {
3005  int qtj, plj;
3006  if (inter && get_bits1(gb)) {
3007  qtj = 0;
3008  plj = plane;
3009  } else {
3010  qtj = (3 * inter + plane - 1) / 3;
3011  plj = (plane + 2) % 3;
3012  }
3013  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3014  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3015  sizeof(s->qr_size[0][0]));
3016  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3017  sizeof(s->qr_base[0][0]));
3018  } else {
3019  int qri = 0;
3020  int qi = 0;
3021 
3022  for (;;) {
3023  int i = get_bits(gb, av_log2(matrices - 1) + 1);
3024  if (i >= matrices) {
3025  av_log(avctx, AV_LOG_ERROR,
3026  "invalid base matrix index\n");
3027  return -1;
3028  }
3029  s->qr_base[inter][plane][qri] = i;
3030  if (qi >= 63)
3031  break;
3032  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3033  s->qr_size[inter][plane][qri++] = i;
3034  qi += i;
3035  }
3036 
3037  if (qi > 63) {
3038  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3039  return -1;
3040  }
3041  s->qr_count[inter][plane] = qri;
3042  }
3043  }
3044  }
3045 
3046  /* Huffman tables */
3047  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3048  s->huffman_table[i].nb_entries = 0;
3049  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3050  return ret;
3051  }
3052 
3053  s->theora_tables = 1;
3054 
3055  return 0;
3056 }
3057 
3058 static av_cold int theora_decode_init(AVCodecContext *avctx)
3059 {
3060  Vp3DecodeContext *s = avctx->priv_data;
3061  GetBitContext gb;
3062  int ptype;
3063  const uint8_t *header_start[3];
3064  int header_len[3];
3065  int ret;
3066 
3067  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3068 
3069  s->theora = 1;
3070 
3071  if (!avctx->extradata_size) {
3072  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3073  return -1;
3074  }
3075 
3077  42, header_start, header_len) < 0) {
3078  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3079  return -1;
3080  }
3081 
3082  for (int i = 0; i < 3; i++) {
3083  if (header_len[i] <= 0)
3084  continue;
3085  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3086  if (ret < 0)
3087  return ret;
3088 
3089  ptype = get_bits(&gb, 8);
3090 
3091  if (!(ptype & 0x80)) {
3092  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3093 // return -1;
3094  }
3095 
3096  // FIXME: Check for this as well.
3097  skip_bits_long(&gb, 6 * 8); /* "theora" */
3098 
3099  switch (ptype) {
3100  case 0x80:
3101  if (theora_decode_header(avctx, &gb) < 0)
3102  return -1;
3103  break;
3104  case 0x81:
3105 // FIXME: is this needed? it breaks sometimes
3106 // theora_decode_comments(avctx, gb);
3107  break;
3108  case 0x82:
3109  if (theora_decode_tables(avctx, &gb))
3110  return -1;
3111  break;
3112  default:
3113  av_log(avctx, AV_LOG_ERROR,
3114  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3115  break;
3116  }
3117  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3118  av_log(avctx, AV_LOG_WARNING,
3119  "%d bits left in packet %X\n",
3120  get_bits_left(&gb), ptype);
3121  if (s->theora < 0x030200)
3122  break;
3123  }
3124 
3125  return vp3_decode_init(avctx);
3126 }
3127 
3128 const FFCodec ff_theora_decoder = {
3129  .p.name = "theora",
3130  CODEC_LONG_NAME("Theora"),
3131  .p.type = AVMEDIA_TYPE_VIDEO,
3132  .p.id = AV_CODEC_ID_THEORA,
3133  .priv_data_size = sizeof(Vp3DecodeContext),
3134  .init = theora_decode_init,
3135  .close = vp3_decode_end,
3137  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3139  .flush = vp3_decode_flush,
3140  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3141  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3144 };
3145 #endif
3146 
3148  .p.name = "vp3",
3149  CODEC_LONG_NAME("On2 VP3"),
3150  .p.type = AVMEDIA_TYPE_VIDEO,
3151  .p.id = AV_CODEC_ID_VP3,
3152  .priv_data_size = sizeof(Vp3DecodeContext),
3153  .init = vp3_decode_init,
3154  .close = vp3_decode_end,
3156  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3158  .flush = vp3_decode_flush,
3159  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3160  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3162 };
3163 
3164 #if CONFIG_VP4_DECODER
3165 const FFCodec ff_vp4_decoder = {
3166  .p.name = "vp4",
3167  CODEC_LONG_NAME("On2 VP4"),
3168  .p.type = AVMEDIA_TYPE_VIDEO,
3169  .p.id = AV_CODEC_ID_VP4,
3170  .priv_data_size = sizeof(Vp3DecodeContext),
3171  .init = vp3_decode_init,
3172  .close = vp3_decode_end,
3174  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3176  .flush = vp3_decode_flush,
3177  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3178  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3180 };
3181 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1921
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2304
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1848
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:280
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:69
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:100
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:247
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
VP3DSPContext
Definition: vp3dsp.h:29
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:694
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp4_get_mv
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:887
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:237
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:87
VP4Predictor
Definition: vp3.c:177
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:209
thread.h
HuffEntry::len
uint8_t len
Definition: exr.c:97
AVRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
VP4Predictor::dc
int dc
Definition: vp3.c:178
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:424
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
superblock_run_length_vlc
static VLCElem superblock_run_length_vlc[88]
Definition: vp3.c:165
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2800
PUR
#define PUR
vp3dsp.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:652
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
AVPacket::data
uint8_t * data
Definition: packet.h:588
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:150
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3147
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1881
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:244
mode_code_vlc
static VLCElem mode_code_vlc[24+2108 *CONFIG_VP4_DECODER]
Definition: vp3.c:170
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:325
FFCodec
Definition: codec_internal.h:127
fragment_run_length_vlc
static VLCElem fragment_run_length_vlc[56]
Definition: vp3.c:166
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:149
motion_vector_vlc
static VLCElem motion_vector_vlc[112]
Definition: vp3.c:167
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:70
thread.h
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:468
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2055
CoeffVLCs::vlc_tabs
const VLCElem * vlc_tabs[80]
Definition: vp3.c:194
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
Vp3DecodeContext::height
int height
Definition: vp3.c:202
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:115
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:355
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
fragment
Definition: dashdec.c:37
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:224
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:383
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:551
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:136
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:314
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:706
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:337
Vp3DecodeContext::golden_frame
ProgressFrame golden_frame
Definition: vp3.c:204
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1150
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:257
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:210
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:642
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:222
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:91
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
CoeffVLCs
Definition: rv60dec.c:89
GetBitContext
Definition: get_bits.h:109
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
state
static struct @545 state
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3484
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:86
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:223
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:248
Vp3DecodeContext::theora
int theora
Definition: vp3.c:200
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:335
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
refstruct.h
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:297
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:281
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:154
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1934
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:645
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:544
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:139
VLCInitState
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
Definition: vlc.h:220
emms_c
#define emms_c()
Definition: emms.h:63
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:242
CoeffVLCs::vlcs
VLC vlcs[80]
Definition: vp3.c:195
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:347
s
#define s(width, name)
Definition: cbs_vp9.c:198
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:459
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
vp3_decode_flush
static av_cold void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:351
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:190
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:384
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:75
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:74
av_refstruct_alloc_ext
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:71
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1904
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1633
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1298
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:101
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:56
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:332
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:643
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:88
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:233
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:208
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:417
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:90
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:225
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:249
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
free_vlc_tables
static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
Definition: vp3.c:2348
HuffTable
Definition: vp3.c:188
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:784
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:221
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:391
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:200
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:282
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:216
FF_THREAD_IS_COPY
@ FF_THREAD_IS_COPY
Definition: thread.h:61
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:341
Vp3DecodeContext::coeff_vlc
CoeffVLCs * coeff_vlc
The first 16 of the following VLCs are for the dc coefficients; the others are four groups of 16 VLCs...
Definition: vp3.c:304
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:300
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:651
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:258
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:147
AVOnce
#define AVOnce
Definition: thread.h:202
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1631
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:212
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:214
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:46
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:241
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:289
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:76
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:550
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:902
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_vp4_decoder
const FFCodec ff_vp4_decoder
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
VLCElem
Definition: vlc.h:32
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:428
ref_frame
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
Definition: dec.c:616
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:278
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:254
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:218
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:213
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:203
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:246
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:318
version
version
Definition: libkvazaar.c:313
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:668
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:199
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:296
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:207
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:84
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1782
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:234
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:238
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:320
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:373
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:235
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2540
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
eob_run_table
static const struct @302 eob_run_table[7]
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:232
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:63
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:151
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
Vp3DecodeContext::current_frame
ProgressFrame current_frame
Definition: vp3.c:206
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:229
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:226
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:62
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:293
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:227
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:641
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:200
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:335
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLCElem *vlc_table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1178
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:85
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:288
ff_thread_sync_ref
enum ThreadingStatus ff_thread_sync_ref(AVCodecContext *avctx, size_t offset)
Allows to synchronize objects whose lifetime is the whole decoding process among all frame threads.
Definition: decode.c:1932
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:203
BLOCK_X
#define BLOCK_X
Definition: vp3.c:641
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:94
Vp3DecodeContext
Definition: vp3.c:198
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1911
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:89
coeff_vlc
static const VLCElem * coeff_vlc[2][8][4]
Definition: atrac9dec.c:110
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:65
AVCodecContext
main external API structure.
Definition: avcodec.h:431
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:148
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1893
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:799
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VLC
Definition: vlc.h:50
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:255
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+4]
Definition: vp3.c:326
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1031
HuffEntry
Definition: exr.c:96
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:271
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
VLC::table
VLCElem * table
Definition: vlc.h:52
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:83
VideoDSPContext
Definition: videodsp.h:40
ff_vlc_init_tables_from_lengths
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
Definition: vlc.c:366
HuffEntry::sym
uint8_t sym
Definition: vp3.c:185
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:230
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1629
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:245
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vlc_init_tables
static const VLCElem * ff_vlc_init_tables(VLCInitState *state, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, int flags)
Definition: vlc.h:254
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1142
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:189
VLC_INIT_STATIC_TABLE_FROM_LENGTHS
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
Definition: vlc.h:288
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:323
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
VLC_INIT_STATE
#define VLC_INIT_STATE(_table)
Definition: vlc.h:225
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:565
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
VP4Predictor::type
int type
Definition: vp3.c:179
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2356
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:256
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:61
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:240
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:202
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:295
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1099
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:259
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:360
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:81
init_tables_once
static av_cold void init_tables_once(void)
Definition: vp3.c:2261
stride
#define stride
Definition: h264pred_template.c:536
Vp3DecodeContext::version
int version
Definition: vp3.c:201
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:251
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:215
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c)
Definition: vp3dsp.c:448
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:328
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:228
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:279
Vp3Fragment
Definition: vp3.c:68
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
src
#define src
Definition: vp8dsp.c:248
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:219
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:308
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:211
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:280
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:236
Vp3DecodeContext::last_frame
ProgressFrame last_frame
Definition: vp3.c:205