/* vp3.c — FFmpeg On2 VP3/VP4 video decoder (extracted source listing) */
1 /*
2  * Copyright (C) 2003-2004 The FFmpeg project
3  * Copyright (C) 2019 Peter Ross
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * On2 VP3/VP4 Video Decoder
25  *
26  * VP3 Video Decoder by Mike Melanson (mike at multimedia.cx)
27  * For more information about the VP3 coding process, visit:
28  * http://wiki.multimedia.cx/index.php?title=On2_VP3
29  *
30  * Theora decoder by Alex Beregszaszi
31  */
32 
33 #include "config_components.h"
34 
35 #include <stddef.h>
36 #include <string.h>
37 
38 #include "libavutil/attributes.h"
39 #include "libavutil/emms.h"
40 #include "libavutil/imgutils.h"
41 #include "libavutil/mem.h"
42 #include "libavutil/mem_internal.h"
43 #include "libavutil/thread.h"
44 
45 #include "avcodec.h"
46 #include "codec_internal.h"
47 #include "decode.h"
48 #include "get_bits.h"
49 #include "hpeldsp.h"
50 #include "internal.h"
51 #include "jpegquanttables.h"
52 #include "mathops.h"
53 #include "progressframe.h"
54 #include "libavutil/refstruct.h"
55 #include "thread.h"
56 #include "videodsp.h"
57 #include "vp3data.h"
58 #include "vp4data.h"
59 #include "vp3dsp.h"
60 #include "xiph.h"
61 
62 #define VP3_MV_VLC_BITS 6
63 #define VP4_MV_VLC_BITS 6
64 #define SUPERBLOCK_VLC_BITS 6
65 
66 #define FRAGMENT_PIXELS 8
67 
68 // FIXME split things out into their own arrays
typedef struct Vp3Fragment {
    int16_t dc;            ///< DC coefficient of this fragment
    uint8_t coding_method; ///< one of the MODE_* values (MODE_COPY = not coded)
    uint8_t qpi;           ///< index into the frame's qps[] quantizer list
} Vp3Fragment;
74 
75 #define SB_NOT_CODED 0
76 #define SB_PARTIALLY_CODED 1
77 #define SB_FULLY_CODED 2
78 
79 // This is the maximum length of a single long bit run that can be encoded
80 // for superblock coding or block qps. Theora special-cases this to read a
81 // bit instead of flipping the current bit to allow for runs longer than 4129.
82 #define MAXIMUM_LONG_BIT_RUN 4129
83 
84 #define MODE_INTER_NO_MV 0
85 #define MODE_INTRA 1
86 #define MODE_INTER_PLUS_MV 2
87 #define MODE_INTER_LAST_MV 3
88 #define MODE_INTER_PRIOR_LAST 4
89 #define MODE_USING_GOLDEN 5
90 #define MODE_GOLDEN_MV 6
91 #define MODE_INTER_FOURMV 7
92 #define CODING_MODE_COUNT 8
93 
94 /* special internal mode */
95 #define MODE_COPY 8
96 
97 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb);
98 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb);
99 
100 
101 /* There are 6 preset schemes, plus a free-form scheme */
102 static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
103  /* scheme 1: Last motion vector dominates */
108 
109  /* scheme 2 */
114 
115  /* scheme 3 */
120 
121  /* scheme 4 */
126 
127  /* scheme 5: No motion vector dominates */
132 
133  /* scheme 6 */
138 };
139 
/* (x, y) fragment offsets, in coding order, for the 16 fragments of a
 * 4x4-fragment superblock; the traversal follows a Hilbert curve. */
static const uint8_t hilbert_offset[16][2] = {
    { 0, 0 }, { 1, 0 }, { 1, 1 }, { 0, 1 },
    { 0, 2 }, { 0, 3 }, { 1, 3 }, { 1, 2 },
    { 2, 2 }, { 2, 3 }, { 3, 3 }, { 3, 2 },
    { 3, 1 }, { 2, 1 }, { 2, 0 }, { 3, 0 }
};
146 
147 enum {
153 };
154 
155 static const uint8_t vp4_pred_block_type_map[8] = {
164 };
165 
/* Static VLC tables, built once and shared by all decoder instances. */
static VLCElem superblock_run_length_vlc[88]; /* version < 2 */
static VLCElem fragment_run_length_vlc[56]; /* version < 2 */
static VLCElem motion_vector_vlc[112]; /* version < 2 */

// The VP4 tables reuse this vlc.
static VLCElem mode_code_vlc[24 + 2108 * CONFIG_VP4_DECODER];

#if CONFIG_VP4_DECODER
static const VLCElem *vp4_mv_vlc_table[2][7]; /* version >= 2 */
static const VLCElem *block_pattern_vlc[2]; /* version >= 2 */
#endif
177 
/* Per-block DC prediction state for VP4 (see dc_pred_row). */
typedef struct {
    int dc;   ///< DC value available for prediction
    int type; ///< predictor block type (see vp4_pred_block_type_map)
} VP4Predictor;
182 
183 #define MIN_DEQUANT_VAL 2
184 
typedef struct HuffEntry {
    uint8_t len, sym; ///< code length in bits and the symbol it decodes to
} HuffEntry;
188 
189 typedef struct HuffTable {
191  uint8_t nb_entries;
192 } HuffTable;
193 
/* The 80 DCT coefficient VLCs: per the context documentation, the first 16
 * are for DC coefficients, the rest are four groups of 16 for AC levels.
 * Shared between threads via a RefStruct reference. */
typedef struct CoeffVLCs {
    const VLCElem *vlc_tabs[80]; ///< table pointers actually used for decoding
    VLC vlcs[80];                ///< backing VLC storage for vlc_tabs
} CoeffVLCs;
198 
199 typedef struct Vp3DecodeContext {
202  int version;
203  int width, height;
208  int keyframe;
209  uint8_t idct_permutation[64];
210  uint8_t idct_scantable[64];
214  DECLARE_ALIGNED(16, int16_t, block)[64];
218 
219  int qps[3];
220  int nqps;
221  int last_qps[3];
222 
232  unsigned char *superblock_coding;
233 
234  int macroblock_count; /* y macroblock count */
240  int yuv_macroblock_count; /* y+u+v macroblock count */
241 
245 
248  int data_offset[3];
249  uint8_t offset_x;
250  uint8_t offset_y;
252 
253  int8_t (*motion_val[2])[2];
254 
255  /* tables */
256  uint16_t coded_dc_scale_factor[2][64];
257  uint32_t coded_ac_scale_factor[64];
258  uint8_t base_matrix[384][64];
259  uint8_t qr_count[2][3];
260  uint8_t qr_size[2][3][64];
261  uint16_t qr_base[2][3][64];
262 
263  /**
264  * This is a list of all tokens in bitstream order. Reordering takes place
265  * by pulling from each level during IDCT. As a consequence, IDCT must be
266  * in Hilbert order, making the minimum slice height 64 for 4:2:0 and 32
267  * otherwise. The 32 different tokens with up to 12 bits of extradata are
268  * collapsed into 3 types, packed as follows:
269  * (from the low to high bits)
270  *
271  * 2 bits: type (0,1,2)
272  * 0: EOB run, 14 bits for run length (12 needed)
273  * 1: zero run, 7 bits for run length
274  * 7 bits for the next coefficient (3 needed)
275  * 2: coefficient, 14 bits (11 needed)
276  *
277  * Coefficients are signed, so are packed in the highest bits for automatic
278  * sign extension.
279  */
280  int16_t *dct_tokens[3][64];
281  int16_t *dct_tokens_base;
282 #define TOKEN_EOB(eob_run) ((eob_run) << 2)
283 #define TOKEN_ZERO_RUN(coeff, zero_run) (((coeff) * 512) + ((zero_run) << 2) + 1)
284 #define TOKEN_COEFF(coeff) (((coeff) * 4) + 2)
285 
286  /**
287  * number of blocks that contain DCT coefficients at
288  * the given level or higher
289  */
290  int num_coded_frags[3][64];
292 
293  /* this is a list of indexes into the all_fragments array indicating
294  * which of the fragments are coded */
296 
300 
301  /**
302  * The first 16 of the following VLCs are for the dc coefficients;
303  * the others are four groups of 16 VLCs each for ac coefficients.
304  * This is a RefStruct reference to share these VLCs between threads.
305  */
307 
308  /* these arrays need to be on 16-byte boundaries since SSE2 operations
309  * index into them */
310  DECLARE_ALIGNED(16, int16_t, qmat)[3][2][3][64]; ///< qmat[qpi][is_inter][plane]
311 
312  /* This table contains superblock_count * 16 entries. Each set of 16
313  * numbers corresponds to the fragment indexes 0..15 of the superblock.
314  * An entry will be -1 to indicate that no entry corresponds to that
315  * index. */
317 
318  /* This is an array that indicates how a particular macroblock
319  * is coded. */
320  unsigned char *macroblock_coding;
321 
322  uint8_t *edge_emu_buffer;
323 
324  /* Huffman decode */
326 
327  uint8_t filter_limit_values[64];
329 
330  VP4Predictor * dc_pred_row; /* dc_pred_row[y_superblock_width * 4] */
332 
333 /************************************************************************
334  * VP3 specific functions
335  ************************************************************************/
336 
337 static av_cold void free_tables(AVCodecContext *avctx)
338 {
339  Vp3DecodeContext *s = avctx->priv_data;
340 
341  av_freep(&s->superblock_coding);
342  av_freep(&s->all_fragments);
343  av_freep(&s->nkf_coded_fragment_list);
344  av_freep(&s->kf_coded_fragment_list);
345  av_freep(&s->dct_tokens_base);
346  av_freep(&s->superblock_fragments);
347  av_freep(&s->macroblock_coding);
348  av_freep(&s->dc_pred_row);
349  av_freep(&s->motion_val[0]);
350  av_freep(&s->motion_val[1]);
351 }
352 
354 {
355  Vp3DecodeContext *s = avctx->priv_data;
356 
357  ff_progress_frame_unref(&s->golden_frame);
358  ff_progress_frame_unref(&s->last_frame);
359  ff_progress_frame_unref(&s->current_frame);
360 }
361 
363 {
364  Vp3DecodeContext *s = avctx->priv_data;
365 
366  free_tables(avctx);
367  av_freep(&s->edge_emu_buffer);
368 
369  s->theora_tables = 0;
370 
371  /* release all frames */
372  vp3_decode_flush(avctx);
373 
374  av_refstruct_unref(&s->coeff_vlc);
375 
376  return 0;
377 }
378 
379 /**
380  * This function sets up all of the various blocks mappings:
381  * superblocks <-> fragments, macroblocks <-> fragments,
382  * superblocks <-> macroblocks
383  *
384  * @return 0 is successful; returns 1 if *anything* went wrong.
385  */
387 {
388  int j = 0;
389 
390  for (int plane = 0; plane < 3; plane++) {
391  int sb_width = plane ? s->c_superblock_width
392  : s->y_superblock_width;
393  int sb_height = plane ? s->c_superblock_height
394  : s->y_superblock_height;
395  int frag_width = s->fragment_width[!!plane];
396  int frag_height = s->fragment_height[!!plane];
397 
398  for (int sb_y = 0; sb_y < sb_height; sb_y++)
399  for (int sb_x = 0; sb_x < sb_width; sb_x++)
400  for (int i = 0; i < 16; i++) {
401  int x = 4 * sb_x + hilbert_offset[i][0];
402  int y = 4 * sb_y + hilbert_offset[i][1];
403 
404  if (x < frag_width && y < frag_height)
405  s->superblock_fragments[j++] = s->fragment_start[plane] +
406  y * frag_width + x;
407  else
408  s->superblock_fragments[j++] = -1;
409  }
410  }
411 
412  return 0; /* successful path out */
413 }
414 
415 /*
416  * This function sets up the dequantization tables used for a particular
417  * frame.
418  */
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
{
    int ac_scale_factor = s->coded_ac_scale_factor[s->qps[qpi]];

    for (int inter = 0; inter < 2; inter++) {
        for (int plane = 0; plane < 3; plane++) {
            int dc_scale_factor = s->coded_dc_scale_factor[!!plane][s->qps[qpi]];
            int sum = 0, bmi, bmj, qistart, qri;
            /* locate the quant range that contains this frame's qp; after
             * the loop, sum is the upper end point of that range */
            for (qri = 0; qri < s->qr_count[inter][plane]; qri++) {
                sum += s->qr_size[inter][plane][qri];
                if (s->qps[qpi] <= sum)
                    break;
            }
            qistart = sum - s->qr_size[inter][plane][qri];
            bmi = s->qr_base[inter][plane][qri];
            bmj = s->qr_base[inter][plane][qri + 1];
            for (int i = 0; i < 64; i++) {
                /* linearly interpolate, with rounding, between the two base
                 * matrices that bound the quant range */
                int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
                             2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
                             s->qr_size[inter][plane][qri]) /
                            (2 * s->qr_size[inter][plane][qri]);

                int qmin = 8 << (inter + !i);
                int qscale = i ? ac_scale_factor : dc_scale_factor;
                int qbias = (1 + inter) * 3;
                /* VP3/Theora (version < 2) clamps the quantizer; VP4 AC
                 * coefficients use a biased, unclamped formula instead */
                s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
                    (i == 0 || s->version < 2) ? av_clip((qscale * coeff) / 100 * 4, qmin, 4096)
                                               : (qscale * (coeff - qbias) / 100 + qbias) * 4;
            }
            /* all DC coefficients use the same quant so as not to interfere
             * with DC prediction */
            s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
        }
    }
}
454 
455 /*
456  * This function initializes the loop filter boundary limits if the frame's
457  * quality index is different from the previous frame's.
458  *
459  * The filter_limit_values may not be larger than 127.
460  */
462 {
463  ff_vp3dsp_set_bounding_values(s->bounding_values_array, s->filter_limit_values[s->qps[0]]);
464 }
465 
466 /*
467  * This function unpacks all of the superblock/macroblock/fragment coding
468  * information from the bitstream.
469  */
471 {
472  const int superblock_starts[3] = {
473  0, s->u_superblock_start, s->v_superblock_start
474  };
475  int bit = 0;
476  int current_superblock = 0;
477  int current_run = 0;
478  int num_partial_superblocks = 0;
479 
480  int current_fragment;
481  int plane0_num_coded_frags = 0;
482 
483  if (s->keyframe) {
484  memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
485  } else {
486  /* unpack the list of partially-coded superblocks */
487  bit = get_bits1(gb) ^ 1;
488  current_run = 0;
489 
490  while (current_superblock < s->superblock_count && get_bits_left(gb) > 0) {
491  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
492  bit = get_bits1(gb);
493  else
494  bit ^= 1;
495 
496  current_run = get_vlc2(gb, superblock_run_length_vlc,
498  if (current_run == 34)
499  current_run += get_bits(gb, 12);
500 
501  if (current_run > s->superblock_count - current_superblock) {
502  av_log(s->avctx, AV_LOG_ERROR,
503  "Invalid partially coded superblock run length\n");
504  return -1;
505  }
506 
507  memset(s->superblock_coding + current_superblock, bit, current_run);
508 
509  current_superblock += current_run;
510  if (bit)
511  num_partial_superblocks += current_run;
512  }
513 
514  /* unpack the list of fully coded superblocks if any of the blocks were
515  * not marked as partially coded in the previous step */
516  if (num_partial_superblocks < s->superblock_count) {
517  int superblocks_decoded = 0;
518 
519  current_superblock = 0;
520  bit = get_bits1(gb) ^ 1;
521  current_run = 0;
522 
523  while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
524  get_bits_left(gb) > 0) {
525  if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
526  bit = get_bits1(gb);
527  else
528  bit ^= 1;
529 
530  current_run = get_vlc2(gb, superblock_run_length_vlc,
532  if (current_run == 34)
533  current_run += get_bits(gb, 12);
534 
535  for (int j = 0; j < current_run; current_superblock++) {
536  if (current_superblock >= s->superblock_count) {
537  av_log(s->avctx, AV_LOG_ERROR,
538  "Invalid fully coded superblock run length\n");
539  return -1;
540  }
541 
542  /* skip any superblocks already marked as partially coded */
543  if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
544  s->superblock_coding[current_superblock] = 2 * bit;
545  j++;
546  }
547  }
548  superblocks_decoded += current_run;
549  }
550  }
551 
552  /* if there were partial blocks, initialize bitstream for
553  * unpacking fragment codings */
554  if (num_partial_superblocks) {
555  current_run = 0;
556  bit = get_bits1(gb);
557  /* toggle the bit because as soon as the first run length is
558  * fetched the bit will be toggled again */
559  bit ^= 1;
560  }
561  }
562 
563  /* figure out which fragments are coded; iterate through each
564  * superblock (all planes) */
565  s->total_num_coded_frags = 0;
566  memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
567 
568  s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list
569  : s->nkf_coded_fragment_list;
570 
571  for (int plane = 0; plane < 3; plane++) {
572  int sb_start = superblock_starts[plane];
573  int sb_end = sb_start + (plane ? s->c_superblock_count
574  : s->y_superblock_count);
575  int num_coded_frags = 0;
576 
577  if (s->keyframe) {
578  if (s->num_kf_coded_fragment[plane] == -1) {
579  for (int i = sb_start; i < sb_end; i++) {
580  /* iterate through all 16 fragments in a superblock */
581  for (int j = 0; j < 16; j++) {
582  /* if the fragment is in bounds, check its coding status */
583  current_fragment = s->superblock_fragments[i * 16 + j];
584  if (current_fragment != -1) {
585  s->coded_fragment_list[plane][num_coded_frags++] =
586  current_fragment;
587  }
588  }
589  }
590  s->num_kf_coded_fragment[plane] = num_coded_frags;
591  } else
592  num_coded_frags = s->num_kf_coded_fragment[plane];
593  } else {
594  for (int i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
595  if (get_bits_left(gb) < plane0_num_coded_frags >> 2) {
596  return AVERROR_INVALIDDATA;
597  }
598  /* iterate through all 16 fragments in a superblock */
599  for (int j = 0; j < 16; j++) {
600  /* if the fragment is in bounds, check its coding status */
601  current_fragment = s->superblock_fragments[i * 16 + j];
602  if (current_fragment != -1) {
603  int coded = s->superblock_coding[i];
604 
605  if (coded == SB_PARTIALLY_CODED) {
606  /* fragment may or may not be coded; this is the case
607  * that cares about the fragment coding runs */
608  if (current_run-- == 0) {
609  bit ^= 1;
610  current_run = get_vlc2(gb, fragment_run_length_vlc, 5, 2);
611  }
612  coded = bit;
613  }
614 
615  if (coded) {
616  /* default mode; actual mode will be decoded in
617  * the next phase */
618  s->all_fragments[current_fragment].coding_method =
620  s->coded_fragment_list[plane][num_coded_frags++] =
621  current_fragment;
622  } else {
623  /* not coded; copy this fragment from the prior frame */
624  s->all_fragments[current_fragment].coding_method =
625  MODE_COPY;
626  }
627  }
628  }
629  }
630  }
631  if (!plane)
632  plane0_num_coded_frags = num_coded_frags;
633  s->total_num_coded_frags += num_coded_frags;
634  for (int i = 0; i < 64; i++)
635  s->num_coded_frags[plane][i] = num_coded_frags;
636  if (plane < 2)
637  s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
638  num_coded_frags;
639  }
640  return 0;
641 }
642 
/* Map luma-block index k (0..3) inside macroblock (mb_x, mb_y) to fragment
 * coordinates; both macros rely on mb_x/mb_y/k being in scope at use site. */
#define BLOCK_X (2 * mb_x + (k & 1))
#define BLOCK_Y (2 * mb_y + (k >> 1))
645 
646 #if CONFIG_VP4_DECODER
647 /**
648  * @return number of blocks, or > yuv_macroblock_count on error.
649  * return value is always >= 1.
650  */
static int vp4_get_mb_count(Vp3DecodeContext *s, GetBitContext *gb)
{
    int v = 1;
    int bits;
    /* each 9-bit all-ones prefix extends the run by 256; bail out early if
     * the count already exceeds the total macroblock count */
    while ((bits = show_bits(gb, 9)) == 0x1ff) {
        skip_bits(gb, 9);
        v += 256;
        if (v > s->yuv_macroblock_count) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid run length\n");
            return v;
        }
    }
    /* consume a (2 + n)-bit prefix, then add an n-bit remainder plus the
     * implicit 2^n offset of that prefix class */
#define body(n) { \
    skip_bits(gb, 2 + n); \
    v += (1 << n) + get_bits(gb, n); }
#define thresh(n) (0x200 - (0x80 >> n))
#define else_if(n) else if (bits < thresh(n)) body(n)
    if (bits < 0x100) {
        skip_bits(gb, 1);
    } else if (bits < thresh(0)) {
        skip_bits(gb, 2);
        v += 1;
    }
    else_if(1)
    else_if(2)
    else_if(3)
    else_if(4)
    else_if(5)
    else_if(6)
    else body(7)
#undef body
#undef thresh
#undef else_if
    return v;
}
686 
687 static int vp4_get_block_pattern(GetBitContext *gb, int *next_block_pattern_table)
688 {
689  int v = get_vlc2(gb, block_pattern_vlc[*next_block_pattern_table], 5, 1);
690  *next_block_pattern_table = vp4_block_pattern_table_selector[v];
691  return v + 1;
692 }
693 
/* Unpack the VP4 macroblock coding information: which macroblocks are
 * fully/partially/not coded, and the per-macroblock block patterns. */
static int vp4_unpack_macroblocks(Vp3DecodeContext *s, GetBitContext *gb)
{
    int fragment;
    int next_block_pattern_table;
    int bit, current_run, has_partial;

    memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);

    if (s->keyframe)
        return 0;

    /* pass 1: alternating runs of fully coded (SB_FULLY_CODED) and
     * not-coded macroblocks; "partial" status is resolved in pass 2 */
    has_partial = 0;
    bit = get_bits1(gb);
    for (int i = 0; i < s->yuv_macroblock_count; i += current_run) {
        if (get_bits_left(gb) <= 0)
            return AVERROR_INVALIDDATA;
        current_run = vp4_get_mb_count(s, gb);
        if (current_run > s->yuv_macroblock_count - i)
            return -1;
        memset(s->superblock_coding + i, 2 * bit, current_run);
        bit ^= 1;
        has_partial |= bit;
    }

    /* pass 2: among the macroblocks left at SB_NOT_CODED, runs decide
     * which are partially coded */
    if (has_partial) {
        if (get_bits_left(gb) <= 0)
            return AVERROR_INVALIDDATA;
        bit = get_bits1(gb);
        current_run = vp4_get_mb_count(s, gb);
        for (int i = 0; i < s->yuv_macroblock_count; i++) {
            if (!s->superblock_coding[i]) {
                if (!current_run) {
                    bit ^= 1;
                    current_run = vp4_get_mb_count(s, gb);
                }
                s->superblock_coding[i] = bit;
                current_run--;
            }
        }
        if (current_run) /* handle situation when vp4_get_mb_count() fails */
            return -1;
    }

    /* pass 3: read a block pattern for every partially coded macroblock
     * and mark the coding method of each of its fragments */
    next_block_pattern_table = 0;
    for (int plane = 0, i = 0; plane < 3; plane++) {
        int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
        int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
        int mb_width = plane ? s->c_macroblock_width : s->macroblock_width;
        int mb_height = plane ? s->c_macroblock_height : s->macroblock_height;
        int fragment_width = s->fragment_width[!!plane];
        int fragment_height = s->fragment_height[!!plane];

        for (int sb_y = 0; sb_y < sb_height; sb_y++) {
            for (int sb_x = 0; sb_x < sb_width; sb_x++) {
                for (int j = 0; j < 4; j++) {
                    int mb_x = 2 * sb_x + (j >> 1);
                    /* NOTE: '+' binds tighter than '^', so this evaluates as
                     * (2 * sb_y + (j >> 1)) ^ (j & 1); since 2 * sb_y is even,
                     * the macroblocks are visited in the order
                     * (0,0), (0,1), (1,1), (1,0) within the superblock */
                    int mb_y = 2 * sb_y + (j >> 1) ^ (j & 1);
                    int mb_coded, pattern, coded;

                    if (mb_x >= mb_width || mb_y >= mb_height)
                        continue;

                    mb_coded = s->superblock_coding[i++];

                    if (mb_coded == SB_FULLY_CODED)
                        pattern = 0xF;
                    else if (mb_coded == SB_PARTIALLY_CODED)
                        pattern = vp4_get_block_pattern(gb, &next_block_pattern_table);
                    else
                        pattern = 0;

                    for (int k = 0; k < 4; k++) {
                        if (BLOCK_X >= fragment_width || BLOCK_Y >= fragment_height)
                            continue;
                        fragment = s->fragment_start[plane] + BLOCK_Y * fragment_width + BLOCK_X;
                        coded = pattern & (8 >> k);
                        /* MODE_INTER_NO_MV is the default for coded fragments.
                           the actual method is decoded in the next phase. */
                        s->all_fragments[fragment].coding_method = coded ? MODE_INTER_NO_MV : MODE_COPY;
                    }
                }
            }
        }
    }
    return 0;
}
780 #endif
781 
782 /*
783  * This function unpacks all the coding mode data for individual macroblocks
784  * from the bitstream.
785  */
787 {
788  int scheme;
789  int current_macroblock;
790  int current_fragment;
791  int coding_mode;
792  int custom_mode_alphabet[CODING_MODE_COUNT];
793  const int *alphabet;
794  Vp3Fragment *frag;
795 
796  if (s->keyframe) {
797  for (int i = 0; i < s->fragment_count; i++)
798  s->all_fragments[i].coding_method = MODE_INTRA;
799  } else {
800  /* fetch the mode coding scheme for this frame */
801  scheme = get_bits(gb, 3);
802 
803  /* is it a custom coding scheme? */
804  if (scheme == 0) {
805  for (int i = 0; i < 8; i++)
806  custom_mode_alphabet[i] = MODE_INTER_NO_MV;
807  for (int i = 0; i < 8; i++)
808  custom_mode_alphabet[get_bits(gb, 3)] = i;
809  alphabet = custom_mode_alphabet;
810  } else
811  alphabet = ModeAlphabet[scheme - 1];
812 
813  /* iterate through all of the macroblocks that contain 1 or more
814  * coded fragments */
815  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
816  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
817  if (get_bits_left(gb) <= 0)
818  return -1;
819 
820  for (int j = 0; j < 4; j++) {
821  int k;
822  int mb_x = 2 * sb_x + (j >> 1);
823  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
824  current_macroblock = mb_y * s->macroblock_width + mb_x;
825 
826  if (mb_x >= s->macroblock_width ||
827  mb_y >= s->macroblock_height)
828  continue;
829 
830  /* coding modes are only stored if the macroblock has
831  * at least one luma block coded, otherwise it must be
832  * INTER_NO_MV */
833  for (k = 0; k < 4; k++) {
834  current_fragment = BLOCK_Y *
835  s->fragment_width[0] + BLOCK_X;
836  if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
837  break;
838  }
839  if (k == 4) {
840  s->macroblock_coding[current_macroblock] = MODE_INTER_NO_MV;
841  continue;
842  }
843 
844  /* mode 7 means get 3 bits for each coding mode */
845  if (scheme == 7)
846  coding_mode = get_bits(gb, 3);
847  else
848  coding_mode = alphabet[get_vlc2(gb, mode_code_vlc, 4, 2)];
849 
850  s->macroblock_coding[current_macroblock] = coding_mode;
851  for (k = 0; k < 4; k++) {
852  frag = s->all_fragments + BLOCK_Y * s->fragment_width[0] + BLOCK_X;
853  if (frag->coding_method != MODE_COPY)
854  frag->coding_method = coding_mode;
855  }
856 
857 #define SET_CHROMA_MODES \
858  if (frag[s->fragment_start[1]].coding_method != MODE_COPY) \
859  frag[s->fragment_start[1]].coding_method = coding_mode; \
860  if (frag[s->fragment_start[2]].coding_method != MODE_COPY) \
861  frag[s->fragment_start[2]].coding_method = coding_mode;
862 
863  if (s->chroma_y_shift) {
864  frag = s->all_fragments + mb_y *
865  s->fragment_width[1] + mb_x;
867  } else if (s->chroma_x_shift) {
868  frag = s->all_fragments +
869  2 * mb_y * s->fragment_width[1] + mb_x;
870  for (k = 0; k < 2; k++) {
872  frag += s->fragment_width[1];
873  }
874  } else {
875  for (k = 0; k < 4; k++) {
876  frag = s->all_fragments +
877  BLOCK_Y * s->fragment_width[1] + BLOCK_X;
879  }
880  }
881  }
882  }
883  }
884  }
885 
886  return 0;
887 }
888 
/* Decode one VP4 motion-vector component (axis: 0 = x, 1 = y). The VLC
 * table is selected by the magnitude of the previous motion on this axis,
 * and the previous motion's sign is propagated onto the decoded value. */
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
{
#if CONFIG_VP4_DECODER
    int v = get_vlc2(gb, vp4_mv_vlc_table[axis][vp4_mv_table_selector[FFABS(last_motion)]],
                     VP4_MV_VLC_BITS, 2);
    return last_motion < 0 ? -v : v;
#else
    return 0;
#endif
}
899 
900 /*
901  * This function unpacks all the motion vectors for the individual
902  * macroblocks from the bitstream.
903  */
905 {
906  int coding_mode;
907  int motion_x[4];
908  int motion_y[4];
909  int last_motion_x = 0;
910  int last_motion_y = 0;
911  int prior_last_motion_x = 0;
912  int prior_last_motion_y = 0;
913  int last_gold_motion_x = 0;
914  int last_gold_motion_y = 0;
915  int current_macroblock;
916  int current_fragment;
917  int frag;
918 
919  if (s->keyframe)
920  return 0;
921 
922  /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme; 2 is VP4 code scheme */
923  coding_mode = s->version < 2 ? get_bits1(gb) : 2;
924 
925  /* iterate through all of the macroblocks that contain 1 or more
926  * coded fragments */
927  for (int sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
928  for (int sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
929  if (get_bits_left(gb) <= 0)
930  return -1;
931 
932  for (int j = 0; j < 4; j++) {
933  int mb_x = 2 * sb_x + (j >> 1);
934  int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
935  current_macroblock = mb_y * s->macroblock_width + mb_x;
936 
937  if (mb_x >= s->macroblock_width ||
938  mb_y >= s->macroblock_height ||
939  s->macroblock_coding[current_macroblock] == MODE_COPY)
940  continue;
941 
942  switch (s->macroblock_coding[current_macroblock]) {
943  case MODE_GOLDEN_MV:
944  if (coding_mode == 2) { /* VP4 */
945  last_gold_motion_x = motion_x[0] = vp4_get_mv(gb, 0, last_gold_motion_x);
946  last_gold_motion_y = motion_y[0] = vp4_get_mv(gb, 1, last_gold_motion_y);
947  break;
948  } /* otherwise fall through */
949  case MODE_INTER_PLUS_MV:
950  /* all 6 fragments use the same motion vector */
951  if (coding_mode == 0) {
952  motion_x[0] = get_vlc2(gb, motion_vector_vlc,
953  VP3_MV_VLC_BITS, 2);
954  motion_y[0] = get_vlc2(gb, motion_vector_vlc,
955  VP3_MV_VLC_BITS, 2);
956  } else if (coding_mode == 1) {
957  motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
958  motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
959  } else { /* VP4 */
960  motion_x[0] = vp4_get_mv(gb, 0, last_motion_x);
961  motion_y[0] = vp4_get_mv(gb, 1, last_motion_y);
962  }
963 
964  /* vector maintenance, only on MODE_INTER_PLUS_MV */
965  if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
966  prior_last_motion_x = last_motion_x;
967  prior_last_motion_y = last_motion_y;
968  last_motion_x = motion_x[0];
969  last_motion_y = motion_y[0];
970  }
971  break;
972 
973  case MODE_INTER_FOURMV:
974  /* vector maintenance */
975  prior_last_motion_x = last_motion_x;
976  prior_last_motion_y = last_motion_y;
977 
978  /* fetch 4 vectors from the bitstream, one for each
979  * Y fragment, then average for the C fragment vectors */
980  for (int k = 0; k < 4; k++) {
981  current_fragment = BLOCK_Y * s->fragment_width[0] + BLOCK_X;
982  if (s->all_fragments[current_fragment].coding_method != MODE_COPY) {
983  if (coding_mode == 0) {
984  motion_x[k] = get_vlc2(gb, motion_vector_vlc,
985  VP3_MV_VLC_BITS, 2);
986  motion_y[k] = get_vlc2(gb, motion_vector_vlc,
987  VP3_MV_VLC_BITS, 2);
988  } else if (coding_mode == 1) {
989  motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
990  motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
991  } else { /* VP4 */
992  motion_x[k] = vp4_get_mv(gb, 0, prior_last_motion_x);
993  motion_y[k] = vp4_get_mv(gb, 1, prior_last_motion_y);
994  }
995  last_motion_x = motion_x[k];
996  last_motion_y = motion_y[k];
997  } else {
998  motion_x[k] = 0;
999  motion_y[k] = 0;
1000  }
1001  }
1002  break;
1003 
1004  case MODE_INTER_LAST_MV:
1005  /* all 6 fragments use the last motion vector */
1006  motion_x[0] = last_motion_x;
1007  motion_y[0] = last_motion_y;
1008 
1009  /* no vector maintenance (last vector remains the
1010  * last vector) */
1011  break;
1012 
1013  case MODE_INTER_PRIOR_LAST:
1014  /* all 6 fragments use the motion vector prior to the
1015  * last motion vector */
1016  motion_x[0] = prior_last_motion_x;
1017  motion_y[0] = prior_last_motion_y;
1018 
1019  /* vector maintenance */
1020  prior_last_motion_x = last_motion_x;
1021  prior_last_motion_y = last_motion_y;
1022  last_motion_x = motion_x[0];
1023  last_motion_y = motion_y[0];
1024  break;
1025 
1026  default:
1027  /* covers intra, inter without MV, golden without MV */
1028  motion_x[0] = 0;
1029  motion_y[0] = 0;
1030 
1031  /* no vector maintenance */
1032  break;
1033  }
1034 
1035  /* assign the motion vectors to the correct fragments */
1036  for (int k = 0; k < 4; k++) {
1037  current_fragment =
1038  BLOCK_Y * s->fragment_width[0] + BLOCK_X;
1039  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1040  s->motion_val[0][current_fragment][0] = motion_x[k];
1041  s->motion_val[0][current_fragment][1] = motion_y[k];
1042  } else {
1043  s->motion_val[0][current_fragment][0] = motion_x[0];
1044  s->motion_val[0][current_fragment][1] = motion_y[0];
1045  }
1046  }
1047 
1048  if (s->chroma_y_shift) {
1049  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1050  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
1051  motion_x[2] + motion_x[3], 2);
1052  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
1053  motion_y[2] + motion_y[3], 2);
1054  }
1055  if (s->version <= 2) {
1056  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1057  motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
1058  }
1059  frag = mb_y * s->fragment_width[1] + mb_x;
1060  s->motion_val[1][frag][0] = motion_x[0];
1061  s->motion_val[1][frag][1] = motion_y[0];
1062  } else if (s->chroma_x_shift) {
1063  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1064  motion_x[0] = RSHIFT(motion_x[0] + motion_x[1], 1);
1065  motion_y[0] = RSHIFT(motion_y[0] + motion_y[1], 1);
1066  motion_x[1] = RSHIFT(motion_x[2] + motion_x[3], 1);
1067  motion_y[1] = RSHIFT(motion_y[2] + motion_y[3], 1);
1068  } else {
1069  motion_x[1] = motion_x[0];
1070  motion_y[1] = motion_y[0];
1071  }
1072  if (s->version <= 2) {
1073  motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
1074  motion_x[1] = (motion_x[1] >> 1) | (motion_x[1] & 1);
1075  }
1076  frag = 2 * mb_y * s->fragment_width[1] + mb_x;
1077  for (int k = 0; k < 2; k++) {
1078  s->motion_val[1][frag][0] = motion_x[k];
1079  s->motion_val[1][frag][1] = motion_y[k];
1080  frag += s->fragment_width[1];
1081  }
1082  } else {
1083  for (int k = 0; k < 4; k++) {
1084  frag = BLOCK_Y * s->fragment_width[1] + BLOCK_X;
1085  if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
1086  s->motion_val[1][frag][0] = motion_x[k];
1087  s->motion_val[1][frag][1] = motion_y[k];
1088  } else {
1089  s->motion_val[1][frag][0] = motion_x[0];
1090  s->motion_val[1][frag][1] = motion_y[0];
1091  }
1092  }
1093  }
1094  }
1095  }
1096  }
1097 
1098  return 0;
1099 }
1100 
{
    /* Assign a quantizer index (qpi) to every coded fragment.  The stream
     * partitions fragments with nqps-1 passes of run-length-coded bit flags:
     * in pass qpi, a set bit bumps a fragment past index qpi. */
    int num_blocks = s->total_num_coded_frags;

    for (int qpi = 0; qpi < s->nqps - 1 && num_blocks > 0; qpi++) {
        int i = 0, blocks_decoded = 0, num_blocks_at_qpi = 0;
        int bit, run_length;

        /* the initial xor is undone by the first "bit ^= 1" below, so the
         * first run uses the bit value actually read from the stream */
        bit        = get_bits1(gb) ^ 1;
        run_length = 0;

        do {
            /* after a maximum-length run the next bit is coded explicitly
             * instead of implicitly toggling */
            if (run_length == MAXIMUM_LONG_BIT_RUN)
                bit = get_bits1(gb);
            else
                bit ^= 1;

            run_length = get_vlc2(gb, superblock_run_length_vlc,
                                  SUPERBLOCK_VLC_BITS, 2);
            if (run_length == 34) /* escape: 12 extra bits extend the run */
                run_length += get_bits(gb, 12);
            blocks_decoded += run_length;

            if (!bit)
                num_blocks_at_qpi += run_length;

            /* walk the coded fragment list; j counts only fragments still
             * at the current qpi, which are the ones this run applies to */
            for (int j = 0; j < run_length; i++) {
                if (i >= s->total_num_coded_frags)
                    return -1;

                if (s->all_fragments[s->coded_fragment_list[0][i]].qpi == qpi) {
                    s->all_fragments[s->coded_fragment_list[0][i]].qpi += bit;
                    j++;
                }
            }
        } while (blocks_decoded < num_blocks && get_bits_left(gb) > 0);

        /* fragments that stayed at this qpi are finished */
        num_blocks -= num_blocks_at_qpi;
    }

    return 0;
}
1143 
1144 static inline int get_eob_run(GetBitContext *gb, int token)
1145 {
1146  int v = eob_run_table[token].base;
1147  if (eob_run_table[token].bits)
1148  v += get_bits(gb, eob_run_table[token].bits);
1149  return v;
1150 }
1151 
1152 static inline int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
1153 {
1154  int bits_to_get, zero_run;
1155 
1156  bits_to_get = coeff_get_bits[token];
1157  if (bits_to_get)
1158  bits_to_get = get_bits(gb, bits_to_get);
1159  *coeff = coeff_tables[token][bits_to_get];
1160 
1161  zero_run = zero_run_base[token];
1162  if (zero_run_get_bits[token])
1163  zero_run += get_bits(gb, zero_run_get_bits[token]);
1164 
1165  return zero_run;
1166 }
1167 
/*
 * This function is called by unpack_dct_coeffs() to extract the VLCs from
 * the bitstream. The VLCs encode tokens which are used to unpack DCT
 * data. This function unpacks all the VLCs for either the Y plane or both
 * C planes, and is called for DC coefficients or different AC coefficient
 * levels (since different coefficient types require different VLC tables).
 *
 * This function returns a residual eob run. E.g., if a particular token gave
 * instructions to EOB the next 5 fragments and there were only 2 fragments
 * left in the current fragment range, 3 would be returned so that it could
 * be passed into the next call to this same function.
 */
                       const VLCElem *vlc_table, int coeff_index,
                       int plane,
                       int eob_run)
{
    int j = 0;
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int blocks_ended;
    int coeff_i = 0;
    int num_coeffs = s->num_coded_frags[plane][coeff_index];
    int16_t *dct_tokens = s->dct_tokens[plane][coeff_index];

    /* local references to structure members to avoid repeated dereferences */
    const int *coded_fragment_list = s->coded_fragment_list[plane];
    Vp3Fragment *all_fragments = s->all_fragments;

    if (num_coeffs < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid number of coefficients at level %d\n", coeff_index);
        return AVERROR_INVALIDDATA;
    }

    /* consume as much of the carried-over EOB run as this plane/level has
     * blocks; any remainder is carried into the next call */
    if (eob_run > num_coeffs) {
        coeff_i      =
        blocks_ended = num_coeffs;
        eob_run     -= num_coeffs;
    } else {
        coeff_i      =
        blocks_ended = eob_run;
        eob_run      = 0;
    }

    // insert fake EOB token to cover the split between planes or zzi
    if (blocks_ended)
        dct_tokens[j++] = blocks_ended << 2;

    while (coeff_i < num_coeffs && get_bits_left(gb) > 0) {
        /* decode a VLC into a token */
        token = get_vlc2(gb, vlc_table, 11, 3);
        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            eob_run = get_eob_run(gb, token);
            if (!eob_run)
                eob_run = INT_MAX; /* a zero-length run means "until further notice" */

            // record only the number of blocks ended in this plane,
            // any spill will be recorded in the next plane.
            if (eob_run > num_coeffs - coeff_i) {
                dct_tokens[j++] = TOKEN_EOB(num_coeffs - coeff_i);
                blocks_ended   += num_coeffs - coeff_i;
                eob_run        -= num_coeffs - coeff_i;
                coeff_i         = num_coeffs;
            } else {
                dct_tokens[j++] = TOKEN_EOB(eob_run);
                blocks_ended   += eob_run;
                coeff_i        += eob_run;
                eob_run         = 0;
            }
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                dct_tokens[j++] = TOKEN_ZERO_RUN(coeff, zero_run);
            } else {
                // Save DC into the fragment structure. DC prediction is
                // done in raster order, so the actual DC can't be in with
                // other tokens. We still need the token in dct_tokens[]
                // however, or else the structure collapses on itself.
                if (!coeff_index)
                    all_fragments[coded_fragment_list[coeff_i]].dc = coeff;

                dct_tokens[j++] = TOKEN_COEFF(coeff);
            }

            if (coeff_index + zero_run > 64) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "Invalid zero run of %d with %d coeffs left\n",
                       zero_run, 64 - coeff_index);
                zero_run = 64 - coeff_index;
            }

            // zero runs code multiple coefficients,
            // so don't try to decode coeffs for those higher levels
            for (int i = coeff_index + 1; i <= coeff_index + zero_run; i++)
                s->num_coded_frags[plane][i]--;
            coeff_i++;
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }

    if (blocks_ended > s->num_coded_frags[plane][coeff_index])
        av_log(s->avctx, AV_LOG_ERROR, "More blocks ended than coded!\n");

    // decrement the number of blocks that have higher coefficients for each
    // EOB run at this level
    if (blocks_ended)
        for (int i = coeff_index + 1; i < 64; i++)
            s->num_coded_frags[plane][i] -= blocks_ended;

    // setup the next buffer
    if (plane < 2)
        s->dct_tokens[plane + 1][coeff_index] = dct_tokens + j;
    else if (coeff_index < 63)
        s->dct_tokens[0][coeff_index + 1] = dct_tokens + j;

    return eob_run;
}
1291 
1293  int first_fragment,
1294  int fragment_width,
1295  int fragment_height);
/*
 * This function unpacks all of the DCT coefficient data from the
 * bitstream.  DC levels for all three planes are decoded first (with
 * reverse DC prediction applied), then AC levels 1..63, threading the
 * residual EOB run through every unpack_vlcs() call.
 */
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    int residual_eob_run = 0;
    const VLCElem *y_tables[64], *c_tables[64];

    s->dct_tokens[0][0] = s->dct_tokens_base;

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    /* unpack the Y plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_y_table], 0,
                                   0, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* reverse prediction of the Y-plane DC coefficients */
    reverse_dc_prediction(s, 0, s->fragment_width[0], s->fragment_height[0]);

    /* unpack the C plane DC coefficients */
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   1, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;
    residual_eob_run = unpack_vlcs(s, gb, coeff_vlc[dc_c_table], 0,
                                   2, residual_eob_run);
    if (residual_eob_run < 0)
        return residual_eob_run;

    /* reverse prediction of the C-plane DC coefficients */
    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        reverse_dc_prediction(s, s->fragment_start[1],
                              s->fragment_width[1], s->fragment_height[1]);
        reverse_dc_prediction(s, s->fragment_start[2],
                              s->fragment_width[1], s->fragment_height[1]);
    }

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;
    /* fetch the AC table indexes */
    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of AC VLC tables: each of the four table groups covers
     * a band of coefficient levels and lives at a fixed offset (16/32/48/64)
     * past the DC tables in coeff_vlc[] */
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        y_tables[i] = coeff_vlc[ac_y_table + 16];
        c_tables[i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        y_tables[i] = coeff_vlc[ac_y_table + 32];
        c_tables[i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        y_tables[i] = coeff_vlc[ac_y_table + 48];
        c_tables[i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        y_tables[i] = coeff_vlc[ac_y_table + 64];
        c_tables[i] = coeff_vlc[ac_c_table + 64];
    }

    /* decode all AC coefficients */
    for (int i = 1; i <= 63; i++) {
        residual_eob_run = unpack_vlcs(s, gb, y_tables[i], i,
                                       0, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;

        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       1, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
        residual_eob_run = unpack_vlcs(s, gb, c_tables[i], i,
                                       2, residual_eob_run);
        if (residual_eob_run < 0)
            return residual_eob_run;
    }

    return 0;
}
1395 
1396 #if CONFIG_VP4_DECODER
/**
 * Decode the full coefficient token list for one fragment (VP4 orders
 * tokens per-block rather than per-level).  eob_tracker[] carries pending
 * EOB runs per coefficient level instead of TOKEN_EOB(value); a dummy
 * TOKEN_EOB(0) value is emitted so that vp3_dequant still works.
 *
 * @return < 0 on error
 */
static int vp4_unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                           const VLCElem *const vlc_tables[64],
                           int plane, int eob_tracker[64], int fragment)
{
    int token;
    int zero_run = 0;
    int16_t coeff = 0;
    int coeff_i = 0;
    int eob_run;

    /* a pending EOB run at this level ends the block without reading bits */
    while (!eob_tracker[coeff_i]) {
        if (get_bits_left(gb) < 1)
            return AVERROR_INVALIDDATA;

        token = get_vlc2(gb, vlc_tables[coeff_i], 11, 3);

        /* use the token to get a zero run, a coefficient, and an eob run */
        if ((unsigned) token <= 6U) {
            eob_run = get_eob_run(gb, token);
            *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
            /* minus one: this block consumes the first element of the run */
            eob_tracker[coeff_i] = eob_run - 1;
            return 0;
        } else if (token >= 0) {
            zero_run = get_coeff(gb, token, &coeff);

            if (zero_run) {
                if (coeff_i + zero_run > 64) {
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "Invalid zero run of %d with %d coeffs left\n",
                           zero_run, 64 - coeff_i);
                    zero_run = 64 - coeff_i;
                }
                *s->dct_tokens[plane][coeff_i]++ = TOKEN_ZERO_RUN(coeff, zero_run);
                coeff_i += zero_run;
            } else {
                /* raw DC is stashed in the fragment; prediction is added later */
                if (!coeff_i)
                    s->all_fragments[fragment].dc = coeff;

                *s->dct_tokens[plane][coeff_i]++ = TOKEN_COEFF(coeff);
            }
            coeff_i++;
            if (coeff_i >= 64) /* > 64 occurs when there is a zero_run overflow */
                return 0; /* stop */
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
            return -1;
        }
    }
    /* consume one element of the pending EOB run for this block */
    *s->dct_tokens[plane][coeff_i]++ = TOKEN_EOB(0);
    eob_tracker[coeff_i]--;
    return 0;
}
1455 
1456 static void vp4_dc_predictor_reset(VP4Predictor *p)
1457 {
1458  p->dc = 0;
1459  p->type = VP4_DC_UNDEFINED;
1460 }
1461 
1462 static void vp4_dc_pred_before(const Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1463 {
1464  for (int i = 0; i < 4; i++)
1465  dc_pred[0][i + 1] = s->dc_pred_row[sb_x * 4 + i];
1466 
1467  for (int j = 1; j < 5; j++)
1468  for (int i = 0; i < 4; i++)
1469  vp4_dc_predictor_reset(&dc_pred[j][i + 1]);
1470 }
1471 
1472 static void vp4_dc_pred_after(Vp3DecodeContext *s, VP4Predictor dc_pred[6][6], int sb_x)
1473 {
1474  for (int i = 0; i < 4; i++)
1475  s->dc_pred_row[sb_x * 4 + i] = dc_pred[4][i + 1];
1476 
1477  for (int i = 1; i < 5; i++)
1478  dc_pred[i][0] = dc_pred[i][4];
1479 }
1480 
1481 /* note: dc_pred points to the current block */
1482 static int vp4_dc_pred(const Vp3DecodeContext *s, const VP4Predictor * dc_pred, const int * last_dc, int type, int plane)
1483 {
1484  int count = 0;
1485  int dc = 0;
1486 
1487  if (dc_pred[-6].type == type) {
1488  dc += dc_pred[-6].dc;
1489  count++;
1490  }
1491 
1492  if (dc_pred[6].type == type) {
1493  dc += dc_pred[6].dc;
1494  count++;
1495  }
1496 
1497  if (count != 2 && dc_pred[-1].type == type) {
1498  dc += dc_pred[-1].dc;
1499  count++;
1500  }
1501 
1502  if (count != 2 && dc_pred[1].type == type) {
1503  dc += dc_pred[1].dc;
1504  count++;
1505  }
1506 
1507  /* using division instead of shift to correctly handle negative values */
1508  return count == 2 ? dc / 2 : last_dc[type];
1509 }
1510 
1511 static void vp4_set_tokens_base(Vp3DecodeContext *s)
1512 {
1513  int16_t *base = s->dct_tokens_base;
1514  for (int plane = 0; plane < 3; plane++) {
1515  for (int i = 0; i < 64; i++) {
1516  s->dct_tokens[plane][i] = base;
1517  base += s->fragment_width[!!plane] * s->fragment_height[!!plane];
1518  }
1519  }
1520 }
1521 
/* Unpack all DCT coefficient data for a VP4 frame: tokens are decoded
 * per-fragment (in superblock raster order, blocks in hilbert_offset
 * order within each superblock) and DC prediction is applied on the fly
 * through a sliding 6x6 predictor window. */
static int vp4_unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
{
    const VLCElem *const *coeff_vlc = s->coeff_vlc->vlc_tabs;
    int dc_y_table;
    int dc_c_table;
    int ac_y_table;
    int ac_c_table;
    const VLCElem *tables[2][64]; /* [luma/chroma][coefficient level] */
    int eob_tracker[64];
    VP4Predictor dc_pred[6][6];
    int last_dc[NB_VP4_DC_TYPES];

    if (get_bits_left(gb) < 16)
        return AVERROR_INVALIDDATA;

    /* fetch the DC table indexes */
    dc_y_table = get_bits(gb, 4);
    dc_c_table = get_bits(gb, 4);

    ac_y_table = get_bits(gb, 4);
    ac_c_table = get_bits(gb, 4);

    /* build tables of DC/AC VLC tables */

    /* DC table group */
    tables[0][0] = coeff_vlc[dc_y_table];
    tables[1][0] = coeff_vlc[dc_c_table];
    for (int i = 1; i <= 5; i++) {
        /* AC VLC table group 1 */
        tables[0][i] = coeff_vlc[ac_y_table + 16];
        tables[1][i] = coeff_vlc[ac_c_table + 16];
    }
    for (int i = 6; i <= 14; i++) {
        /* AC VLC table group 2 */
        tables[0][i] = coeff_vlc[ac_y_table + 32];
        tables[1][i] = coeff_vlc[ac_c_table + 32];
    }
    for (int i = 15; i <= 27; i++) {
        /* AC VLC table group 3 */
        tables[0][i] = coeff_vlc[ac_y_table + 48];
        tables[1][i] = coeff_vlc[ac_c_table + 48];
    }
    for (int i = 28; i <= 63; i++) {
        /* AC VLC table group 4 */
        tables[0][i] = coeff_vlc[ac_y_table + 64];
        tables[1][i] = coeff_vlc[ac_c_table + 64];
    }

    vp4_set_tokens_base(s);

    memset(last_dc, 0, sizeof(last_dc));

    for (int plane = 0; plane < ((s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 1 : 3); plane++) {
        memset(eob_tracker, 0, sizeof(eob_tracker));

        /* initialise dc prediction */
        for (int i = 0; i < s->fragment_width[!!plane]; i++)
            vp4_dc_predictor_reset(&s->dc_pred_row[i]);

        for (int j = 0; j < 6; j++)
            for (int i = 0; i < 6; i++)
                vp4_dc_predictor_reset(&dc_pred[j][i]);

        /* 4x4-fragment superblocks, raster order */
        for (int sb_y = 0; sb_y * 4 < s->fragment_height[!!plane]; sb_y++) {
            for (int sb_x = 0; sb_x *4 < s->fragment_width[!!plane]; sb_x++) {
                vp4_dc_pred_before(s, dc_pred, sb_x);
                for (int j = 0; j < 16; j++) {
                    int hx = hilbert_offset[j][0];
                    int hy = hilbert_offset[j][1];
                    int x = 4 * sb_x + hx;
                    int y = 4 * sb_y + hy;
                    /* +1: the window has a one-slot border for neighbours */
                    VP4Predictor *this_dc_pred = &dc_pred[hy + 1][hx + 1];
                    int fragment, dc_block_type;

                    /* superblocks may overhang the plane edge */
                    if (x >= s->fragment_width[!!plane] || y >= s->fragment_height[!!plane])
                        continue;

                    fragment = s->fragment_start[plane] + y * s->fragment_width[!!plane] + x;

                    if (s->all_fragments[fragment].coding_method == MODE_COPY)
                        continue;

                    if (vp4_unpack_vlcs(s, gb, tables[!!plane], plane, eob_tracker, fragment) < 0)
                        return -1;

                    dc_block_type = vp4_pred_block_type_map[s->all_fragments[fragment].coding_method];

                    /* apply DC prediction to the raw DC decoded above */
                    s->all_fragments[fragment].dc +=
                        vp4_dc_pred(s, this_dc_pred, last_dc, dc_block_type, plane);

                    this_dc_pred->type = dc_block_type,
                    this_dc_pred->dc = last_dc[dc_block_type] = s->all_fragments[fragment].dc;
                }
                vp4_dc_pred_after(s, dc_pred, sb_x);
            }
        }
    }

    /* rewind the token pointers that decoding advanced */
    vp4_set_tokens_base(s);

    return 0;
}
1624 #endif
1625 
/*
 * This function reverses the DC prediction for each coded fragment in
 * the frame. Much of this function is adapted directly from the original
 * VP3 source code.
 */
#define COMPATIBLE_FRAME(x) \
    (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
#define DC_COEFF(u) s->all_fragments[u].dc

                                  int first_fragment,
                                  int fragment_width,
                                  int fragment_height)
{
#define PUL 8
#define PU 4
#define PUR 2
#define PL 1

    int i = first_fragment;

    int predicted_dc;

    /* DC values for the left, up-left, up, and up-right fragments */
    int vl, vul, vu, vur;

    /* indexes for the left, up-left, up, and up-right fragments */
    int l, ul, u, ur;

    /*
     * The 4 fields mean:
     * 0: up-left multiplier
     * 1: up multiplier
     * 2: up-right multiplier
     * 3: left multiplier
     * (indexed by the PUL|PU|PUR|PL availability bitmask; results are
     * divided by 128 below)
     */
    static const int predictor_transform[16][4] = {
        {    0,   0,   0,   0 },
        {    0,   0,   0, 128 }, // PL
        {    0,   0, 128,   0 }, // PUR
        {    0,   0,  53,  75 }, // PUR|PL
        {    0, 128,   0,   0 }, // PU
        {    0,  64,   0,  64 }, // PU |PL
        {    0, 128,   0,   0 }, // PU |PUR
        {    0,   0,  53,  75 }, // PU |PUR|PL
        {  128,   0,   0,   0 }, // PUL
        {    0,   0,   0, 128 }, // PUL|PL
        {   64,   0,  64,   0 }, // PUL|PUR
        {    0,   0,  53,  75 }, // PUL|PUR|PL
        {    0, 128,   0,   0 }, // PUL|PU
        { -104, 116,   0, 116 }, // PUL|PU |PL
        {   24,  80,  24,   0 }, // PUL|PU |PUR
        { -104, 116,   0, 116 }  // PUL|PU |PUR|PL
    };

    /* This table shows which types of blocks can use other blocks for
     * prediction. For example, INTRA is the only mode in this table to
     * have a frame number of 0. That means INTRA blocks can only predict
     * from other INTRA blocks. There are 2 golden frame coding types;
     * blocks encoded in these modes can only predict from other blocks
     * that were encoded with one of these 2 modes. */
    static const unsigned char compatible_frame[9] = {
        1, /* MODE_INTER_NO_MV */
        0, /* MODE_INTRA */
        1, /* MODE_INTER_PLUS_MV */
        1, /* MODE_INTER_LAST_MV */
        1, /* MODE_INTER_PRIOR_MV */
        2, /* MODE_USING_GOLDEN */
        2, /* MODE_GOLDEN_MV */
        1, /* MODE_INTER_FOUR_MV */
        3  /* MODE_COPY */
    };
    int current_frame_type;

    /* there is a last DC predictor for each of the 3 frame types */
    short last_dc[3];

    int transform = 0;

    vul =
    vu  =
    vur =
    vl  = 0;
    last_dc[0] =
    last_dc[1] =
    last_dc[2] = 0;

    /* for each fragment row... */
    for (int y = 0; y < fragment_height; y++) {
        /* for each fragment in a row... */
        for (int x = 0; x < fragment_width; x++, i++) {

            /* reverse prediction if this block was coded */
            if (s->all_fragments[i].coding_method != MODE_COPY) {
                current_frame_type =
                    compatible_frame[s->all_fragments[i].coding_method];

                /* collect the DCs of the available, frame-compatible
                 * neighbours and build the availability bitmask */
                transform = 0;
                if (x) {
                    l  = i - 1;
                    vl = DC_COEFF(l);
                    if (COMPATIBLE_FRAME(l))
                        transform |= PL;
                }
                if (y) {
                    u  = i - fragment_width;
                    vu = DC_COEFF(u);
                    if (COMPATIBLE_FRAME(u))
                        transform |= PU;
                    if (x) {
                        ul  = i - fragment_width - 1;
                        vul = DC_COEFF(ul);
                        if (COMPATIBLE_FRAME(ul))
                            transform |= PUL;
                    }
                    if (x + 1 < fragment_width) {
                        ur  = i - fragment_width + 1;
                        vur = DC_COEFF(ur);
                        if (COMPATIBLE_FRAME(ur))
                            transform |= PUR;
                    }
                }

                if (transform == 0) {
                    /* if there were no fragments to predict from, use last
                     * DC saved */
                    predicted_dc = last_dc[current_frame_type];
                } else {
                    /* apply the appropriate predictor transform */
                    predicted_dc =
                        (predictor_transform[transform][0] * vul) +
                        (predictor_transform[transform][1] * vu) +
                        (predictor_transform[transform][2] * vur) +
                        (predictor_transform[transform][3] * vl);

                    predicted_dc /= 128;

                    /* check for outranging on the [ul u l] and
                     * [ul u ur l] predictors */
                    if ((transform == 15) || (transform == 13)) {
                        if (FFABS(predicted_dc - vu) > 128)
                            predicted_dc = vu;
                        else if (FFABS(predicted_dc - vl) > 128)
                            predicted_dc = vl;
                        else if (FFABS(predicted_dc - vul) > 128)
                            predicted_dc = vul;
                    }
                }

                /* at long last, apply the predictor */
                DC_COEFF(i) += predicted_dc;
                /* save the DC */
                last_dc[current_frame_type] = DC_COEFF(i);
            }
        }
    }
}
1783 
1784 static void apply_loop_filter(Vp3DecodeContext *s, int plane,
1785  int ystart, int yend)
1786 {
1787  int *bounding_values = s->bounding_values_array + 127;
1788 
1789  int width = s->fragment_width[!!plane];
1790  int height = s->fragment_height[!!plane];
1791  int fragment = s->fragment_start[plane] + ystart * width;
1792  ptrdiff_t stride = s->current_frame.f->linesize[plane];
1793  uint8_t *plane_data = s->current_frame.f->data[plane];
1794  if (!s->flipped_image)
1795  stride = -stride;
1796  plane_data += s->data_offset[plane] + 8 * ystart * stride;
1797 
1798  for (int y = ystart; y < yend; y++) {
1799  for (int x = 0; x < width; x++) {
1800  /* This code basically just deblocks on the edges of coded blocks.
1801  * However, it has to be much more complicated because of the
1802  * brain damaged deblock ordering used in VP3/Theora. Order matters
1803  * because some pixels get filtered twice. */
1804  if (s->all_fragments[fragment].coding_method != MODE_COPY) {
1805  /* do not perform left edge filter for left columns frags */
1806  if (x > 0) {
1807  s->vp3dsp.h_loop_filter(
1808  plane_data + 8 * x,
1809  stride, bounding_values);
1810  }
1811 
1812  /* do not perform top edge filter for top row fragments */
1813  if (y > 0) {
1814  s->vp3dsp.v_loop_filter(
1815  plane_data + 8 * x,
1816  stride, bounding_values);
1817  }
1818 
1819  /* do not perform right edge filter for right column
1820  * fragments or if right fragment neighbor is also coded
1821  * in this frame (it will be filtered in next iteration) */
1822  if ((x < width - 1) &&
1823  (s->all_fragments[fragment + 1].coding_method == MODE_COPY)) {
1824  s->vp3dsp.h_loop_filter(
1825  plane_data + 8 * x + 8,
1826  stride, bounding_values);
1827  }
1828 
1829  /* do not perform bottom edge filter for bottom row
1830  * fragments or if bottom fragment neighbor is also coded
1831  * in this frame (it will be filtered in the next row) */
1832  if ((y < height - 1) &&
1833  (s->all_fragments[fragment + width].coding_method == MODE_COPY)) {
1834  s->vp3dsp.v_loop_filter(
1835  plane_data + 8 * x + 8 * stride,
1836  stride, bounding_values);
1837  }
1838  }
1839 
1840  fragment++;
1841  }
1842  plane_data += 8 * stride;
1843  }
1844 }
1845 
/**
 * Pull DCT tokens from the 64 levels to decode and dequant the coefficients
 * for the next block in coding order.
 *
 * Token low 2 bits select the type: 0 = EOB run, 1 = zero run + coeff,
 * 2 = plain coeff; the payload lives in the upper bits.
 *
 * @return the index of the last coefficient level populated for this block
 */
static inline int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag,
                              int plane, int inter, int16_t block[64])
{
    const int16_t *dequantizer = s->qmat[frag->qpi][inter][plane];
    const uint8_t *perm = s->idct_scantable;
    int i = 0;

    do {
        int token = *s->dct_tokens[plane][i];
        switch (token & 3) {
        case 0: // EOB
            if (--token < 4) // 0-3 are token types so the EOB run must now be 0
                s->dct_tokens[plane][i]++;
            else
                // leave the decremented run count in place for later blocks
                *s->dct_tokens[plane][i] = token & ~3;
            goto end;
        case 1: // zero run
            s->dct_tokens[plane][i]++;
            i += (token >> 2) & 0x7f; // run length in bits 2-8
            if (i > 63) {
                av_log(s->avctx, AV_LOG_ERROR, "Coefficient index overflow\n");
                return i;
            }
            block[perm[i]] = (token >> 9) * dequantizer[perm[i]];
            i++;
            break;
        case 2: // coeff
            block[perm[i]] = (token >> 2) * dequantizer[perm[i]];
            s->dct_tokens[plane][i++]++;
            break;
        default: // shouldn't happen
            return i;
        }
    } while (i < 64);
    // return value is expected to be a valid level
    i--;
end:
    // the actual DC+prediction is in the fragment structure
    block[0] = frag->dc * s->qmat[0][inter][plane][0];
    return i;
}
1891 
/**
 * called when all pixels up to row y are complete:
 * reports decoding progress to other frame threads and, if requested,
 * hands the finished band to the user's draw_horiz_band callback
 */
{
    int h, cy;

    if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
        int y_flipped = s->flipped_image ? s->height - y : y;

        /* At the end of the frame, report INT_MAX instead of the height of
         * the frame. This makes the other threads' ff_thread_await_progress()
         * calls cheaper, because they don't have to clip their values. */
        ff_progress_frame_report(&s->current_frame,
                                 y_flipped == s->height ? INT_MAX
                                                        : y_flipped - 1);
    }

    if (!s->avctx->draw_horiz_band)
        return;

    /* band covers everything since the last reported slice end */
    h = y - s->last_slice_end;
    s->last_slice_end = y;
    y -= h;

    if (!s->flipped_image)
        y = s->height - y - h;

    cy        = y >> s->chroma_y_shift;
    offset[0] = s->current_frame.f->linesize[0] * y;
    offset[1] = s->current_frame.f->linesize[1] * cy;
    offset[2] = s->current_frame.f->linesize[2] * cy;
    for (int i = 3; i < AV_NUM_DATA_POINTERS; i++)
        offset[i] = 0;

    emms_c();
    s->avctx->draw_horiz_band(s->avctx, s->current_frame.f, offset, y, 3, h);
}
1931 
/**
 * Wait for the reference frame of the current fragment.
 * The progress value is in luma pixel rows.
 */
                                int motion_y, int y)
{
    const ProgressFrame *ref_frame;
    int ref_row;
    /* odd motion_y means half-pel interpolation, which reads one extra row */
    int border = motion_y & 1;

    if (fragment->coding_method == MODE_USING_GOLDEN ||
        fragment->coding_method == MODE_GOLDEN_MV)
        ref_frame = &s->golden_frame;
    else
        ref_frame = &s->last_frame;

    /* last luma row touched by this fragment's motion compensation */
    ref_row = y + (motion_y >> 1);
    ref_row = FFMAX(FFABS(ref_row), ref_row + 8 + border);

}
1954 
1955 #if CONFIG_VP4_DECODER
/**
 * Run the VP4 loop filter on a 12x12 window around the motion-compensated
 * source block before it is used for prediction, writing the filtered 9x9
 * result into temp (edge_emu_buffer).
 *
 * @return non-zero if temp (edge_emu_buffer) was populated
 */
static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int motion_y, int bx, int by,
                              const uint8_t *motion_source, ptrdiff_t stride,
                              int src_x, int src_y, uint8_t *temp)
{
    /* chroma motion vectors carry extra fractional precision */
    int motion_shift = plane ? 4 : 2;
    int subpel_mask = plane ? 3 : 1;
    int *bounding_values = s->bounding_values_array + 127;

    int x, y;
    int x2, y2;
    int x_subpel, y_subpel;
    int x_offset, y_offset;

    int block_width = plane ? 8 : 16;
    int plane_width = s->width >> (plane && s->chroma_x_shift);
    int plane_height = s->height >> (plane && s->chroma_y_shift);

#define loop_stride 12
    uint8_t loop[12 * loop_stride]; /* 12x12 scratch window */

    /* using division instead of shift to correctly handle negative values */
    x = 8 * bx + motion_x / motion_shift;
    y = 8 * by + motion_y / motion_shift;

    x_subpel = motion_x & subpel_mask;
    y_subpel = motion_y & subpel_mask;

    if (x_subpel || y_subpel) {
        /* sub-pel prediction reads one extra row/column on the low side */
        x--;
        y--;

        if (x_subpel)
            x = FFMIN(x, x + FFSIGN(motion_x));

        if (y_subpel)
            y = FFMIN(y, y + FFSIGN(motion_y));

        x2 = x + block_width;
        y2 = y + block_width;

        /* entirely outside the plane: nothing to filter */
        if (x2 < 0 || x2 >= plane_width || y2 < 0 || y2 >= plane_height)
            return 0;

        /* distance from the source block to the next 8-pixel grid edge,
         * i.e. where the loop filter has to be applied inside the window */
        x_offset = (-(x + 2) & 7) + 2;
        y_offset = (-(y + 2) & 7) + 2;

        av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel));

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

        if (x_offset <= 8 + x_subpel)
            ff_vp3dsp_h_loop_filter_12(loop + x_offset, loop_stride, bounding_values);

        if (y_offset <= 8 + y_subpel)
            ff_vp3dsp_v_loop_filter_12(loop + y_offset*loop_stride, loop_stride, bounding_values);

    } else {

        x_offset = -x & 7;
        y_offset = -y & 7;

        /* block is aligned to the 8-pixel grid: no edges cross it */
        if (!x_offset && !y_offset)
            return 0;

        s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1,
                                 loop_stride, stride,
                                 12, 12, src_x - 1, src_y - 1,
                                 plane_width,
                                 plane_height);

#define safe_loop_filter(name, ptr, stride, bounding_values) \
    if (VP3_LOOP_FILTER_NO_UNALIGNED_SUPPORT && (uintptr_t)(ptr) & 7) \
        s->vp3dsp.name##_unaligned(ptr, stride, bounding_values); \
    else \
        s->vp3dsp.name(ptr, stride, bounding_values);

        if (x_offset)
            safe_loop_filter(h_loop_filter, loop + loop_stride + x_offset + 1, loop_stride, bounding_values);

        if (y_offset)
            safe_loop_filter(v_loop_filter, loop + (y_offset + 1)*loop_stride + 1, loop_stride, bounding_values);
    }

    /* copy the filtered 9x9 core of the window out to the caller's buffer */
    for (int i = 0; i < 9; i++)
        memcpy(temp + i*stride, loop + (i + 1) * loop_stride + 1, 9);

    return 1;
}
2051 #endif
2052 
2053 /*
2054  * Perform the final rendering for a particular slice of data.
2055  * The slice number ranges from 0..(c_superblock_height - 1).
2056  */
2057 static void render_slice(Vp3DecodeContext *s, int slice)
2058 {
2059  int16_t *block = s->block;
2060  int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
2061  /* When decoding keyframes, the earlier frames may not be available,
2062  * so we just use the current frame in this case instead;
2063  * it also avoid using undefined pointer arithmetic. Nothing is
2064  * ever read from these frames in case of a keyframe. */
2065  const AVFrame *last_frame = s->last_frame.f ?
2066  s->last_frame.f : s->current_frame.f;
2067  const AVFrame *golden_frame = s->golden_frame.f ?
2068  s->golden_frame.f : s->current_frame.f;
2069  int motion_halfpel_index;
2070  int first_pixel;
2071 
2072  if (slice >= s->c_superblock_height)
2073  return;
2074 
2075  for (int plane = 0; plane < 3; plane++) {
2076  uint8_t *output_plane = s->current_frame.f->data[plane] +
2077  s->data_offset[plane];
2078  const uint8_t *last_plane = last_frame->data[plane] +
2079  s->data_offset[plane];
2080  const uint8_t *golden_plane = golden_frame->data[plane] +
2081  s->data_offset[plane];
2082  ptrdiff_t stride = s->current_frame.f->linesize[plane];
2083  int plane_width = s->width >> (plane && s->chroma_x_shift);
2084  int plane_height = s->height >> (plane && s->chroma_y_shift);
2085  const int8_t (*motion_val)[2] = s->motion_val[!!plane];
2086 
2087  int sb_y = slice << (!plane && s->chroma_y_shift);
2088  int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
2089  int slice_width = plane ? s->c_superblock_width
2090  : s->y_superblock_width;
2091 
2092  int fragment_width = s->fragment_width[!!plane];
2093  int fragment_height = s->fragment_height[!!plane];
2094  int fragment_start = s->fragment_start[plane];
2095 
2096  int do_await = !plane && HAVE_THREADS &&
2097  (s->avctx->active_thread_type & FF_THREAD_FRAME);
2098 
2099  if (!s->flipped_image)
2100  stride = -stride;
2101  if (CONFIG_GRAY && plane && (s->avctx->flags & AV_CODEC_FLAG_GRAY))
2102  continue;
2103 
2104  /* for each superblock row in the slice (both of them)... */
2105  for (; sb_y < slice_height; sb_y++) {
2106  /* for each superblock in a row... */
2107  for (int sb_x = 0; sb_x < slice_width; sb_x++) {
2108  /* for each block in a superblock... */
2109  for (int j = 0; j < 16; j++) {
2110  int x = 4 * sb_x + hilbert_offset[j][0];
2111  int y = 4 * sb_y + hilbert_offset[j][1];
2112  int fragment = y * fragment_width + x;
2113 
2114  int i = fragment_start + fragment;
2115 
2116  // bounds check
2117  if (x >= fragment_width || y >= fragment_height)
2118  continue;
2119 
2120  first_pixel = 8 * y * stride + 8 * x;
2121 
2122  if (do_await &&
2123  s->all_fragments[i].coding_method != MODE_INTRA)
2124  await_reference_row(s, &s->all_fragments[i],
2125  motion_val[fragment][1],
2126  (16 * y) >> s->chroma_y_shift);
2127 
2128  /* transform if this block was coded */
2129  if (s->all_fragments[i].coding_method != MODE_COPY) {
2130  const uint8_t *motion_source;
2131  if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
2132  (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
2133  motion_source = golden_plane;
2134  else
2135  motion_source = last_plane;
2136 
2137  motion_source += first_pixel;
2138  motion_halfpel_index = 0;
2139 
2140  /* sort out the motion vector if this fragment is coded
2141  * using a motion vector method */
2142  if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
2143  (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
2144  int src_x, src_y;
2145  int standard_mc = 1;
2146  motion_x = motion_val[fragment][0];
2147  motion_y = motion_val[fragment][1];
2148 #if CONFIG_VP4_DECODER
2149  if (plane && s->version >= 2) {
2150  motion_x = (motion_x >> 1) | (motion_x & 1);
2151  motion_y = (motion_y >> 1) | (motion_y & 1);
2152  }
2153 #endif
2154 
2155  src_x = (motion_x >> 1) + 8 * x;
2156  src_y = (motion_y >> 1) + 8 * y;
2157 
2158  motion_halfpel_index = motion_x & 0x01;
2159  motion_source += (motion_x >> 1);
2160 
2161  motion_halfpel_index |= (motion_y & 0x01) << 1;
2162  motion_source += ((motion_y >> 1) * stride);
2163 
2164 #if CONFIG_VP4_DECODER
2165  if (s->version >= 2) {
2166  uint8_t *temp = s->edge_emu_buffer;
2167  if (stride < 0)
2168  temp -= 8 * stride;
2169  if (vp4_mc_loop_filter(s, plane, motion_val[fragment][0], motion_val[fragment][1], x, y, motion_source, stride, src_x, src_y, temp)) {
2170  motion_source = temp;
2171  standard_mc = 0;
2172  }
2173  }
2174 #endif
2175 
2176  if (standard_mc && (
2177  src_x < 0 || src_y < 0 ||
2178  src_x + 9 >= plane_width ||
2179  src_y + 9 >= plane_height)) {
2180  uint8_t *temp = s->edge_emu_buffer;
2181  if (stride < 0)
2182  temp -= 8 * stride;
2183 
2184  s->vdsp.emulated_edge_mc(temp, motion_source,
2185  stride, stride,
2186  9, 9, src_x, src_y,
2187  plane_width,
2188  plane_height);
2189  motion_source = temp;
2190  }
2191  }
2192 
2193  /* first, take care of copying a block from either the
2194  * previous or the golden frame */
2195  if (s->all_fragments[i].coding_method != MODE_INTRA) {
2196  /* Note, it is possible to implement all MC cases
2197  * with put_no_rnd_pixels_l2 which would look more
2198  * like the VP3 source but this would be slower as
2199  * put_no_rnd_pixels_tab is better optimized */
2200  if (motion_halfpel_index != 3) {
2201  s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
2202  output_plane + first_pixel,
2203  motion_source, stride, 8);
2204  } else {
2205  /* d is 0 if motion_x and _y have the same sign,
2206  * else -1 */
2207  int d = (motion_x ^ motion_y) >> 31;
2208  s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
2209  motion_source - d,
2210  motion_source + stride + 1 + d,
2211  stride, 8);
2212  }
2213  }
2214 
2215  /* invert DCT and place (or add) in final output */
2216 
2217  if (s->all_fragments[i].coding_method == MODE_INTRA) {
2218  vp3_dequant(s, s->all_fragments + i,
2219  plane, 0, block);
2220  s->vp3dsp.idct_put(output_plane + first_pixel,
2221  stride,
2222  block);
2223  } else {
2224  if (vp3_dequant(s, s->all_fragments + i,
2225  plane, 1, block)) {
2226  s->vp3dsp.idct_add(output_plane + first_pixel,
2227  stride,
2228  block);
2229  } else {
2230  s->vp3dsp.idct_dc_add(output_plane + first_pixel,
2231  stride, block);
2232  }
2233  }
2234  } else {
2235  /* copy directly from the previous frame */
2236  s->hdsp.put_pixels_tab[1][0](
2237  output_plane + first_pixel,
2238  last_plane + first_pixel,
2239  stride, 8);
2240  }
2241  }
2242  }
2243 
2244  // Filter up to the last row in the superblock row
2245  if (s->version < 2 && !s->skip_loop_filter)
2246  apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
2247  FFMIN(4 * sb_y + 3, fragment_height - 1));
2248  }
2249  }
2250 
2251  /* this looks like a good place for slice dispatch... */
2252  /* algorithm:
2253  * if (slice == s->macroblock_height - 1)
2254  * dispatch (both last slice & 2nd-to-last slice);
2255  * else if (slice > 0)
2256  * dispatch (slice - 1);
2257  */
2258 
2259  vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
2260  s->height - 16));
2261 }
2262 
2263 static av_cold void init_tables_once(void)
2264 {
2266 
2268  SUPERBLOCK_VLC_BITS, 34,
2270  NULL, 0, 0, 1, 0);
2271 
2274  NULL, 0, 0, 0, 0);
2275 
2277  &motion_vector_vlc_table[0][1], 2,
2278  &motion_vector_vlc_table[0][0], 2, 1,
2279  -31, 0);
2280 
2282  mode_code_vlc_len, 1,
2283  NULL, 0, 0, 0, 0);
2284 
2285 #if CONFIG_VP4_DECODER
2286  for (int j = 0; j < 2; j++)
2287  for (int i = 0; i < 7; i++) {
2288  vp4_mv_vlc_table[j][i] =
2290  &vp4_mv_vlc[j][i][0][1], 2,
2291  &vp4_mv_vlc[j][i][0][0], 2, 1,
2292  -31, 0);
2293  }
2294 
2295  /* version >= 2 */
2296  for (int i = 0; i < 2; i++) {
2297  block_pattern_vlc[i] =
2298  ff_vlc_init_tables(&state, 5, 14,
2299  &vp4_block_pattern_vlc[i][0][1], 2, 1,
2300  &vp4_block_pattern_vlc[i][0][0], 2, 1, 0);
2301  }
2302 #endif
2303 }
2304 
2305 /// Allocate tables for per-frame data in Vp3DecodeContext
2307 {
2308  Vp3DecodeContext *s = avctx->priv_data;
2309  int y_fragment_count, c_fragment_count;
2310 
2311  free_tables(avctx);
2312 
2313  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2314  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2315 
2316  /* superblock_coding is used by unpack_superblocks (VP3/Theora) and vp4_unpack_macroblocks (VP4) */
2317  s->superblock_coding = av_mallocz(FFMAX(s->superblock_count, s->yuv_macroblock_count));
2318  s->all_fragments = av_calloc(s->fragment_count, sizeof(*s->all_fragments));
2319 
2320  s-> kf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2321  s->nkf_coded_fragment_list = av_calloc(s->fragment_count, sizeof(int));
2322  memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment));
2323 
2324  s->dct_tokens_base = av_calloc(s->fragment_count,
2325  64 * sizeof(*s->dct_tokens_base));
2326  s->motion_val[0] = av_calloc(y_fragment_count, sizeof(*s->motion_val[0]));
2327  s->motion_val[1] = av_calloc(c_fragment_count, sizeof(*s->motion_val[1]));
2328 
2329  /* work out the block mapping tables */
2330  s->superblock_fragments = av_calloc(s->superblock_count, 16 * sizeof(int));
2331  s->macroblock_coding = av_mallocz(s->macroblock_count + 1);
2332 
2333  s->dc_pred_row = av_malloc_array(s->y_superblock_width * 4, sizeof(*s->dc_pred_row));
2334 
2335  if (!s->superblock_coding || !s->all_fragments ||
2336  !s->dct_tokens_base || !s->kf_coded_fragment_list ||
2337  !s->nkf_coded_fragment_list ||
2338  !s->superblock_fragments || !s->macroblock_coding ||
2339  !s->dc_pred_row ||
2340  !s->motion_val[0] || !s->motion_val[1]) {
2341  return -1;
2342  }
2343 
2345 
2346  return 0;
2347 }
2348 
2349 
2350 static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
2351 {
2352  CoeffVLCs *vlcs = obj;
2353 
2354  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++)
2355  ff_vlc_free(&vlcs->vlcs[i]);
2356 }
2357 
2359 {
2360  static AVOnce init_static_once = AV_ONCE_INIT;
2361  Vp3DecodeContext *s = avctx->priv_data;
2362  int ret;
2363  int c_width;
2364  int c_height;
2365  int y_fragment_count, c_fragment_count;
2366 
2367  if (avctx->codec_tag == MKTAG('V', 'P', '4', '0')) {
2368  s->version = 3;
2369 #if !CONFIG_VP4_DECODER
2370  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2372 #endif
2373  } else if (avctx->codec_tag == MKTAG('V', 'P', '3', '0'))
2374  s->version = 0;
2375  else
2376  s->version = 1;
2377 
2378  s->avctx = avctx;
2379  s->width = FFALIGN(avctx->coded_width, 16);
2380  s->height = FFALIGN(avctx->coded_height, 16);
2381  if (s->width < 18)
2382  return AVERROR_PATCHWELCOME;
2383  if (avctx->codec_id != AV_CODEC_ID_THEORA)
2384  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2386  ff_hpeldsp_init(&s->hdsp, avctx->flags | AV_CODEC_FLAG_BITEXACT);
2387  ff_videodsp_init(&s->vdsp, 8);
2388  ff_vp3dsp_init(&s->vp3dsp);
2389 
2390  for (int i = 0; i < 64; i++) {
2391 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))
2392  s->idct_permutation[i] = TRANSPOSE(i);
2393  s->idct_scantable[i] = TRANSPOSE(ff_zigzag_direct[i]);
2394 #undef TRANSPOSE
2395  }
2396 
2397  /* initialize to an impossible value which will force a recalculation
2398  * in the first frame decode */
2399  for (int i = 0; i < 3; i++)
2400  s->qps[i] = -1;
2401 
2402  ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &s->chroma_x_shift, &s->chroma_y_shift);
2403  if (ret)
2404  return ret;
2405 
2406  s->y_superblock_width = (s->width + 31) / 32;
2407  s->y_superblock_height = (s->height + 31) / 32;
2408  s->y_superblock_count = s->y_superblock_width * s->y_superblock_height;
2409 
2410  /* work out the dimensions for the C planes */
2411  c_width = s->width >> s->chroma_x_shift;
2412  c_height = s->height >> s->chroma_y_shift;
2413  s->c_superblock_width = (c_width + 31) / 32;
2414  s->c_superblock_height = (c_height + 31) / 32;
2415  s->c_superblock_count = s->c_superblock_width * s->c_superblock_height;
2416 
2417  s->superblock_count = s->y_superblock_count + (s->c_superblock_count * 2);
2418  s->u_superblock_start = s->y_superblock_count;
2419  s->v_superblock_start = s->u_superblock_start + s->c_superblock_count;
2420 
2421  s->macroblock_width = (s->width + 15) / 16;
2422  s->macroblock_height = (s->height + 15) / 16;
2423  s->macroblock_count = s->macroblock_width * s->macroblock_height;
2424  s->c_macroblock_width = (c_width + 15) / 16;
2425  s->c_macroblock_height = (c_height + 15) / 16;
2426  s->c_macroblock_count = s->c_macroblock_width * s->c_macroblock_height;
2427  s->yuv_macroblock_count = s->macroblock_count + 2 * s->c_macroblock_count;
2428 
2429  s->fragment_width[0] = s->width / FRAGMENT_PIXELS;
2430  s->fragment_height[0] = s->height / FRAGMENT_PIXELS;
2431  s->fragment_width[1] = s->fragment_width[0] >> s->chroma_x_shift;
2432  s->fragment_height[1] = s->fragment_height[0] >> s->chroma_y_shift;
2433 
2434  /* fragment count covers all 8x8 blocks for all 3 planes */
2435  y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
2436  c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
2437  s->fragment_count = y_fragment_count + 2 * c_fragment_count;
2438  s->fragment_start[1] = y_fragment_count;
2439  s->fragment_start[2] = y_fragment_count + c_fragment_count;
2440 
2441  if (!s->theora_tables) {
2442  for (int i = 0; i < 64; i++) {
2443  s->coded_dc_scale_factor[0][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_y_dc_scale_factor[i];
2444  s->coded_dc_scale_factor[1][i] = s->version < 2 ? vp31_dc_scale_factor[i] : vp4_uv_dc_scale_factor[i];
2445  s->coded_ac_scale_factor[i] = s->version < 2 ? vp31_ac_scale_factor[i] : vp4_ac_scale_factor[i];
2446  s->base_matrix[0][i] = s->version < 2 ? vp31_intra_y_dequant[i] : vp4_generic_dequant[i];
2447  s->base_matrix[1][i] = s->version < 2 ? ff_mjpeg_std_chrominance_quant_tbl[i] : vp4_generic_dequant[i];
2448  s->base_matrix[2][i] = s->version < 2 ? vp31_inter_dequant[i] : vp4_generic_dequant[i];
2449  s->filter_limit_values[i] = s->version < 2 ? vp31_filter_limit_values[i] : vp4_filter_limit_values[i];
2450  }
2451 
2452  for (int inter = 0; inter < 2; inter++) {
2453  for (int plane = 0; plane < 3; plane++) {
2454  s->qr_count[inter][plane] = 1;
2455  s->qr_size[inter][plane][0] = 63;
2456  s->qr_base[inter][plane][0] =
2457  s->qr_base[inter][plane][1] = 2 * inter + (!!plane) * !inter;
2458  }
2459  }
2460  }
2461 
2462  if (!avctx->internal->is_copy) {
2463  CoeffVLCs *vlcs = av_refstruct_alloc_ext(sizeof(*s->coeff_vlc), 0,
2465  if (!vlcs)
2466  return AVERROR(ENOMEM);
2467 
2468  s->coeff_vlc = vlcs;
2469 
2470  if (!s->theora_tables) {
2471  const uint8_t (*bias_tabs)[32][2];
2472 
2473  /* init VLC tables */
2474  bias_tabs = CONFIG_VP4_DECODER && s->version >= 2 ? vp4_bias : vp3_bias;
2475  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2476  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, 32,
2477  &bias_tabs[i][0][1], 2,
2478  &bias_tabs[i][0][0], 2, 1,
2479  0, 0, avctx);
2480  if (ret < 0)
2481  return ret;
2482  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2483  }
2484  } else {
2485  for (int i = 0; i < FF_ARRAY_ELEMS(vlcs->vlcs); i++) {
2486  const HuffTable *tab = &s->huffman_table[i];
2487 
2488  ret = ff_vlc_init_from_lengths(&vlcs->vlcs[i], 11, tab->nb_entries,
2489  &tab->entries[0].len, sizeof(*tab->entries),
2490  &tab->entries[0].sym, sizeof(*tab->entries), 1,
2491  0, 0, avctx);
2492  if (ret < 0)
2493  return ret;
2494  vlcs->vlc_tabs[i] = vlcs->vlcs[i].table;
2495  }
2496  }
2497  }
2498 
2499  ff_thread_once(&init_static_once, init_tables_once);
2500 
2501  return allocate_tables(avctx);
2502 }
2503 
2504 /// Release and shuffle frames after decode finishes
2505 static void update_frames(AVCodecContext *avctx)
2506 {
2507  Vp3DecodeContext *s = avctx->priv_data;
2508 
2509  if (s->keyframe)
2510  ff_progress_frame_replace(&s->golden_frame, &s->current_frame);
2511 
2512  /* shuffle frames */
2513  ff_progress_frame_unref(&s->last_frame);
2514  FFSWAP(ProgressFrame, s->last_frame, s->current_frame);
2515 }
2516 
2517 #if HAVE_THREADS
2518 static void ref_frames(Vp3DecodeContext *dst, const Vp3DecodeContext *src)
2519 {
2520  ff_progress_frame_replace(&dst->current_frame, &src->current_frame);
2521  ff_progress_frame_replace(&dst->golden_frame, &src->golden_frame);
2522  ff_progress_frame_replace(&dst->last_frame, &src->last_frame);
2523 }
2524 
2525 static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
2526 {
2527  Vp3DecodeContext *s = dst->priv_data;
2528  const Vp3DecodeContext *s1 = src->priv_data;
2529  int qps_changed = 0;
2530 
2531  av_refstruct_replace(&s->coeff_vlc, s1->coeff_vlc);
2532 
2533  // copy previous frame data
2534  ref_frames(s, s1);
2535  if (!s1->current_frame.f ||
2536  s->width != s1->width || s->height != s1->height) {
2537  return -1;
2538  }
2539 
2540  if (s != s1) {
2541  s->keyframe = s1->keyframe;
2542 
2543  // copy qscale data if necessary
2544  for (int i = 0; i < 3; i++) {
2545  if (s->qps[i] != s1->qps[1]) {
2546  qps_changed = 1;
2547  memcpy(&s->qmat[i], &s1->qmat[i], sizeof(s->qmat[i]));
2548  }
2549  }
2550 
2551  if (s->qps[0] != s1->qps[0])
2552  memcpy(&s->bounding_values_array, &s1->bounding_values_array,
2553  sizeof(s->bounding_values_array));
2554 
2555  if (qps_changed) {
2556  memcpy(s->qps, s1->qps, sizeof(s->qps));
2557  memcpy(s->last_qps, s1->last_qps, sizeof(s->last_qps));
2558  s->nqps = s1->nqps;
2559  }
2560  }
2561 
2562  update_frames(dst);
2563  return 0;
2564 }
2565 #endif
2566 
2568  int *got_frame, AVPacket *avpkt)
2569 {
2570  const uint8_t *buf = avpkt->data;
2571  int buf_size = avpkt->size;
2572  Vp3DecodeContext *s = avctx->priv_data;
2573  GetBitContext gb;
2574  int ret;
2575 
2576  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
2577  return ret;
2578 
2579 #if CONFIG_THEORA_DECODER
2580  if (s->theora && get_bits1(&gb)) {
2581  int type = get_bits(&gb, 7);
2582  skip_bits_long(&gb, 6*8); /* "theora" */
2583 
2584  if (s->avctx->active_thread_type&FF_THREAD_FRAME) {
2585  av_log(avctx, AV_LOG_ERROR, "midstream reconfiguration with multithreading is unsupported, try -threads 1\n");
2586  return AVERROR_PATCHWELCOME;
2587  }
2588  if (type == 0) {
2589  vp3_decode_end(avctx);
2590  ret = theora_decode_header(avctx, &gb);
2591 
2592  if (ret >= 0)
2593  ret = vp3_decode_init(avctx);
2594  if (ret < 0) {
2595  vp3_decode_end(avctx);
2596  return ret;
2597  }
2598  return buf_size;
2599  } else if (type == 2) {
2600  vp3_decode_end(avctx);
2601  ret = theora_decode_tables(avctx, &gb);
2602  if (ret >= 0)
2603  ret = vp3_decode_init(avctx);
2604  if (ret < 0) {
2605  vp3_decode_end(avctx);
2606  return ret;
2607  }
2608  return buf_size;
2609  }
2610 
2611  av_log(avctx, AV_LOG_ERROR,
2612  "Header packet passed to frame decoder, skipping\n");
2613  return -1;
2614  }
2615 #endif
2616 
2617  s->keyframe = !get_bits1(&gb);
2618  if (!s->all_fragments) {
2619  av_log(avctx, AV_LOG_ERROR, "Data packet without prior valid headers\n");
2620  return -1;
2621  }
2622  if (!s->theora)
2623  skip_bits(&gb, 1);
2624  for (int i = 0; i < 3; i++)
2625  s->last_qps[i] = s->qps[i];
2626 
2627  s->nqps = 0;
2628  do {
2629  s->qps[s->nqps++] = get_bits(&gb, 6);
2630  } while (s->theora >= 0x030200 && s->nqps < 3 && get_bits1(&gb));
2631  for (int i = s->nqps; i < 3; i++)
2632  s->qps[i] = -1;
2633 
2634  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2635  av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%"PRId64": Q index = %d\n",
2636  s->keyframe ? "key" : "", avctx->frame_num + 1, s->qps[0]);
2637 
2638  s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
2639  avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
2640  : AVDISCARD_NONKEY);
2641 
2642  if (s->qps[0] != s->last_qps[0])
2644 
2645  for (int i = 0; i < s->nqps; i++)
2646  // reinit all dequantizers if the first one changed, because
2647  // the DC of the first quantizer must be used for all matrices
2648  if (s->qps[i] != s->last_qps[i] || s->qps[0] != s->last_qps[0])
2649  init_dequantizer(s, i);
2650 
2651  if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
2652  return buf_size;
2653 
2654  ff_progress_frame_unref(&s->current_frame);
2655  ret = ff_progress_frame_get_buffer(avctx, &s->current_frame,
2657  if (ret < 0) {
2658  // Don't goto error here, as one can't report progress on or
2659  // unref a non-existent frame.
2660  return ret;
2661  }
2662  s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
2664  if (s->keyframe)
2665  s->current_frame.f->flags |= AV_FRAME_FLAG_KEY;
2666  else
2667  s->current_frame.f->flags &= ~AV_FRAME_FLAG_KEY;
2668 
2669  if (!s->edge_emu_buffer) {
2670  s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
2671  if (!s->edge_emu_buffer) {
2672  ret = AVERROR(ENOMEM);
2673  goto error;
2674  }
2675  }
2676 
2677  if (s->keyframe) {
2678  if (!s->theora) {
2679  skip_bits(&gb, 4); /* width code */
2680  skip_bits(&gb, 4); /* height code */
2681  if (s->version) {
2682  int version = get_bits(&gb, 5);
2683 #if !CONFIG_VP4_DECODER
2684  if (version >= 2) {
2685  av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n");
2687  }
2688 #endif
2689  s->version = version;
2690  if (avctx->frame_num == 0)
2691  av_log(s->avctx, AV_LOG_DEBUG,
2692  "VP version: %d\n", s->version);
2693  }
2694  }
2695  if (s->version || s->theora) {
2696  if (get_bits1(&gb))
2697  av_log(s->avctx, AV_LOG_ERROR,
2698  "Warning, unsupported keyframe coding type?!\n");
2699  skip_bits(&gb, 2); /* reserved? */
2700 
2701 #if CONFIG_VP4_DECODER
2702  if (s->version >= 2) {
2703  int mb_height, mb_width;
2704  int mb_width_mul, mb_width_div, mb_height_mul, mb_height_div;
2705 
2706  mb_height = get_bits(&gb, 8);
2707  mb_width = get_bits(&gb, 8);
2708  if (mb_height != s->macroblock_height ||
2709  mb_width != s->macroblock_width)
2710  avpriv_request_sample(s->avctx, "macroblock dimension mismatch");
2711 
2712  mb_width_mul = get_bits(&gb, 5);
2713  mb_width_div = get_bits(&gb, 3);
2714  mb_height_mul = get_bits(&gb, 5);
2715  mb_height_div = get_bits(&gb, 3);
2716  if (mb_width_mul != 1 || mb_width_div != 1 || mb_height_mul != 1 || mb_height_div != 1)
2717  avpriv_request_sample(s->avctx, "unexpected macroblock dimension multiplier/divider");
2718 
2719  if (get_bits(&gb, 2))
2720  avpriv_request_sample(s->avctx, "unknown bits");
2721  }
2722 #endif
2723  }
2724  } else {
2725  if (!s->golden_frame.f) {
2726  av_log(s->avctx, AV_LOG_WARNING,
2727  "vp3: first frame not a keyframe\n");
2728 
2729  if ((ret = ff_progress_frame_get_buffer(avctx, &s->golden_frame,
2730  AV_GET_BUFFER_FLAG_REF)) < 0)
2731  goto error;
2732  s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
2733  ff_progress_frame_replace(&s->last_frame, &s->golden_frame);
2734  ff_progress_frame_report(&s->golden_frame, INT_MAX);
2735  }
2736  }
2737  ff_thread_finish_setup(avctx);
2738 
2739  memset(s->all_fragments, 0, s->fragment_count * sizeof(Vp3Fragment));
2740 
2741  if (s->version < 2) {
2742  if ((ret = unpack_superblocks(s, &gb)) < 0) {
2743  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_superblocks\n");
2744  goto error;
2745  }
2746 #if CONFIG_VP4_DECODER
2747  } else {
2748  if ((ret = vp4_unpack_macroblocks(s, &gb)) < 0) {
2749  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_macroblocks\n");
2750  goto error;
2751  }
2752 #endif
2753  }
2754  if ((ret = unpack_modes(s, &gb)) < 0) {
2755  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_modes\n");
2756  goto error;
2757  }
2758  if (ret = unpack_vectors(s, &gb)) {
2759  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_vectors\n");
2760  goto error;
2761  }
2762  if ((ret = unpack_block_qpis(s, &gb)) < 0) {
2763  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_block_qpis\n");
2764  goto error;
2765  }
2766 
2767  if (s->version < 2) {
2768  if ((ret = unpack_dct_coeffs(s, &gb)) < 0) {
2769  av_log(s->avctx, AV_LOG_ERROR, "error in unpack_dct_coeffs\n");
2770  goto error;
2771  }
2772 #if CONFIG_VP4_DECODER
2773  } else {
2774  if ((ret = vp4_unpack_dct_coeffs(s, &gb)) < 0) {
2775  av_log(s->avctx, AV_LOG_ERROR, "error in vp4_unpack_dct_coeffs\n");
2776  goto error;
2777  }
2778 #endif
2779  }
2780 
2781  for (int i = 0; i < 3; i++) {
2782  int height = s->height >> (i && s->chroma_y_shift);
2783  if (s->flipped_image)
2784  s->data_offset[i] = 0;
2785  else
2786  s->data_offset[i] = (height - 1) * s->current_frame.f->linesize[i];
2787  }
2788 
2789  s->last_slice_end = 0;
2790  for (int i = 0; i < s->c_superblock_height; i++)
2791  render_slice(s, i);
2792 
2793  // filter the last row
2794  if (s->version < 2)
2795  for (int i = 0; i < 3; i++) {
2796  int row = (s->height >> (3 + (i && s->chroma_y_shift))) - 1;
2797  apply_loop_filter(s, i, row, row + 1);
2798  }
2799  vp3_draw_horiz_band(s, s->height);
2800 
2801  /* output frame, offset as needed */
2802  if ((ret = av_frame_ref(frame, s->current_frame.f)) < 0)
2803  return ret;
2804 
2805  frame->crop_left = s->offset_x;
2806  frame->crop_right = avctx->coded_width - avctx->width - s->offset_x;
2807  frame->crop_top = s->offset_y;
2808  frame->crop_bottom = avctx->coded_height - avctx->height - s->offset_y;
2809 
2810  *got_frame = 1;
2811 
2812  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2813  update_frames(avctx);
2814 
2815  return buf_size;
2816 
2817 error:
2818  ff_progress_frame_report(&s->current_frame, INT_MAX);
2819 
2820  if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_FRAME))
2821  av_frame_unref(s->current_frame.f);
2822 
2823  return ret;
2824 }
2825 
2826 static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length,
2827  AVCodecContext *avctx)
2828 {
2829  if (get_bits1(gb)) {
2830  int token;
2831  if (huff->nb_entries >= 32) { /* overflow */
2832  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2833  return -1;
2834  }
2835  token = get_bits(gb, 5);
2836  ff_dlog(avctx, "code length %d, curr entry %d, token %d\n",
2837  length, huff->nb_entries, token);
2838  huff->entries[huff->nb_entries++] = (HuffEntry){ length, token };
2839  } else {
2840  /* The following bound follows from the fact that nb_entries <= 32. */
2841  if (length >= 31) { /* overflow */
2842  av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
2843  return -1;
2844  }
2845  length++;
2846  if (read_huffman_tree(huff, gb, length, avctx))
2847  return -1;
2848  if (read_huffman_tree(huff, gb, length, avctx))
2849  return -1;
2850  }
2851  return 0;
2852 }
2853 
2854 #if CONFIG_THEORA_DECODER
2855 static const enum AVPixelFormat theora_pix_fmts[4] = {
2857 };
2858 
2859 static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
2860 {
2861  Vp3DecodeContext *s = avctx->priv_data;
2862  int visible_width, visible_height, colorspace;
2863  uint8_t offset_x = 0, offset_y = 0;
2864  int ret;
2865  AVRational fps, aspect;
2866 
2867  if (get_bits_left(gb) < 206)
2868  return AVERROR_INVALIDDATA;
2869 
2870  s->theora_header = 0;
2871  s->theora = get_bits(gb, 24);
2872  av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
2873  if (!s->theora) {
2874  s->theora = 1;
2875  avpriv_request_sample(s->avctx, "theora 0");
2876  }
2877 
2878  /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
2879  * but previous versions have the image flipped relative to vp3 */
2880  if (s->theora < 0x030200) {
2881  s->flipped_image = 1;
2882  av_log(avctx, AV_LOG_DEBUG,
2883  "Old (<alpha3) Theora bitstream, flipped image\n");
2884  }
2885 
2886  visible_width =
2887  s->width = get_bits(gb, 16) << 4;
2888  visible_height =
2889  s->height = get_bits(gb, 16) << 4;
2890 
2891  if (s->theora >= 0x030200) {
2892  visible_width = get_bits(gb, 24);
2893  visible_height = get_bits(gb, 24);
2894 
2895  offset_x = get_bits(gb, 8); /* offset x */
2896  offset_y = get_bits(gb, 8); /* offset y, from bottom */
2897  }
2898 
2899  /* sanity check */
2900  if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 ||
2901  visible_width + offset_x > s->width ||
2902  visible_height + offset_y > s->height ||
2903  visible_width < 18
2904  ) {
2905  av_log(avctx, AV_LOG_ERROR,
2906  "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n",
2907  visible_width, visible_height, offset_x, offset_y,
2908  s->width, s->height);
2909  return AVERROR_INVALIDDATA;
2910  }
2911 
2912  fps.num = get_bits_long(gb, 32);
2913  fps.den = get_bits_long(gb, 32);
2914  if (fps.num && fps.den) {
2915  if (fps.num < 0 || fps.den < 0) {
2916  av_log(avctx, AV_LOG_ERROR, "Invalid framerate\n");
2917  return AVERROR_INVALIDDATA;
2918  }
2919  av_reduce(&avctx->framerate.den, &avctx->framerate.num,
2920  fps.den, fps.num, 1 << 30);
2921  }
2922 
2923  aspect.num = get_bits(gb, 24);
2924  aspect.den = get_bits(gb, 24);
2925  if (aspect.num && aspect.den) {
2927  &avctx->sample_aspect_ratio.den,
2928  aspect.num, aspect.den, 1 << 30);
2929  ff_set_sar(avctx, avctx->sample_aspect_ratio);
2930  }
2931 
2932  if (s->theora < 0x030200)
2933  skip_bits(gb, 5); /* keyframe frequency force */
2934  colorspace = get_bits(gb, 8);
2935  skip_bits(gb, 24); /* bitrate */
2936 
2937  skip_bits(gb, 6); /* quality hint */
2938 
2939  if (s->theora >= 0x030200) {
2940  skip_bits(gb, 5); /* keyframe frequency force */
2941  avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
2942  if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
2943  av_log(avctx, AV_LOG_ERROR, "Invalid pixel format\n");
2944  return AVERROR_INVALIDDATA;
2945  }
2946  skip_bits(gb, 3); /* reserved */
2947  } else
2948  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
2949 
2950  if (s->width < 18)
2951  return AVERROR_PATCHWELCOME;
2952  ret = ff_set_dimensions(avctx, s->width, s->height);
2953  if (ret < 0)
2954  return ret;
2955  if (!(avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP)) {
2956  avctx->width = visible_width;
2957  avctx->height = visible_height;
2958  // translate offsets from theora axis ([0,0] lower left)
2959  // to normal axis ([0,0] upper left)
2960  s->offset_x = offset_x;
2961  s->offset_y = s->height - visible_height - offset_y;
2962  }
2963 
2964  if (colorspace == 1)
2966  else if (colorspace == 2)
2968 
2969  if (colorspace == 1 || colorspace == 2) {
2970  avctx->colorspace = AVCOL_SPC_BT470BG;
2971  avctx->color_trc = AVCOL_TRC_BT709;
2972  }
2973 
2974  s->theora_header = 1;
2975  return 0;
2976 }
2977 
2978 static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
2979 {
2980  Vp3DecodeContext *s = avctx->priv_data;
2981  int n, matrices, ret;
2982 
2983  if (!s->theora_header)
2984  return AVERROR_INVALIDDATA;
2985 
2986  if (s->theora >= 0x030200) {
2987  n = get_bits(gb, 3);
2988  /* loop filter limit values table */
2989  if (n)
2990  for (int i = 0; i < 64; i++)
2991  s->filter_limit_values[i] = get_bits(gb, n);
2992  }
2993 
2994  if (s->theora >= 0x030200)
2995  n = get_bits(gb, 4) + 1;
2996  else
2997  n = 16;
2998  /* quality threshold table */
2999  for (int i = 0; i < 64; i++)
3000  s->coded_ac_scale_factor[i] = get_bits(gb, n);
3001 
3002  if (s->theora >= 0x030200)
3003  n = get_bits(gb, 4) + 1;
3004  else
3005  n = 16;
3006  /* dc scale factor table */
3007  for (int i = 0; i < 64; i++)
3008  s->coded_dc_scale_factor[0][i] =
3009  s->coded_dc_scale_factor[1][i] = get_bits(gb, n);
3010 
3011  if (s->theora >= 0x030200)
3012  matrices = get_bits(gb, 9) + 1;
3013  else
3014  matrices = 3;
3015 
3016  if (matrices > 384) {
3017  av_log(avctx, AV_LOG_ERROR, "invalid number of base matrixes\n");
3018  return -1;
3019  }
3020 
3021  for (int j = 0; j < matrices; j++)
3022  for (int i = 0; i < 64; i++)
3023  s->base_matrix[j][i] = get_bits(gb, 8);
3024 
3025  for (int inter = 0; inter <= 1; inter++) {
3026  for (int plane = 0; plane <= 2; plane++) {
3027  int newqr = 1;
3028  if (inter || plane > 0)
3029  newqr = get_bits1(gb);
3030  if (!newqr) {
3031  int qtj, plj;
3032  if (inter && get_bits1(gb)) {
3033  qtj = 0;
3034  plj = plane;
3035  } else {
3036  qtj = (3 * inter + plane - 1) / 3;
3037  plj = (plane + 2) % 3;
3038  }
3039  s->qr_count[inter][plane] = s->qr_count[qtj][plj];
3040  memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
3041  sizeof(s->qr_size[0][0]));
3042  memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
3043  sizeof(s->qr_base[0][0]));
3044  } else {
3045  int qri = 0;
3046  int qi = 0;
3047 
3048  for (;;) {
3049  int i = get_bits(gb, av_log2(matrices - 1) + 1);
3050  if (i >= matrices) {
3051  av_log(avctx, AV_LOG_ERROR,
3052  "invalid base matrix index\n");
3053  return -1;
3054  }
3055  s->qr_base[inter][plane][qri] = i;
3056  if (qi >= 63)
3057  break;
3058  i = get_bits(gb, av_log2(63 - qi) + 1) + 1;
3059  s->qr_size[inter][plane][qri++] = i;
3060  qi += i;
3061  }
3062 
3063  if (qi > 63) {
3064  av_log(avctx, AV_LOG_ERROR, "invalid qi %d > 63\n", qi);
3065  return -1;
3066  }
3067  s->qr_count[inter][plane] = qri;
3068  }
3069  }
3070  }
3071 
3072  /* Huffman tables */
3073  for (int i = 0; i < FF_ARRAY_ELEMS(s->huffman_table); i++) {
3074  s->huffman_table[i].nb_entries = 0;
3075  if ((ret = read_huffman_tree(&s->huffman_table[i], gb, 0, avctx)) < 0)
3076  return ret;
3077  }
3078 
3079  s->theora_tables = 1;
3080 
3081  return 0;
3082 }
3083 
3084 static av_cold int theora_decode_init(AVCodecContext *avctx)
3085 {
3086  Vp3DecodeContext *s = avctx->priv_data;
3087  GetBitContext gb;
3088  int ptype;
3089  const uint8_t *header_start[3];
3090  int header_len[3];
3091  int ret;
3092 
3093  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3094 
3095  s->theora = 1;
3096 
3097  if (!avctx->extradata_size) {
3098  av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
3099  return -1;
3100  }
3101 
3103  42, header_start, header_len) < 0) {
3104  av_log(avctx, AV_LOG_ERROR, "Corrupt extradata\n");
3105  return -1;
3106  }
3107 
3108  for (int i = 0; i < 3; i++) {
3109  if (header_len[i] <= 0)
3110  continue;
3111  ret = init_get_bits8(&gb, header_start[i], header_len[i]);
3112  if (ret < 0)
3113  return ret;
3114 
3115  ptype = get_bits(&gb, 8);
3116 
3117  if (!(ptype & 0x80)) {
3118  av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
3119 // return -1;
3120  }
3121 
3122  // FIXME: Check for this as well.
3123  skip_bits_long(&gb, 6 * 8); /* "theora" */
3124 
3125  switch (ptype) {
3126  case 0x80:
3127  if (theora_decode_header(avctx, &gb) < 0)
3128  return -1;
3129  break;
3130  case 0x81:
3131 // FIXME: is this needed? it breaks sometimes
3132 // theora_decode_comments(avctx, gb);
3133  break;
3134  case 0x82:
3135  if (theora_decode_tables(avctx, &gb))
3136  return -1;
3137  break;
3138  default:
3139  av_log(avctx, AV_LOG_ERROR,
3140  "Unknown Theora config packet: %d\n", ptype & ~0x80);
3141  break;
3142  }
3143  if (ptype != 0x81 && get_bits_left(&gb) >= 8U)
3144  av_log(avctx, AV_LOG_WARNING,
3145  "%d bits left in packet %X\n",
3146  get_bits_left(&gb), ptype);
3147  if (s->theora < 0x030200)
3148  break;
3149  }
3150 
3151  return vp3_decode_init(avctx);
3152 }
3153 
3154 const FFCodec ff_theora_decoder = {
3155  .p.name = "theora",
3156  CODEC_LONG_NAME("Theora"),
3157  .p.type = AVMEDIA_TYPE_VIDEO,
3158  .p.id = AV_CODEC_ID_THEORA,
3159  .priv_data_size = sizeof(Vp3DecodeContext),
3160  .init = theora_decode_init,
3161  .close = vp3_decode_end,
3163  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3165  .flush = vp3_decode_flush,
3166  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3167  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3170 };
3171 #endif
3172 
3174  .p.name = "vp3",
3175  CODEC_LONG_NAME("On2 VP3"),
3176  .p.type = AVMEDIA_TYPE_VIDEO,
3177  .p.id = AV_CODEC_ID_VP3,
3178  .priv_data_size = sizeof(Vp3DecodeContext),
3179  .init = vp3_decode_init,
3180  .close = vp3_decode_end,
3182  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3184  .flush = vp3_decode_flush,
3185  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3186  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3188 };
3189 
3190 #if CONFIG_VP4_DECODER
3191 const FFCodec ff_vp4_decoder = {
3192  .p.name = "vp4",
3193  CODEC_LONG_NAME("On2 VP4"),
3194  .p.type = AVMEDIA_TYPE_VIDEO,
3195  .p.id = AV_CODEC_ID_VP4,
3196  .priv_data_size = sizeof(Vp3DecodeContext),
3197  .init = vp3_decode_init,
3198  .close = vp3_decode_end,
3200  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
3202  .flush = vp3_decode_flush,
3203  UPDATE_THREAD_CONTEXT(vp3_update_thread_context),
3204  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
3206 };
3207 #endif
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:32
vp4_ac_scale_factor
static const uint16_t vp4_ac_scale_factor[64]
Definition: vp4data.h:64
ff_progress_frame_report
void ff_progress_frame_report(ProgressFrame *f, int n)
Notify later decoding threads when part of their reference frame is ready.
Definition: decode.c:1913
vp4data.h
PUL
#define PUL
allocate_tables
static av_cold int allocate_tables(AVCodecContext *avctx)
Allocate tables for per-frame data in Vp3DecodeContext.
Definition: vp3.c:2306
vp3_dequant
static int vp3_dequant(Vp3DecodeContext *s, const Vp3Fragment *frag, int plane, int inter, int16_t block[64])
Pull DCT tokens from the 64 levels to decode and dequant the coefficients for the next block in codin...
Definition: vp3.c:1850
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:276
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
Vp3Fragment::dc
int16_t dc
Definition: vp3.c:70
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
ff_vlc_init_from_lengths
int ff_vlc_init_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:306
av_clip
#define av_clip
Definition: common.h:100
Vp3DecodeContext::offset_x
uint8_t offset_x
Definition: vp3.c:249
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
Definition: codec_internal.h:42
VP3DSPContext
Definition: vp3dsp.h:29
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:689
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
vp4_get_mv
static int vp4_get_mv(GetBitContext *gb, int axis, int last_motion)
Definition: vp3.c:889
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:659
mem_internal.h
Vp3DecodeContext::c_macroblock_height
int c_macroblock_height
Definition: vp3.c:239
zero_run_base
static const uint8_t zero_run_base[32]
Definition: vp3data.h:133
MODE_INTER_PRIOR_LAST
#define MODE_INTER_PRIOR_LAST
Definition: vp3.c:88
VP4Predictor
Definition: vp3.c:178
Vp3DecodeContext::idct_scantable
uint8_t idct_scantable[64]
Definition: vp3.c:210
thread.h
HuffEntry::len
uint8_t len
Definition: exr.c:97
AVRefStructOpaque
RefStruct is an API for creating reference-counted objects with minimal overhead.
Definition: refstruct.h:58
VP4Predictor::dc
int dc
Definition: vp3.c:179
get_bits_long
static unsigned int get_bits_long(GetBitContext *s, int n)
Read 0-32 bits.
Definition: get_bits.h:419
mode_code_vlc_len
static const uint8_t mode_code_vlc_len[8]
Definition: vp3data.h:97
superblock_run_length_vlc
static VLCElem superblock_run_length_vlc[88]
Definition: vp3.c:166
read_huffman_tree
static int read_huffman_tree(HuffTable *huff, GetBitContext *gb, int length, AVCodecContext *avctx)
Definition: vp3.c:2826
PUR
#define PUR
vp3dsp.h
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:652
u
#define u(width, name, range_min, range_max)
Definition: cbs_apv.c:68
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:558
ff_vp3_decoder
const FFCodec ff_vp3_decoder
Definition: vp3.c:3173
ff_progress_frame_get_buffer
int ff_progress_frame_get_buffer(AVCodecContext *avctx, ProgressFrame *f, int flags)
Wrapper around ff_progress_frame_alloc() and ff_thread_get_buffer().
Definition: decode.c:1873
Vp3DecodeContext::all_fragments
Vp3Fragment * all_fragments
Definition: vp3.c:246
mode_code_vlc
static VLCElem mode_code_vlc[24+2108 *CONFIG_VP4_DECODER]
Definition: vp3.c:171
Vp3DecodeContext::filter_limit_values
uint8_t filter_limit_values[64]
Definition: vp3.c:327
FFCodec
Definition: codec_internal.h:127
fragment_run_length_vlc
static VLCElem fragment_run_length_vlc[56]
Definition: vp3.c:167
motion_vector_vlc
static VLCElem motion_vector_vlc[112]
Definition: vp3.c:168
base
uint8_t base
Definition: vp3data.h:128
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
Vp3Fragment::coding_method
uint8_t coding_method
Definition: vp3.c:71
thread.h
unpack_superblocks
static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:470
render_slice
static void render_slice(Vp3DecodeContext *s, int slice)
Definition: vp3.c:2057
CoeffVLCs::vlc_tabs
const VLCElem * vlc_tabs[80]
Definition: vp3.c:195
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1375
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
Vp3DecodeContext::height
int height
Definition: vp3.c:203
vlc_tables
static VLCElem vlc_tables[VLC_TABLES_SIZE]
Definition: imc.c:115
AV_CODEC_FLAG2_IGNORE_CROP
#define AV_CODEC_FLAG2_IGNORE_CROP
Discard cropping information from SPS.
Definition: avcodec.h:355
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
fragment
Definition: dashdec.c:37
Vp3DecodeContext::y_superblock_count
int y_superblock_count
Definition: vp3.c:226
xiph.h
bit
#define bit(string, value)
Definition: cbs_mpeg2.c:56
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:379
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:551
close
static av_cold void close(AVCodecParserContext *s)
Definition: apv_parser.c:135
AVCodecInternal::is_copy
int is_copy
When using frame-threaded decoding, this field is set for the first worker thread (e....
Definition: internal.h:54
Vp3DecodeContext::superblock_fragments
int * superblock_fragments
Definition: vp3.c:316
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:696
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:333
Vp3DecodeContext::golden_frame
ProgressFrame golden_frame
Definition: vp3.c:205
get_coeff
static int get_coeff(GetBitContext *gb, int token, int16_t *coeff)
Definition: vp3.c:1152
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:131
Vp3DecodeContext::qr_count
uint8_t qr_count[2][3]
Definition: vp3.c:259
Vp3DecodeContext::hdsp
HpelDSPContext hdsp
Definition: vp3.c:211
vp4_mv_vlc
static const uint8_t vp4_mv_vlc[2][7][63][2]
Definition: vp4data.h:112
BLOCK_Y
#define BLOCK_Y
Definition: vp3.c:644
Vp3DecodeContext::y_superblock_width
int y_superblock_width
Definition: vp3.c:224
CODING_MODE_COUNT
#define CODING_MODE_COUNT
Definition: vp3.c:92
FFSIGN
#define FFSIGN(a)
Definition: common.h:75
CoeffVLCs
Definition: rv60dec.c:89
GetBitContext
Definition: get_bits.h:109
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
SET_CHROMA_MODES
#define SET_CHROMA_MODES
tables
Writing a table generator This documentation is preliminary Parts of the API are not good and should be changed Basic concepts A table generator consists of two *_tablegen c and *_tablegen h The h file will provide the variable declarations and initialization code for the tables
Definition: tablegen.txt:10
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
perm
perm
Definition: f_perms.c:75
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:3475
MODE_INTER_LAST_MV
#define MODE_INTER_LAST_MV
Definition: vp3.c:87
Vp3DecodeContext::y_superblock_height
int y_superblock_height
Definition: vp3.c:225
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
VP4_DC_GOLDEN
@ VP4_DC_GOLDEN
Definition: vp3.c:150
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
Vp3DecodeContext::offset_y
uint8_t offset_y
Definition: vp3.c:250
Vp3DecodeContext::theora
int theora
Definition: vp3.c:201
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:607
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:335
TRANSPOSE
#define TRANSPOSE(x)
AVRational::num
int num
Numerator.
Definition: rational.h:59
progressframe.h
refstruct.h
Vp3DecodeContext::num_kf_coded_fragment
int num_kf_coded_fragment[3]
Definition: vp3.c:299
TOKEN_ZERO_RUN
#define TOKEN_ZERO_RUN(coeff, zero_run)
Definition: vp3.c:283
vp4_pred_block_type_map
static const uint8_t vp4_pred_block_type_map[8]
Definition: vp3.c:155
FF_CODEC_CAP_USES_PROGRESSFRAMES
#define FF_CODEC_CAP_USES_PROGRESSFRAMES
The decoder might make use of the ProgressFrame API.
Definition: codec_internal.h:68
await_reference_row
static void await_reference_row(Vp3DecodeContext *s, const Vp3Fragment *fragment, int motion_y, int y)
Wait for the reference frame of the current fragment.
Definition: vp3.c:1936
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:645
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
motion_vector_vlc_table
static const uint8_t motion_vector_vlc_table[63][2]
Definition: vp3data.h:101
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:539
theora_decode_tables
static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
hilbert_offset
static const uint8_t hilbert_offset[16][2]
Definition: vp3.c:140
VLCInitState
For static VLCs, the number of bits can often be hardcoded at each get_vlc2() callsite.
Definition: vlc.h:220
emms_c
#define emms_c()
Definition: emms.h:63
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:515
Vp3DecodeContext::fragment_height
int fragment_height[2]
Definition: vp3.c:244
CoeffVLCs::vlcs
VLC vlcs[80]
Definition: vp3.c:196
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:346
s
#define s(width, name)
Definition: cbs_vp9.c:198
init_loop_filter
static void init_loop_filter(Vp3DecodeContext *s)
Definition: vp3.c:461
vp4_mv_table_selector
static const uint8_t vp4_mv_table_selector[32]
Definition: vp4data.h:105
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:411
vp3_decode_flush
static av_cold void vp3_decode_flush(AVCodecContext *avctx)
Definition: vp3.c:353
transform
static const int8_t transform[32][32]
Definition: dsp.c:27
HuffTable::nb_entries
uint8_t nb_entries
Definition: vp3.c:191
init_block_mapping
static int init_block_mapping(Vp3DecodeContext *s)
This function sets up all of the various blocks mappings: superblocks <-> fragments,...
Definition: vp3.c:386
SB_PARTIALLY_CODED
#define SB_PARTIALLY_CODED
Definition: vp3.c:76
bits
uint8_t bits
Definition: vp3data.h:128
SB_NOT_CODED
#define SB_NOT_CODED
Definition: vp3.c:75
av_refstruct_alloc_ext
static void * av_refstruct_alloc_ext(size_t size, unsigned flags, void *opaque, void(*free_cb)(AVRefStructOpaque opaque, void *obj))
A wrapper around av_refstruct_alloc_ext_c() for the common case of a non-const qualified opaque.
Definition: refstruct.h:94
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
eob_run_table
static const struct @300 eob_run_table[7]
Vp3Fragment::qpi
uint8_t qpi
Definition: vp3.c:72
ff_progress_frame_unref
void ff_progress_frame_unref(ProgressFrame *f)
Give up a reference to the underlying frame contained in a ProgressFrame and reset the ProgressFrame,...
Definition: decode.c:1896
ff_progress_frame_await
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_progress_frame_await() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_progress_frame_report() has been called on them. This includes draw_edges(). Porting codecs to frame threading
decode.h
get_bits.h
reverse_dc_prediction
static void reverse_dc_prediction(Vp3DecodeContext *s, int first_fragment, int fragment_width, int fragment_height)
Definition: vp3.c:1635
unpack_dct_coeffs
static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1300
ModeAlphabet
static const int ModeAlphabet[6][CODING_MODE_COUNT]
Definition: vp3.c:102
VP4_DC_INTRA
@ VP4_DC_INTRA
Definition: vp3.c:148
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
RSHIFT
#define RSHIFT(a, b)
Definition: common.h:56
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:331
AVCOL_PRI_BT470BG
@ AVCOL_PRI_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:643
MODE_USING_GOLDEN
#define MODE_USING_GOLDEN
Definition: vp3.c:89
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
Vp3DecodeContext::macroblock_width
int macroblock_width
Definition: vp3.c:235
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
Definition: common.h:74
Vp3DecodeContext::idct_permutation
uint8_t idct_permutation[64]
Definition: vp3.c:209
if
if(ret)
Definition: filter_design.txt:179
init_dequantizer
static void init_dequantizer(Vp3DecodeContext *s, int qpi)
Definition: vp3.c:419
MODE_INTER_FOURMV
#define MODE_INTER_FOURMV
Definition: vp3.c:91
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:95
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:232
Vp3DecodeContext::c_superblock_width
int c_superblock_width
Definition: vp3.c:227
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
coeff_tables
static const int16_t *const coeff_tables[32]
Definition: vp3data.h:332
Vp3DecodeContext::offset_x_warned
int offset_x_warned
Definition: vp3.c:251
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
free_vlc_tables
static av_cold void free_vlc_tables(AVRefStructOpaque unused, void *obj)
Definition: vp3.c:2350
HuffTable
Definition: vp3.c:189
PU
#define PU
unpack_modes
static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:786
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
Vp3DecodeContext::superblock_count
int superblock_count
Definition: vp3.c:223
ff_vp3dsp_h_loop_filter_12
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
theora_decode_header
static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:386
fragment_run_length_vlc_len
static const uint8_t fragment_run_length_vlc_len[30]
Definition: vp3data.h:92
vp4_bias
static const uint8_t vp4_bias[5 *16][32][2]
Definition: vp4data.h:329
ff_set_sar
int ff_set_sar(AVCodecContext *avctx, AVRational sar)
Check that the provided sample aspect ratio is valid and set it on the codec context.
Definition: utils.c:106
mathops.h
Vp3DecodeContext::theora_header
int theora_header
Definition: vp3.c:201
TOKEN_COEFF
#define TOKEN_COEFF(coeff)
Definition: vp3.c:284
vp4_y_dc_scale_factor
static const uint8_t vp4_y_dc_scale_factor[64]
Definition: vp4data.h:42
Vp3DecodeContext::skip_loop_filter
int skip_loop_filter
Definition: vp3.c:217
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:340
Vp3DecodeContext::last_qps
int last_qps[3]
Definition: vp3.c:221
Vp3DecodeContext::coeff_vlc
CoeffVLCs * coeff_vlc
The first 16 of the following VLCs are for the dc coefficients; the others are four groups of 16 VLCs...
Definition: vp3.c:306
AV_CODEC_ID_VP4
@ AV_CODEC_ID_VP4
Definition: codec_id.h:300
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:646
jpegquanttables.h
vp31_ac_scale_factor
static const uint16_t vp31_ac_scale_factor[64]
Definition: vp3data.h:63
Vp3DecodeContext::qr_size
uint8_t qr_size[2][3][64]
Definition: vp3.c:260
AVOnce
#define AVOnce
Definition: thread.h:202
DC_COEFF
#define DC_COEFF(u)
Definition: vp3.c:1633
Vp3DecodeContext::vp3dsp
VP3DSPContext vp3dsp
Definition: vp3.c:213
Vp3DecodeContext::flipped_image
int flipped_image
Definition: vp3.c:215
vp31_intra_y_dequant
static const uint8_t vp31_intra_y_dequant[64]
Definition: vp3data.h:29
ff_vp3dsp_v_loop_filter_12
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:46
Vp3DecodeContext::fragment_width
int fragment_width[2]
Definition: vp3.c:243
ff_vp3dsp_set_bounding_values
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
Definition: vp3dsp.c:477
Vp3DecodeContext::total_num_coded_frags
int total_num_coded_frags
Definition: vp3.c:291
SB_FULLY_CODED
#define SB_FULLY_CODED
Definition: vp3.c:77
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:231
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:495
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:368
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:559
fixed_motion_vector_table
static const int8_t fixed_motion_vector_table[64]
Definition: vp3data.h:115
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
codec_internal.h
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
unpack_vectors
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:904
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
ff_vp4_decoder
const FFCodec ff_vp4_decoder
update_frames
static void update_frames(AVCodecContext *avctx)
Release and shuffle frames after decode finishes.
Definition: vp3.c:2505
FF_CODEC_CAP_EXPORTS_CROPPING
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: codec_internal.h:60
VLCElem
Definition: vlc.h:32
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:428
ref_frame
static int ref_frame(VVCFrame *dst, const VVCFrame *src)
Definition: dec.c:616
Vp3DecodeContext::dct_tokens
int16_t * dct_tokens[3][64]
This is a list of all tokens in bitstream order.
Definition: vp3.c:280
Vp3DecodeContext::coded_dc_scale_factor
uint16_t coded_dc_scale_factor[2][64]
Definition: vp3.c:256
Vp3DecodeContext::qps
int qps[3]
Definition: vp3.c:219
Vp3DecodeContext::block
int16_t block[64]
Definition: vp3.c:214
Vp3DecodeContext::chroma_y_shift
int chroma_y_shift
Definition: vp3.c:204
Vp3DecodeContext::data_offset
int data_offset[3]
Definition: vp3.c:248
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
Vp3DecodeContext::macroblock_coding
unsigned char * macroblock_coding
Definition: vp3.c:320
version
version
Definition: libkvazaar.c:315
state
static struct @530 state
vp3data.h
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
AVCOL_TRC_BT709
@ AVCOL_TRC_BT709
also ITU-R BT1361
Definition: pixfmt.h:663
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1572
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are no more references to it.
Definition: refstruct.c:120
Vp3DecodeContext::avctx
AVCodecContext * avctx
Definition: vp3.c:200
AV_CODEC_ID_VP3
@ AV_CODEC_ID_VP3
Definition: codec_id.h:81
emms.h
Vp3DecodeContext::nkf_coded_fragment_list
int * nkf_coded_fragment_list
Definition: vp3.c:298
Vp3DecodeContext::keyframe
int keyframe
Definition: vp3.c:208
MODE_INTRA
#define MODE_INTRA
Definition: vp3.c:85
apply_loop_filter
static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
Definition: vp3.c:1784
Vp3DecodeContext::macroblock_height
int macroblock_height
Definition: vp3.c:236
Vp3DecodeContext::yuv_macroblock_count
int yuv_macroblock_count
Definition: vp3.c:240
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
Vp3DecodeContext::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: vp3.c:322
AVCodecContext::extradata
uint8_t * extradata
Out-of-band global headers that may be used by some codecs.
Definition: avcodec.h:514
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:369
Vp3DecodeContext::c_macroblock_count
int c_macroblock_count
Definition: vp3.c:237
AV_CODEC_ID_THEORA
@ AV_CODEC_ID_THEORA
Definition: codec_id.h:82
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
vp3_decode_frame
static int vp3_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: vp3.c:2567
superblock_run_length_vlc_lens
static const uint8_t superblock_run_length_vlc_lens[34]
Definition: vp3data.h:85
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
ff_mjpeg_std_chrominance_quant_tbl
const uint8_t ff_mjpeg_std_chrominance_quant_tbl[64]
Definition: jpegquanttables.c:45
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
Vp3DecodeContext::macroblock_count
int macroblock_count
Definition: vp3.c:234
SUPERBLOCK_VLC_BITS
#define SUPERBLOCK_VLC_BITS
Definition: vp3.c:64
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:179
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:676
Vp3DecodeContext::current_frame
ProgressFrame current_frame
Definition: vp3.c:207
Vp3DecodeContext::v_superblock_start
int v_superblock_start
Definition: vp3.c:231
Vp3DecodeContext::c_superblock_height
int c_superblock_height
Definition: vp3.c:228
AVCodecContext::height
int height
Definition: avcodec.h:592
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
VP4_DC_INTER
@ VP4_DC_INTER
Definition: vp3.c:149
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
VP4_MV_VLC_BITS
#define VP4_MV_VLC_BITS
Definition: vp3.c:63
Vp3DecodeContext::coded_fragment_list
int * coded_fragment_list[3]
Definition: vp3.c:295
VP4_DC_UNDEFINED
@ VP4_DC_UNDEFINED
Definition: vp3.c:152
avcodec.h
Vp3DecodeContext::c_superblock_count
int c_superblock_count
Definition: vp3.c:229
stride
#define stride
Definition: h264pred_template.c:536
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
PL
#define PL
AVCOL_PRI_BT470M
@ AVCOL_PRI_BT470M
also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
Definition: pixfmt.h:641
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
Vp3DecodeContext::theora_tables
int theora_tables
Definition: vp3.c:201
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
free_tables
static av_cold void free_tables(AVCodecContext *avctx)
Definition: vp3.c:337
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
unpack_vlcs
static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb, const VLCElem *vlc_table, int coeff_index, int plane, int eob_run)
Definition: vp3.c:1180
ProgressFrame::f
struct AVFrame * f
Definition: progressframe.h:74
MODE_INTER_PLUS_MV
#define MODE_INTER_PLUS_MV
Definition: vp3.c:86
Vp3DecodeContext::num_coded_frags
int num_coded_frags[3][64]
number of blocks that contain DCT coefficients at the given level or higher
Definition: vp3.c:290
vp4_block_pattern_table_selector
static const uint8_t vp4_block_pattern_table_selector[14]
Definition: vp4data.h:86
ff_thread_finish_setup
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards. If some code can 't be moved
Vp3DecodeContext::chroma_x_shift
int chroma_x_shift
Definition: vp3.c:204
BLOCK_X
#define BLOCK_X
Definition: vp3.c:643
U
#define U(x)
Definition: vpx_arith.h:37
MODE_COPY
#define MODE_COPY
Definition: vp3.c:95
Vp3DecodeContext
Definition: vp3.c:199
ff_progress_frame_replace
void ff_progress_frame_replace(ProgressFrame *dst, const ProgressFrame *src)
Do nothing if dst and src already refer to the same AVFrame; otherwise unreference dst and if src is ...
Definition: decode.c:1903
ff_theora_decoder
const FFCodec ff_theora_decoder
vp4_filter_limit_values
static const uint8_t vp4_filter_limit_values[64]
Definition: vp4data.h:75
MODE_GOLDEN_MV
#define MODE_GOLDEN_MV
Definition: vp3.c:90
coeff_vlc
static const VLCElem * coeff_vlc[2][8][4]
Definition: atrac9dec.c:110
FRAGMENT_PIXELS
#define FRAGMENT_PIXELS
Definition: vp3.c:66
AVCodecContext
main external API structure.
Definition: avcodec.h:431
vp3_draw_horiz_band
static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
called when all pixels up to row y are complete
Definition: vp3.c:1895
av_refstruct_replace
void av_refstruct_replace(void *dstp, const void *src)
Ensure *dstp refers to the same object as src.
Definition: refstruct.c:160
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:789
vp4_generic_dequant
static const uint8_t vp4_generic_dequant[64]
Definition: vp4data.h:31
zero_run_get_bits
static const uint8_t zero_run_get_bits[32]
Definition: vp3data.h:140
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VLC
Definition: vlc.h:50
Vp3DecodeContext::coded_ac_scale_factor
uint32_t coded_ac_scale_factor[64]
Definition: vp3.c:257
NB_VP4_DC_TYPES
@ NB_VP4_DC_TYPES
Definition: vp3.c:151
Vp3DecodeContext::bounding_values_array
int bounding_values_array[256+4]
Definition: vp3.c:328
output_plane
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, ptrdiff_t dst_pitch, int dst_height)
Convert and output the current plane.
Definition: indeo3.c:1031
HuffEntry
Definition: exr.c:96
vp31_inter_dequant
static const uint8_t vp31_inter_dequant[64]
Definition: vp3data.h:41
temp
else temp
Definition: vf_mcdeint.c:271
body
static void body(uint32_t ABCD[4], const uint8_t *src, size_t nblocks)
Definition: md5.c:103
VLC::table
VLCElem * table
Definition: vlc.h:52
vp4_block_pattern_vlc
static const uint8_t vp4_block_pattern_vlc[2][14][2]
Definition: vp4data.h:90
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
avpriv_split_xiph_headers
int avpriv_split_xiph_headers(const uint8_t *extradata, int extradata_size, int first_header_size, const uint8_t *header_start[3], int header_len[3])
Split a single extradata buffer into the three headers that most Xiph codecs use.
Definition: xiph.c:26
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MODE_INTER_NO_MV
#define MODE_INTER_NO_MV
Definition: vp3.c:84
VideoDSPContext
Definition: videodsp.h:40
ff_vlc_init_tables_from_lengths
const av_cold VLCElem * ff_vlc_init_tables_from_lengths(VLCInitState *state, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags)
Definition: vlc.c:366
HuffEntry::sym
uint8_t sym
Definition: vp3.c:186
Vp3DecodeContext::superblock_coding
unsigned char * superblock_coding
Definition: vp3.c:232
COMPATIBLE_FRAME
#define COMPATIBLE_FRAME(x)
Definition: vp3.c:1631
AVERROR_DECODER_NOT_FOUND
#define AVERROR_DECODER_NOT_FOUND
Decoder not found.
Definition: error.h:54
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:607
Vp3DecodeContext::fragment_start
int fragment_start[3]
Definition: vp3.c:247
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:200
ff_vlc_init_tables
static const VLCElem * ff_vlc_init_tables(VLCInitState *state, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, int flags)
Definition: vlc.h:254
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
vp3_bias
static const uint8_t vp3_bias[5 *16][32][2]
Definition: vp3data.h:370
get_eob_run
static int get_eob_run(GetBitContext *gb, int token)
Definition: vp3.c:1144
HuffTable::entries
HuffEntry entries[32]
Definition: vp3.c:190
VLC_INIT_STATIC_TABLE_FROM_LENGTHS
#define VLC_INIT_STATIC_TABLE_FROM_LENGTHS(vlc_table, nb_bits, nb_codes, lens, lens_wrap, syms, syms_wrap, syms_size, offset, flags)
Definition: vlc.h:288
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:456
Vp3DecodeContext::huffman_table
HuffTable huffman_table[5 *16]
Definition: vp3.c:325
ProgressFrame
The ProgressFrame structure.
Definition: progressframe.h:73
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
VLC_INIT_STATE
#define VLC_INIT_STATE(_table)
Definition: vlc.h:225
vp31_filter_limit_values
static const uint8_t vp31_filter_limit_values[64]
Definition: vp3data.h:74
AVPacket
This structure stores compressed data.
Definition: packet.h:535
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
VP4Predictor::type
int type
Definition: vp3.c:180
vp3_decode_init
static av_cold int vp3_decode_init(AVCodecContext *avctx)
Definition: vp3.c:2358
Vp3DecodeContext::base_matrix
uint8_t base_matrix[384][64]
Definition: vp3.c:258
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
videodsp.h
VP3_MV_VLC_BITS
#define VP3_MV_VLC_BITS
Definition: vp3.c:62
Vp3DecodeContext::fragment_count
int fragment_count
Definition: vp3.c:242
vp31_dc_scale_factor
static const uint8_t vp31_dc_scale_factor[64]
Definition: vp3data.h:52
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
imgutils.h
hpeldsp.h
Vp3DecodeContext::width
int width
Definition: vp3.c:203
Vp3DecodeContext::kf_coded_fragment_list
int * kf_coded_fragment_list
Definition: vp3.c:297
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
unpack_block_qpis
static int unpack_block_qpis(Vp3DecodeContext *s, GetBitContext *gb)
Definition: vp3.c:1101
Vp3DecodeContext::qr_base
uint16_t qr_base[2][3][64]
Definition: vp3.c:261
vp3_decode_end
static av_cold int vp3_decode_end(AVCodecContext *avctx)
Definition: vp3.c:362
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
h
h
Definition: vp9dsp_template.c:2070
vp4_uv_dc_scale_factor
static const uint8_t vp4_uv_dc_scale_factor[64]
Definition: vp4data.h:53
MAXIMUM_LONG_BIT_RUN
#define MAXIMUM_LONG_BIT_RUN
Definition: vp3.c:82
init_tables_once
static av_cold void init_tables_once(void)
Definition: vp3.c:2263
Vp3DecodeContext::version
int version
Definition: vp3.c:202
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
width
#define width
Definition: dsp.h:89
Vp3DecodeContext::motion_val
int8_t(*[2] motion_val)[2]
Definition: vp3.c:253
Vp3DecodeContext::last_slice_end
int last_slice_end
Definition: vp3.c:216
ff_vp3dsp_init
av_cold void ff_vp3dsp_init(VP3DSPContext *c)
Definition: vp3dsp.c:448
Vp3DecodeContext::dc_pred_row
VP4Predictor * dc_pred_row
Definition: vp3.c:330
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
Vp3DecodeContext::u_superblock_start
int u_superblock_start
Definition: vp3.c:230
coeff_get_bits
static const uint8_t coeff_get_bits[32]
Definition: vp3data.h:148
Vp3DecodeContext::dct_tokens_base
int16_t * dct_tokens_base
Definition: vp3.c:281
Vp3Fragment
Definition: vp3.c:69
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
src
#define src
Definition: vp8dsp.c:248
Vp3DecodeContext::nqps
int nqps
Definition: vp3.c:220
Vp3DecodeContext::qmat
int16_t qmat[3][2][3][64]
qmat[qpi][is_inter][plane]
Definition: vp3.c:310
Vp3DecodeContext::vdsp
VideoDSPContext vdsp
Definition: vp3.c:212
TOKEN_EOB
#define TOKEN_EOB(eob_run)
Definition: vp3.c:282
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:337
Vp3DecodeContext::c_macroblock_width
int c_macroblock_width
Definition: vp3.c:238
Vp3DecodeContext::last_frame
ProgressFrame last_frame
Definition: vp3.c:206