FFmpeg: huffyuvdec.c
1 /*
2  * huffyuv decoder
3  *
4  * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
7  * the algorithm used
8  *
9  * This file is part of FFmpeg.
10  *
11  * FFmpeg is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public
13  * License as published by the Free Software Foundation; either
14  * version 2.1 of the License, or (at your option) any later version.
15  *
16  * FFmpeg is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with FFmpeg; if not, write to the Free Software
23  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24  *
25  * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
26  */
27 
28 /**
29  * @file
30  * huffyuv decoder
31  */
32 
33 #define UNCHECKED_BITSTREAM_READER 1
34 
35 #include "config_components.h"
36 
37 #include "avcodec.h"
38 #include "codec_internal.h"
39 #include "get_bits.h"
40 #include "huffyuv.h"
41 #include "huffyuvdsp.h"
42 #include "lossless_videodsp.h"
43 #include "thread.h"
44 #include "libavutil/imgutils.h"
45 #include "libavutil/pixdesc.h"
46 
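/* Fixed tables for the original ("classic") Huffyuv bitstream, which does
 * not store Huffman tables in the extradata: the classic_shift_* arrays
 * hold run-length coded code lengths and classic_add_* the corresponding
 * code bits (see read_old_huffman_tables()). */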
47 #define classic_shift_luma_table_size 42
48 static const unsigned char classic_shift_luma[classic_shift_luma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
49  34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
50  14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
51  10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
52  0,0,0,0,0,0,0,0,
53 };
54 
55 #define classic_shift_chroma_table_size 59
56 static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + AV_INPUT_BUFFER_PADDING_SIZE] = {
57  66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
58  84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
59  57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
60  78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
61  0,0,0,0,0,0,0,0,
62 };
63 
64 static const unsigned char classic_add_luma[256] = {
65  3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
66  73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
67  68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
68  35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
69  37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
70  35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
71  27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
72  15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
73  12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
74  12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
75  18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
76  28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
77  28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
78  62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
79  54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
80  46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
81 };
82 
83 static const unsigned char classic_add_chroma[256] = {
84  3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
85  7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
86  11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
87  43, 45, 76, 81, 46, 82, 75, 55, 56, 144, 58, 80, 60, 74, 147, 63,
88  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
89  80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
90  17, 14, 5, 6, 100, 54, 47, 50, 51, 53, 106, 107, 108, 109, 110, 111,
91  112, 113, 114, 115, 4, 117, 118, 92, 94, 121, 122, 3, 124, 103, 2, 1,
92  0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
93  135, 132, 133, 104, 64, 101, 62, 57, 102, 95, 93, 59, 61, 28, 97, 96,
94  52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
95  19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
96  7, 128, 127, 105, 123, 116, 35, 34, 33, 145, 31, 79, 42, 146, 78, 26,
97  83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
98  14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
99  6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
100 };
101 
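/* Read a run-length coded table of code lengths: each run is a 3-bit
 * repeat count followed by a 5-bit length value; a repeat count of 0 is
 * an escape for an explicit 8-bit repeat count. */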
102 static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
103 {
104  int i, val, repeat;
105 
106  for (i = 0; i < n;) {
107  repeat = get_bits(gb, 3);
108  val = get_bits(gb, 5);
109  if (repeat == 0)
110  repeat = get_bits(gb, 8);
111  if (i + repeat > n || get_bits_left(gb) < 0) {
112  av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
113  return AVERROR_INVALIDDATA;
114  }
115  while (repeat--)
116  dst[i++] = val;
117  }
118  return 0;
119 }
120 
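/* Build the "joint" VLCs stored in s->vlc[4+plane] and used by the fast
 * path of GET_VLC_DUAL below: whenever the codes of two consecutive
 * symbols fit together within VLC_BITS, the pair is decoded with a single
 * table lookup instead of two. For RGB streams a single joint table maps
 * a code directly to a (B, G, R) triplet, undoing the green decorrelation. */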
121 static int generate_joint_tables(HYuvContext *s)
122 {
123  int ret;
124  uint16_t *symbols = av_mallocz(5 << VLC_BITS);
125  uint16_t *bits;
126  uint8_t *len;
127  if (!symbols)
128  return AVERROR(ENOMEM);
129  bits = symbols + (1 << VLC_BITS);
130  len = (uint8_t *)(bits + (1 << VLC_BITS));
131 
132  if (s->bitstream_bpp < 24 || s->version > 2) {
133  int p, i, y, u;
134  for (p = 0; p < 4; p++) {
135  int p0 = s->version > 2 ? p : 0;
136  for (i = y = 0; y < s->vlc_n; y++) {
137  int len0 = s->len[p0][y];
138  int limit = VLC_BITS - len0;
139  if (limit <= 0 || !len0)
140  continue;
141  if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
142  continue;
143  for (u = 0; u < s->vlc_n; u++) {
144  int len1 = s->len[p][u];
145  if (len1 > limit || !len1)
146  continue;
147  if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
148  continue;
149  av_assert0(i < (1 << VLC_BITS));
150  len[i] = len0 + len1;
151  bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
152  symbols[i] = (y << 8) + (u & 0xFF);
153  i++;
154  }
155  }
156  ff_free_vlc(&s->vlc[4 + p]);
157  if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
158  bits, 2, 2, symbols, 2, 2, 0)) < 0)
159  goto out;
160  }
161  } else {
162  uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
163  int i, b, g, r, code;
164  int p0 = s->decorrelate;
165  int p1 = !s->decorrelate;
166  /* Restrict the range to +/-16 because that's pretty much guaranteed
167  * to cover all the combinations that fit in 11 bits total, and it
168  * does not matter if we miss a few rare codes. */
169  for (i = 0, g = -16; g < 16; g++) {
170  int len0 = s->len[p0][g & 255];
171  int limit0 = VLC_BITS - len0;
172  if (limit0 < 2 || !len0)
173  continue;
174  for (b = -16; b < 16; b++) {
175  int len1 = s->len[p1][b & 255];
176  int limit1 = limit0 - len1;
177  if (limit1 < 1 || !len1)
178  continue;
179  code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
180  for (r = -16; r < 16; r++) {
181  int len2 = s->len[2][r & 255];
182  if (len2 > limit1 || !len2)
183  continue;
184  av_assert0(i < (1 << VLC_BITS));
185  len[i] = len0 + len1 + len2;
186  bits[i] = (code << len2) + s->bits[2][r & 255];
187  if (s->decorrelate) {
188  map[i][G] = g;
189  map[i][B] = g + b;
190  map[i][R] = g + r;
191  } else {
192  map[i][B] = g;
193  map[i][G] = b;
194  map[i][R] = r;
195  }
196  i++;
197  }
198  }
199  }
200  ff_free_vlc(&s->vlc[4]);
201  if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
202  bits, 2, 2, 0)) < 0)
203  goto out;
204  }
205  ret = 0;
206 out:
207  av_freep(&symbols);
208  return ret;
209 }
210 
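/* Parse the Huffman tables as stored in the extradata (or, when the
 * "context" flag is set, at the start of every frame), build the
 * per-plane VLCs and the joint tables, and return the number of bytes
 * consumed. */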
211 static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
212 {
213  GetBitContext gb;
214  int i, ret;
215  int count = 3;
216 
217  if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
218  return ret;
219 
220  if (s->version > 2)
221  count = 1 + s->alpha + 2*s->chroma;
222 
223  for (i = 0; i < count; i++) {
224  if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
225  return ret;
226  if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
227  return ret;
228  ff_free_vlc(&s->vlc[i]);
229  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
230  s->bits[i], 4, 4, 0)) < 0)
231  return ret;
232  }
233 
234  if ((ret = generate_joint_tables(s)) < 0)
235  return ret;
236 
237  return (get_bits_count(&gb) + 7) / 8;
238 }
239 
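/* Set up the fixed code tables used by version-1 ("classic") Huffyuv
 * files, which carry no Huffman tables of their own. */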
240 static int read_old_huffman_tables(HYuvContext *s)
241 {
242  GetBitContext gb;
243  int i, ret;
244 
245  init_get_bits(&gb, classic_shift_luma,
246  classic_shift_luma_table_size * 8);
247  if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
248  return ret;
249 
250  init_get_bits(&gb, classic_shift_chroma,
251  classic_shift_chroma_table_size * 8);
252  if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
253  return ret;
254 
255  for (i = 0; i < 256; i++)
256  s->bits[0][i] = classic_add_luma[i];
257  for (i = 0; i < 256; i++)
258  s->bits[1][i] = classic_add_chroma[i];
259 
260  if (s->bitstream_bpp >= 24) {
261  memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
262  memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
263  }
264  memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
265  memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
266 
267  for (i = 0; i < 4; i++) {
268  ff_free_vlc(&s->vlc[i]);
269  if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
270  s->bits[i], 4, 4, 0)) < 0)
271  return ret;
272  }
273 
274  if ((ret = generate_joint_tables(s)) < 0)
275  return ret;
276 
277  return 0;
278 }
279 
280 static av_cold int decode_end(AVCodecContext *avctx)
281 {
282  HYuvContext *s = avctx->priv_data;
283  int i;
284 
285  ff_huffyuv_common_end(s);
286  av_freep(&s->bitstream_buffer);
287 
288  for (i = 0; i < 8; i++)
289  ff_free_vlc(&s->vlc[i]);
290 
291  return 0;
292 }
293 
294 static av_cold int decode_init(AVCodecContext *avctx)
295 {
296  HYuvContext *s = avctx->priv_data;
297  int ret;
298 
299  ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
300  if (ret < 0)
301  return ret;
302 
303  ff_huffyuvdsp_init(&s->hdsp, avctx->pix_fmt);
304  ff_llviddsp_init(&s->llviddsp);
305  memset(s->vlc, 0, 4 * sizeof(VLC));
306 
307  s->interlaced = avctx->height > 288;
308  s->bgr32 = 1;
309 
310  if (avctx->extradata_size) {
311  if ((avctx->bits_per_coded_sample & 7) &&
312  avctx->bits_per_coded_sample != 12)
313  s->version = 1; // do such files exist at all?
314  else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
315  s->version = 2;
316  else
317  s->version = 3;
318  } else
319  s->version = 0;
320 
321  s->bps = 8;
322  s->n = 1<<s->bps;
323  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
324  s->chroma = 1;
325  if (s->version >= 2) {
326  int method, interlace;
327 
328  if (avctx->extradata_size < 4)
329  return AVERROR_INVALIDDATA;
330 
331  method = avctx->extradata[0];
332  s->decorrelate = method & 64 ? 1 : 0;
333  s->predictor = method & 63;
334  if (s->version == 2) {
335  s->bitstream_bpp = avctx->extradata[1];
336  if (s->bitstream_bpp == 0)
337  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
338  } else {
339  s->bps = (avctx->extradata[1] >> 4) + 1;
340  s->n = 1<<s->bps;
341  s->vlc_n = FFMIN(s->n, MAX_VLC_N);
342  s->chroma_h_shift = avctx->extradata[1] & 3;
343  s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
344  s->yuv = !!(avctx->extradata[2] & 1);
345  s->chroma= !!(avctx->extradata[2] & 3);
346  s->alpha = !!(avctx->extradata[2] & 4);
347  }
348  interlace = (avctx->extradata[2] & 0x30) >> 4;
349  s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
350  s->context = avctx->extradata[2] & 0x40 ? 1 : 0;
351 
352  if ((ret = read_huffman_tables(s, avctx->extradata + 4,
353  avctx->extradata_size - 4)) < 0)
354  return ret;
355  } else {
356  switch (avctx->bits_per_coded_sample & 7) {
357  case 1:
358  s->predictor = LEFT;
359  s->decorrelate = 0;
360  break;
361  case 2:
362  s->predictor = LEFT;
363  s->decorrelate = 1;
364  break;
365  case 3:
366  s->predictor = PLANE;
367  s->decorrelate = avctx->bits_per_coded_sample >= 24;
368  break;
369  case 4:
370  s->predictor = MEDIAN;
371  s->decorrelate = 0;
372  break;
373  default:
374  s->predictor = LEFT; // OLD
375  s->decorrelate = 0;
376  break;
377  }
378  s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
379  s->context = 0;
380 
381  if ((ret = read_old_huffman_tables(s)) < 0)
382  return ret;
383  }
384 
385  if (s->version <= 2) {
386  switch (s->bitstream_bpp) {
387  case 12:
388  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
389  s->yuv = 1;
390  break;
391  case 16:
392  if (s->yuy2)
393  avctx->pix_fmt = AV_PIX_FMT_YUYV422;
394  else
395  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
396  s->yuv = 1;
397  break;
398  case 24:
399  if (s->bgr32)
400  avctx->pix_fmt = AV_PIX_FMT_0RGB32;
401  else
402  avctx->pix_fmt = AV_PIX_FMT_BGR24;
403  break;
404  case 32:
405  av_assert0(s->bgr32);
406  avctx->pix_fmt = AV_PIX_FMT_RGB32;
407  s->alpha = 1;
408  break;
409  default:
410  return AVERROR_INVALIDDATA;
411  }
412  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
413  &s->chroma_h_shift,
414  &s->chroma_v_shift);
415  } else {
416  switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
417  case 0x070:
418  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
419  break;
420  case 0x0F0:
421  avctx->pix_fmt = AV_PIX_FMT_GRAY16;
422  break;
423  case 0x470:
424  avctx->pix_fmt = AV_PIX_FMT_GBRP;
425  break;
426  case 0x480:
427  avctx->pix_fmt = AV_PIX_FMT_GBRP9;
428  break;
429  case 0x490:
430  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
431  break;
432  case 0x4B0:
433  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
434  break;
435  case 0x4D0:
436  avctx->pix_fmt = AV_PIX_FMT_GBRP14;
437  break;
438  case 0x4F0:
439  avctx->pix_fmt = AV_PIX_FMT_GBRP16;
440  break;
441  case 0x570:
442  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
443  break;
444  case 0x670:
445  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
446  break;
447  case 0x680:
448  avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
449  break;
450  case 0x690:
451  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
452  break;
453  case 0x6B0:
454  avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
455  break;
456  case 0x6D0:
457  avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
458  break;
459  case 0x6F0:
460  avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
461  break;
462  case 0x671:
463  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
464  break;
465  case 0x681:
466  avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
467  break;
468  case 0x691:
469  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
470  break;
471  case 0x6B1:
472  avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
473  break;
474  case 0x6D1:
475  avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
476  break;
477  case 0x6F1:
478  avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
479  break;
480  case 0x672:
481  avctx->pix_fmt = AV_PIX_FMT_YUV411P;
482  break;
483  case 0x674:
484  avctx->pix_fmt = AV_PIX_FMT_YUV440P;
485  break;
486  case 0x675:
487  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
488  break;
489  case 0x685:
490  avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
491  break;
492  case 0x695:
493  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
494  break;
495  case 0x6B5:
496  avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
497  break;
498  case 0x6D5:
499  avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
500  break;
501  case 0x6F5:
502  avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
503  break;
504  case 0x67A:
505  avctx->pix_fmt = AV_PIX_FMT_YUV410P;
506  break;
507  case 0x770:
508  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
509  break;
510  case 0x780:
511  avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
512  break;
513  case 0x790:
514  avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
515  break;
516  case 0x7F0:
517  avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
518  break;
519  case 0x771:
520  avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
521  break;
522  case 0x781:
523  avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
524  break;
525  case 0x791:
526  avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
527  break;
528  case 0x7F1:
529  avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
530  break;
531  case 0x775:
532  avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
533  break;
534  case 0x785:
535  avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
536  break;
537  case 0x795:
538  avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
539  break;
540  case 0x7F5:
541  avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
542  break;
543  default:
544  return AVERROR_INVALIDDATA;
545  }
546  }
547 
548  ff_huffyuv_common_init(avctx);
549 
550  if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
551  av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
552  return AVERROR_INVALIDDATA;
553  }
554  if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
555  avctx->width % 4) {
556  av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
557  "for this combination of colorspace and predictor type.\n");
558  return AVERROR_INVALIDDATA;
559  }
560 
561  if ((ret = ff_huffyuv_alloc_temp(s)) < 0)
562  return ret;
563 
564  return 0;
565 }
566 
567 /** Subset of GET_VLC for use in hand-rolled VLC code */
568 #define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
569  code = table[index].sym; \
570  n = table[index].len; \
571  if (max_depth > 1 && n < 0) { \
572  LAST_SKIP_BITS(name, gb, bits); \
573  UPDATE_CACHE(name, gb); \
574  \
575  nb_bits = -n; \
576  index = SHOW_UBITS(name, gb, nb_bits) + code; \
577  code = table[index].sym; \
578  n = table[index].len; \
579  if (max_depth > 2 && n < 0) { \
580  LAST_SKIP_BITS(name, gb, nb_bits); \
581  UPDATE_CACHE(name, gb); \
582  \
583  nb_bits = -n; \
584  index = SHOW_UBITS(name, gb, nb_bits) + code; \
585  code = table[index].sym; \
586  n = table[index].len; \
587  } \
588  } \
589  dst = code; \
590  LAST_SKIP_BITS(name, gb, n)
591 
592 
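/* Decode two symbols at once: try the joint table (dtable) first; if the
 * pair is not present there (len <= 0), fall back to two separate lookups
 * in the individual per-plane tables. */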
593 #define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
594  bits, max_depth, OP) \
595  do { \
596  unsigned int index = SHOW_UBITS(name, gb, bits); \
597  int code, n = dtable[index].len; \
598  \
599  if (n<=0) { \
600  int nb_bits; \
601  VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
602  \
603  UPDATE_CACHE(re, gb); \
604  index = SHOW_UBITS(name, gb, bits); \
605  VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
606  } else { \
607  code = dtable[index].sym; \
608  OP(dst0, dst1, code); \
609  LAST_SKIP_BITS(name, gb, n); \
610  } \
611  } while (0)
612 
613 #define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code
614 
615 #define READ_2PIX(dst0, dst1, plane1) \
616  UPDATE_CACHE(re, &s->gb); \
617  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
618  s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
619 
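/* Decode one line of 4:2:2 data into s->temp[]: "count" luma samples plus
 * count/2 samples for each chroma plane. A conservative bound (icount)
 * decides how many pairs can be read without end-of-buffer checks; the
 * remainder is decoded with explicit checks and padded with zeros. */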
620 static void decode_422_bitstream(HYuvContext *s, int count)
621 {
622  int i, icount;
623  OPEN_READER(re, &s->gb);
624  count /= 2;
625 
626  icount = get_bits_left(&s->gb) / (32 * 4);
627  if (count >= icount) {
628  for (i = 0; i < icount; i++) {
629  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
630  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
631  }
632  for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
633  READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
634  if (BITS_LEFT(re, &s->gb) <= 0) break;
635  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
636  }
637  for (; i < count; i++)
638  s->temp[0][2 * i ] = s->temp[1][i] =
639  s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
640  } else {
641  for (i = 0; i < count; i++) {
642  READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
643  READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
644  }
645  }
646  CLOSE_READER(re, &s->gb);
647 }
648 
649 #define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
650  UPDATE_CACHE(re, &s->gb); \
651  GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
652  s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)
653 
654 #define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)
655 
656 /* TODO instead of restarting the read when the code isn't in the first level
657  * of the joint table, jump into the 2nd level of the individual table. */
658 #define READ_2PIX_PLANE16(dst0, dst1, plane){\
659  dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
660  dst0 += get_bits(&s->gb, 2);\
661  dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
662  dst1 += get_bits(&s->gb, 2);\
663 }
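/* Decode one line of a single plane (version 3 bitstreams). Samples of up
 * to 8 bits use the joint tables, 9-14 bit samples use the sign-extending
 * variant, and wider samples are coded as a VLC for the high bits plus two
 * raw low bits (READ_2PIX_PLANE16 above). An odd trailing sample is
 * decoded separately. */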
664 static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
665 {
666  int i, count = width/2;
667 
668  if (s->bps <= 8) {
669  OPEN_READER(re, &s->gb);
670  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
671  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
672  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
673  }
674  } else {
675  for(i=0; i<count; i++){
676  READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
677  }
678  }
679  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
680  unsigned int index;
681  int nb_bits, code, n;
682  UPDATE_CACHE(re, &s->gb);
683  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
684  VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
685  &s->gb, re, VLC_BITS, 3);
686  }
687  CLOSE_READER(re, &s->gb);
688  } else if (s->bps <= 14) {
689  OPEN_READER(re, &s->gb);
690  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
691  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
692  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
693  }
694  } else {
695  for(i=0; i<count; i++){
696  READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
697  }
698  }
699  if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
700  unsigned int index;
701  int nb_bits, code, n;
702  UPDATE_CACHE(re, &s->gb);
703  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
704  VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
705  &s->gb, re, VLC_BITS, 3);
706  }
707  CLOSE_READER(re, &s->gb);
708  } else {
709  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
710  for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
711  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
712  }
713  } else {
714  for(i=0; i<count; i++){
715  READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
716  }
717  }
718  if( width&1 && get_bits_left(&s->gb)>0 ) {
719  int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
720  s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
721  }
722  }
723 }
724 
725 static void decode_gray_bitstream(HYuvContext *s, int count)
726 {
727  int i;
728  OPEN_READER(re, &s->gb);
729  count /= 2;
730 
731  if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
732  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
733  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
734  }
735  } else {
736  for (i = 0; i < count; i++) {
737  READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
738  }
739  }
740  CLOSE_READER(re, &s->gb);
741 }
742 
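/* Decode "count" BGR(A) pixels into s->temp[0]. The fast path reads a
 * whole (B, G, R) triplet from the joint table; otherwise the components
 * are decoded one by one, undoing the green decorrelation if enabled. */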
743 static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
744  int decorrelate, int alpha)
745 {
746  int i;
747  OPEN_READER(re, &s->gb);
748 
749  for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
750  unsigned int index;
751  int code, n, nb_bits;
752 
753  UPDATE_CACHE(re, &s->gb);
754  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
755  n = s->vlc[4].table[index].len;
756 
757  if (n>0) {
758  code = s->vlc[4].table[index].sym;
759  *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
760  LAST_SKIP_BITS(re, &s->gb, n);
761  } else {
762  if (decorrelate) {
763  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
764  &s->gb, re, VLC_BITS, 3);
765 
766  UPDATE_CACHE(re, &s->gb);
767  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
768  VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
769  s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
770 
771  UPDATE_CACHE(re, &s->gb);
772  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
773  VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
774  s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
775  } else {
776  VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
777  &s->gb, re, VLC_BITS, 3);
778 
779  UPDATE_CACHE(re, &s->gb);
780  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
781  VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
782  &s->gb, re, VLC_BITS, 3);
783 
784  UPDATE_CACHE(re, &s->gb);
785  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
786  VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
787  &s->gb, re, VLC_BITS, 3);
788  }
789  }
790  if (alpha) {
791  UPDATE_CACHE(re, &s->gb);
792  index = SHOW_UBITS(re, &s->gb, VLC_BITS);
793  VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
794  &s->gb, re, VLC_BITS, 3);
795  } else
796  s->temp[0][4 * i + A] = 0;
797  }
798  CLOSE_READER(re, &s->gb);
799 }
800 
801 static void decode_bgr_bitstream(HYuvContext *s, int count)
802 {
803  if (s->decorrelate) {
804  if (s->bitstream_bpp == 24)
805  decode_bgr_1(s, count, 1, 0);
806  else
807  decode_bgr_1(s, count, 1, 1);
808  } else {
809  if (s->bitstream_bpp == 24)
810  decode_bgr_1(s, count, 0, 0);
811  else
812  decode_bgr_1(s, count, 0, 1);
813  }
814 }
815 
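/* Hand finished rows to the user through draw_horiz_band(), translating
 * the luma row range [s->last_slice_end, y) into per-plane offsets. */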
816 static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
817 {
818  int h, cy, i;
819  int offset[AV_NUM_DATA_POINTERS];
820 
821  if (!s->avctx->draw_horiz_band)
822  return;
823 
824  h = y - s->last_slice_end;
825  y -= h;
826 
827  if (s->bitstream_bpp == 12)
828  cy = y >> 1;
829  else
830  cy = y;
831 
832  offset[0] = frame->linesize[0] * y;
833  offset[1] = frame->linesize[1] * cy;
834  offset[2] = frame->linesize[2] * cy;
835  for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
836  offset[i] = 0;
837  emms_c();
838 
839  s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
840 
841  s->last_slice_end = y + h;
842 }
843 
844 static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
845 {
846  if (s->bps <= 8) {
847  return s->llviddsp.add_left_pred(dst, src, w, acc);
848  } else {
849  return s->llviddsp.add_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
850  }
851 }
852 
853 static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
854 {
855  if (s->bps <= 8) {
856  s->llviddsp.add_bytes(dst, src, w);
857  } else {
858  s->hdsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
859  }
860 }
861 
862 static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
863 {
864  if (s->bps <= 8) {
865  s->llviddsp.add_median_pred(dst, src, diff, w, left, left_top);
866  } else {
867  s->hdsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
868  }
869 }
870 
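/* Decode one slice of the frame. Version 3 streams are decoded plane by
 * plane; older YUV streams interleave luma and chroma line by line; RGB
 * streams are stored bottom-up and drawn as a single slice at the end. */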
871 static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height,
872  int buf_size, int y_offset, int table_size)
873 {
874  HYuvContext *s = avctx->priv_data;
875  int fake_ystride, fake_ustride, fake_vstride;
876  const int width = s->width;
877  const int width2 = s->width >> 1;
878  int ret;
879 
880  if ((ret = init_get_bits8(&s->gb, s->bitstream_buffer + table_size, buf_size - table_size)) < 0)
881  return ret;
882 
883  fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
884  fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
885  fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
886 
887  if (s->version > 2) {
888  int plane;
889  for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
890  int left, lefttop, y;
891  int w = width;
892  int h = height;
893  int fake_stride = fake_ystride;
894 
895  if (s->chroma && (plane == 1 || plane == 2)) {
896  w >>= s->chroma_h_shift;
897  h >>= s->chroma_v_shift;
898  fake_stride = plane == 1 ? fake_ustride : fake_vstride;
899  }
900 
901  switch (s->predictor) {
902  case LEFT:
903  case PLANE:
904  decode_plane_bitstream(s, w, plane);
905  left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
906 
907  for (y = 1; y < h; y++) {
908  uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
909 
910  decode_plane_bitstream(s, w, plane);
911  left = left_prediction(s, dst, s->temp[0], w, left);
912  if (s->predictor == PLANE) {
913  if (y > s->interlaced) {
914  add_bytes(s, dst, dst - fake_stride, w);
915  }
916  }
917  }
918 
919  break;
920  case MEDIAN:
921  decode_plane_bitstream(s, w, plane);
922  left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
923 
924  y = 1;
925  if (y >= h)
926  break;
927 
928  /* second line is left predicted for interlaced case */
929  if (s->interlaced) {
930  decode_plane_bitstream(s, w, plane);
931  left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
932  y++;
933  if (y >= h)
934  break;
935  }
936 
937  lefttop = p->data[plane][0];
938  decode_plane_bitstream(s, w, plane);
939  add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
940  y++;
941 
942  for (; y<h; y++) {
943  uint8_t *dst;
944 
945  decode_plane_bitstream(s, w, plane);
946 
947  dst = p->data[plane] + p->linesize[plane] * y;
948 
949  add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
950  }
951 
952  break;
953  }
954  }
955  draw_slice(s, p, height);
956  } else if (s->bitstream_bpp < 24) {
957  int y, cy;
958  int lefty, leftu, leftv;
959  int lefttopy, lefttopu, lefttopv;
960 
961  if (s->yuy2) {
962  p->data[0][3] = get_bits(&s->gb, 8);
963  p->data[0][2] = get_bits(&s->gb, 8);
964  p->data[0][1] = get_bits(&s->gb, 8);
965  p->data[0][0] = get_bits(&s->gb, 8);
966 
967  av_log(avctx, AV_LOG_ERROR,
968  "YUY2 output is not implemented yet\n");
969  return AVERROR_PATCHWELCOME;
970  } else {
971  leftv =
972  p->data[2][0 + y_offset * p->linesize[2]] = get_bits(&s->gb, 8);
973  lefty =
974  p->data[0][1 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
975  leftu =
976  p->data[1][0 + y_offset * p->linesize[1]] = get_bits(&s->gb, 8);
977  p->data[0][0 + y_offset * p->linesize[0]] = get_bits(&s->gb, 8);
978 
979  switch (s->predictor) {
980  case LEFT:
981  case PLANE:
982  decode_422_bitstream(s, width - 2);
983  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0] * y_offset + 2, s->temp[0],
984  width - 2, lefty);
985  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
986  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[1] * y_offset + 1, s->temp[1], width2 - 1, leftu);
987  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[2] * y_offset + 1, s->temp[2], width2 - 1, leftv);
988  }
989 
990  for (cy = y = 1; y < height; y++, cy++) {
991  uint8_t *ydst, *udst, *vdst;
992 
993  if (s->bitstream_bpp == 12) {
994  decode_gray_bitstream(s, width);
995 
996  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
997 
998  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
999  width, lefty);
1000  if (s->predictor == PLANE) {
1001  if (y > s->interlaced)
1002  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1003  }
1004  y++;
1005  if (y >= height)
1006  break;
1007  }
1008 
1009  draw_slice(s, p, y);
1010 
1011  ydst = p->data[0] + p->linesize[0] * (y + y_offset);
1012  udst = p->data[1] + p->linesize[1] * (cy + y_offset);
1013  vdst = p->data[2] + p->linesize[2] * (cy + y_offset);
1014 
1015  decode_422_bitstream(s, width);
1016  lefty = s->llviddsp.add_left_pred(ydst, s->temp[0],
1017  width, lefty);
1018  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1019  leftu = s->llviddsp.add_left_pred(udst, s->temp[1], width2, leftu);
1020  leftv = s->llviddsp.add_left_pred(vdst, s->temp[2], width2, leftv);
1021  }
1022  if (s->predictor == PLANE) {
1023  if (cy > s->interlaced) {
1024  s->llviddsp.add_bytes(ydst, ydst - fake_ystride, width);
1025  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1026  s->llviddsp.add_bytes(udst, udst - fake_ustride, width2);
1027  s->llviddsp.add_bytes(vdst, vdst - fake_vstride, width2);
1028  }
1029  }
1030  }
1031  }
1032  draw_slice(s, p, height);
1033 
1034  break;
1035  case MEDIAN:
1036  /* first line except first 2 pixels is left predicted */
1037  decode_422_bitstream(s, width - 2);
1038  lefty = s->llviddsp.add_left_pred(p->data[0] + 2, s->temp[0],
1039  width - 2, lefty);
1040  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1041  leftu = s->llviddsp.add_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
1042  leftv = s->llviddsp.add_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
1043  }
1044 
1045  cy = y = 1;
1046  if (y >= height)
1047  break;
1048 
1049  /* second line is left predicted for interlaced case */
1050  if (s->interlaced) {
1051  decode_422_bitstream(s, width);
1052  lefty = s->llviddsp.add_left_pred(p->data[0] + p->linesize[0],
1053  s->temp[0], width, lefty);
1054  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1055  leftu = s->llviddsp.add_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
1056  leftv = s->llviddsp.add_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
1057  }
1058  y++;
1059  cy++;
1060  if (y >= height)
1061  break;
1062  }
1063 
1064  /* next 4 pixels are left predicted too */
1065  decode_422_bitstream(s, 4);
1066  lefty = s->llviddsp.add_left_pred(p->data[0] + fake_ystride,
1067  s->temp[0], 4, lefty);
1068  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1069  leftu = s->llviddsp.add_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
1070  leftv = s->llviddsp.add_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
1071  }
1072 
1073  /* next line except the first 4 pixels is median predicted */
1074  lefttopy = p->data[0][3];
1075  decode_422_bitstream(s, width - 4);
1076  s->llviddsp.add_median_pred(p->data[0] + fake_ystride + 4,
1077  p->data[0] + 4, s->temp[0],
1078  width - 4, &lefty, &lefttopy);
1079  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1080  lefttopu = p->data[1][1];
1081  lefttopv = p->data[2][1];
1082  s->llviddsp.add_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
1083  s->llviddsp.add_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
1084  }
1085  y++;
1086  cy++;
1087 
1088  for (; y < height; y++, cy++) {
1089  uint8_t *ydst, *udst, *vdst;
1090 
1091  if (s->bitstream_bpp == 12) {
1092  while (2 * cy > y) {
1093  decode_gray_bitstream(s, width);
1094  ydst = p->data[0] + p->linesize[0] * y;
1095  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1096  s->temp[0], width,
1097  &lefty, &lefttopy);
1098  y++;
1099  }
1100  if (y >= height)
1101  break;
1102  }
1103  draw_slice(s, p, y);
1104 
1105  decode_422_bitstream(s, width);
1106 
1107  ydst = p->data[0] + p->linesize[0] * y;
1108  udst = p->data[1] + p->linesize[1] * cy;
1109  vdst = p->data[2] + p->linesize[2] * cy;
1110 
1111  s->llviddsp.add_median_pred(ydst, ydst - fake_ystride,
1112  s->temp[0], width,
1113  &lefty, &lefttopy);
1114  if (!(s->flags & AV_CODEC_FLAG_GRAY)) {
1115  s->llviddsp.add_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
1116  s->llviddsp.add_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
1117  }
1118  }
1119 
1120  draw_slice(s, p, height);
1121  break;
1122  }
1123  }
1124  } else {
1125  int y;
1126  uint8_t left[4];
1127  const int last_line = (y_offset + height - 1) * p->linesize[0];
1128 
1129  if (s->bitstream_bpp == 32) {
1130  left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
1131  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1132  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1133  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1134  } else {
1135  left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
1136  left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
1137  left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
1138  left[A] = p->data[0][last_line + A] = 255;
1139  skip_bits(&s->gb, 8);
1140  }
1141 
1142  if (s->bgr32) {
1143  switch (s->predictor) {
1144  case LEFT:
1145  case PLANE:
1146  decode_bgr_bitstream(s, width - 1);
1147  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
1148  s->temp[0], width - 1, left);
1149 
1150  for (y = height - 2; y >= 0; y--) { // Yes it is stored upside down.
1151  decode_bgr_bitstream(s, width);
1152 
1153  s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * (y + y_offset),
1154  s->temp[0], width, left);
1155  if (s->predictor == PLANE) {
1156  if (s->bitstream_bpp != 32)
1157  left[A] = 0;
1158  if (y < height - 1 - s->interlaced) {
1159  s->llviddsp.add_bytes(p->data[0] + p->linesize[0] * (y + y_offset),
1160  p->data[0] + p->linesize[0] * (y + y_offset) +
1161  fake_ystride, 4 * width);
1162  }
1163  }
1164  }
1165  // just 1 large slice as this is not possible in reverse order
1166  draw_slice(s, p, height);
1167  break;
1168  default:
1169  av_log(avctx, AV_LOG_ERROR,
1170  "prediction type not supported!\n");
1171  }
1172  } else {
1173  av_log(avctx, AV_LOG_ERROR,
1174  "BGR24 output is not implemented yet\n");
1175  return AVERROR_PATCHWELCOME;
1176  }
1177  }
1178 
1179  return 0;
1180 }
1181 
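/* Top-level frame decoding: the packet is byte-swapped into a scratch
 * buffer, in-band Huffman tables are parsed first when the "context" flag
 * is set, and HYMT packets may carry several independently coded slices
 * described by a small trailer at the end of the packet. */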
1182 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
1183  int *got_frame, AVPacket *avpkt)
1184 {
1185  const uint8_t *buf = avpkt->data;
1186  int buf_size = avpkt->size;
1187  HYuvContext *s = avctx->priv_data;
1188  const int width = s->width;
1189  const int height = s->height;
1190  int slice, table_size = 0, ret, nb_slices;
1191  unsigned slices_info_offset;
1192  int slice_height;
1193 
1194  if (buf_size < (width * height + 7)/8)
1195  return AVERROR_INVALIDDATA;
1196 
1197  av_fast_padded_malloc(&s->bitstream_buffer,
1198  &s->bitstream_buffer_size,
1199  buf_size);
1200  if (!s->bitstream_buffer)
1201  return AVERROR(ENOMEM);
1202 
1203  s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
1204  (const uint32_t *) buf, buf_size / 4);
1205 
1206  if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
1207  return ret;
1208 
1209  if (s->context) {
1210  table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
1211  if (table_size < 0)
1212  return table_size;
1213  }
1214 
1215  if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
1216  return AVERROR_INVALIDDATA;
1217 
1218  s->last_slice_end = 0;
1219 
1220  if (avctx->codec_id == AV_CODEC_ID_HYMT &&
1221  (buf_size > 32 && AV_RL32(avpkt->data + buf_size - 16) == 0)) {
1222  slices_info_offset = AV_RL32(avpkt->data + buf_size - 4);
1223  slice_height = AV_RL32(avpkt->data + buf_size - 8);
1224  nb_slices = AV_RL32(avpkt->data + buf_size - 12);
1225  if (nb_slices * 8LL + slices_info_offset > buf_size - 16 ||
1226  s->chroma_v_shift ||
1227  slice_height <= 0 || nb_slices * (uint64_t)slice_height > height)
1228  return AVERROR_INVALIDDATA;
1229  } else {
1230  slice_height = height;
1231  nb_slices = 1;
1232  }
1233 
1234  for (slice = 0; slice < nb_slices; slice++) {
1235  int y_offset, slice_offset, slice_size;
1236 
1237  if (nb_slices > 1) {
1238  slice_offset = AV_RL32(avpkt->data + slices_info_offset + slice * 8);
1239  slice_size = AV_RL32(avpkt->data + slices_info_offset + slice * 8 + 4);
1240 
1241  if (slice_offset < 0 || slice_size <= 0 || (slice_offset&3) ||
1242  slice_offset + (int64_t)slice_size > buf_size)
1243  return AVERROR_INVALIDDATA;
1244 
1245  y_offset = height - (slice + 1) * slice_height;
1246  s->bdsp.bswap_buf((uint32_t *)s->bitstream_buffer,
1247  (const uint32_t *)(buf + slice_offset), slice_size / 4);
1248  } else {
1249  y_offset = 0;
1250  slice_offset = 0;
1251  slice_size = buf_size;
1252  }
1253 
1254  ret = decode_slice(avctx, p, slice_height, slice_size, y_offset, table_size);
1255  emms_c();
1256  if (ret < 0)
1257  return ret;
1258  }
1259 
1260  *got_frame = 1;
1261 
1262  return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
1263 }
1264 
1265 const FFCodec ff_huffyuv_decoder = {
1266  .p.name = "huffyuv",
1267  .p.long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1268  .p.type = AVMEDIA_TYPE_VIDEO,
1269  .p.id = AV_CODEC_ID_HUFFYUV,
1270  .priv_data_size = sizeof(HYuvContext),
1271  .init = decode_init,
1272  .close = decode_end,
1273  FF_CODEC_DECODE_CB(decode_frame),
1274  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1275  AV_CODEC_CAP_FRAME_THREADS,
1276  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
1277 };
1278 
1279 #if CONFIG_FFVHUFF_DECODER
1280 const FFCodec ff_ffvhuff_decoder = {
1281  .p.name = "ffvhuff",
1282  .p.long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1283  .p.type = AVMEDIA_TYPE_VIDEO,
1284  .p.id = AV_CODEC_ID_FFVHUFF,
1285  .priv_data_size = sizeof(HYuvContext),
1286  .init = decode_init,
1287  .close = decode_end,
1288  FF_CODEC_DECODE_CB(decode_frame),
1289  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1290  AV_CODEC_CAP_FRAME_THREADS,
1291  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
1292 };
1293 #endif /* CONFIG_FFVHUFF_DECODER */
1294 
1295 #if CONFIG_HYMT_DECODER
1296 const FFCodec ff_hymt_decoder = {
1297  .p.name = "hymt",
1298  .p.long_name = NULL_IF_CONFIG_SMALL("HuffYUV MT"),
1299  .p.type = AVMEDIA_TYPE_VIDEO,
1300  .p.id = AV_CODEC_ID_HYMT,
1301  .priv_data_size = sizeof(HYuvContext),
1302  .init = decode_init,
1303  .close = decode_end,
1304  FF_CODEC_DECODE_CB(decode_frame),
1305  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
1306  AV_CODEC_CAP_FRAME_THREADS,
1307  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
1308 };
1309 #endif /* CONFIG_HYMT_DECODER */
VLC_INTERN
#define VLC_INTERN(dst, table, gb, name, bits, max_depth)
Subset of GET_VLC for use in hand-roller VLC code.
Definition: huffyuvdec.c:568
add_bytes
static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
Definition: huffyuvdec.c:853
AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P16
Definition: pixfmt.h:449
HYuvContext
Definition: huffyuv.h:55
generate_joint_tables
static int generate_joint_tables(HYuvContext *s)
Definition: huffyuvdec.c:121
decorrelate
static void decorrelate(SnowContext *s, SubBand *b, IDWTELEM *src, int stride, int inverse, int use_median)
Definition: snowenc.c:1319
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:39
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:554
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AV_CODEC_ID_HUFFYUV
@ AV_CODEC_ID_HUFFYUV
Definition: codec_id.h:75
out
FILE * out
Definition: movenc.c:54
AV_CODEC_ID_HYMT
@ AV_CODEC_ID_HYMT
Definition: codec_id.h:291
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
MAX_VLC_N
#define MAX_VLC_N
Definition: huffyuv.h:47
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
AV_PIX_FMT_YUVA422P9
#define AV_PIX_FMT_YUVA422P9
Definition: pixfmt.h:441
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
pixdesc.h
AV_PIX_FMT_YUVA420P16
#define AV_PIX_FMT_YUVA420P16
Definition: pixfmt.h:448
w
uint8_t w
Definition: llviddspenc.c:38
decode_gray_bitstream
static void decode_gray_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:725
R
#define R
Definition: huffyuvdsp.h:34
AVPacket::data
uint8_t * data
Definition: packet.h:374
huffyuvdsp.h
AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUVA420P10
Definition: pixfmt.h:443
init_vlc
#define init_vlc(vlc, nb_bits, nb_codes, bits, bits_wrap, bits_size, codes, codes_wrap, codes_size, flags)
Definition: vlc.h:43
b
#define b
Definition: input.c:34
READ_2PIX
#define READ_2PIX(dst0, dst1, plane1)
Definition: huffyuvdec.c:615
classic_add_luma
static const unsigned char classic_add_luma[256]
Definition: huffyuvdec.c:64
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:406
FFCodec
Definition: codec_internal.h:112
MEDIAN
@ MEDIAN
Definition: huffyuv.h:52
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_YUV440P
@ AV_PIX_FMT_YUV440P
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
Definition: pixfmt.h:99
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: huffyuvdec.c:294
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
OP8bits
#define OP8bits(dst0, dst1, code)
Definition: huffyuvdec.c:613
OP14bits
#define OP14bits(dst0, dst1, code)
Definition: huffyuvdec.c:654
AV_PIX_FMT_YUVA422P10
#define AV_PIX_FMT_YUVA422P10
Definition: pixfmt.h:444
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:346
add_median_prediction
static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
Definition: huffyuvdec.c:862
ff_huffyuv_alloc_temp
av_cold int ff_huffyuv_alloc_temp(HYuvContext *s)
Definition: huffyuv.c:58
init
static int init
Definition: av_tx.c:47
A
#define A(x)
Definition: vp56_arith.h:28
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
AV_PIX_FMT_YUVA420P9
#define AV_PIX_FMT_YUVA420P9
Definition: pixfmt.h:440
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
AV_PIX_FMT_GBRP14
#define AV_PIX_FMT_GBRP14
Definition: pixfmt.h:424
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:422
AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_YUVA444P16
Definition: pixfmt.h:450
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
AV_PIX_FMT_YUV422P9
#define AV_PIX_FMT_YUV422P9
Definition: pixfmt.h:404
classic_shift_chroma_table_size
#define classic_shift_chroma_table_size
Definition: huffyuvdec.c:55
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: huffyuvdec.c:1182
val
static double val(void *priv, double ch)
Definition: aeval.c:77
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
classic_shift_luma
static const unsigned char classic_shift_luma[classic_shift_luma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:48
AV_PIX_FMT_GRAY16
#define AV_PIX_FMT_GRAY16
Definition: pixfmt.h:390
decode_bgr_1
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha)
Definition: huffyuvdec.c:743
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:409
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:418
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
read_huffman_tables
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
Definition: huffyuvdec.c:211
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:254
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_PIX_FMT_YUVA420P
@ AV_PIX_FMT_YUVA420P
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
Definition: pixfmt.h:101
AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV444P16
Definition: pixfmt.h:419
draw_slice
static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
Definition: huffyuvdec.c:816
g
const char * g
Definition: vf_curves.c:117
bits
uint8_t bits
Definition: vp3data.h:141
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV420P9
Definition: pixfmt.h:403
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:417
get_bits.h
AV_CODEC_ID_FFVHUFF
@ AV_CODEC_ID_FFVHUFF
Definition: codec_id.h:117
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
if
if(ret)
Definition: filter_design.txt:179
ff_huffyuv_common_end
av_cold void ff_huffyuv_common_end(HYuvContext *s)
Definition: huffyuv.c:86
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GBRP16
Definition: pixfmt.h:425
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
ff_huffyuv_generate_bits_table
int ff_huffyuv_generate_bits_table(uint32_t *dst, const uint8_t *len_table, int n)
Definition: huffyuv.c:39
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:407
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
VLC_BITS
#define VLC_BITS
Definition: cfhd.h:96
READ_2PIX_PLANE16
#define READ_2PIX_PLANE16(dst0, dst1, plane)
Definition: huffyuvdec.c:658
AV_PIX_FMT_GBRP9
#define AV_PIX_FMT_GBRP9
Definition: pixfmt.h:421
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
index
int index
Definition: gxfenc.c:89
READ_2PIX_PLANE
#define READ_2PIX_PLANE(dst0, dst1, plane, OP)
Definition: huffyuvdec.c:649
left_prediction
static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
Definition: huffyuvdec.c:844
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:249
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
codec_internal.h
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUV422P12
Definition: pixfmt.h:411
AV_NUM_DATA_POINTERS
#define AV_NUM_DATA_POINTERS
Definition: frame.h:326
AV_PIX_FMT_YUV444P12
#define AV_PIX_FMT_YUV444P12
Definition: pixfmt.h:413
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: huffyuvdec.c:280
LEFT
#define LEFT
Definition: cdgraphics.c:167
ff_huffyuvdsp_init
av_cold void ff_huffyuvdsp_init(HuffYUVDSPContext *c, enum AVPixelFormat pix_fmt)
Definition: huffyuvdsp.c:83
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:379
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
classic_add_chroma
static const unsigned char classic_add_chroma[256]
Definition: huffyuvdec.c:83
AV_PIX_FMT_YUVA444P10
#define AV_PIX_FMT_YUVA444P10
Definition: pixfmt.h:445
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
decode_slice
static int decode_slice(AVCodecContext *avctx, AVFrame *p, int height, int buf_size, int y_offset, int table_size)
Definition: huffyuvdec.c:871
decode_plane_bitstream
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
Definition: huffyuvdec.c:664
AVCodecContext::bits_per_coded_sample
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:1441
interlaced
uint8_t interlaced
Definition: mxfenc.c:2042
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
Definition: filter_design.txt:178
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
ff_hymt_decoder
const FFCodec ff_hymt_decoder
ff_init_vlc_sparse
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes, const void *bits, int bits_wrap, int bits_size, const void *codes, int codes_wrap, int codes_size, const void *symbols, int symbols_wrap, int symbols_size, int flags)
Definition: vlc.c:272
ff_huffyuv_decoder
const FFCodec ff_huffyuv_decoder
Definition: huffyuvdec.c:1265
AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_GBRP12
Definition: pixfmt.h:423
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:48
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
len
int len
Definition: vorbis_enc_data.h:426
PLANE
@ PLANE
Definition: huffyuv.h:51
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_YUV444P9
Definition: pixfmt.h:405
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
G
#define G
Definition: huffyuvdsp.h:33
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:383
decode_bgr_bitstream
static void decode_bgr_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:801
classic_shift_chroma
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size+AV_INPUT_BUFFER_PADDING_SIZE]
Definition: huffyuvdec.c:56
AV_PIX_FMT_YUVA444P9
#define AV_PIX_FMT_YUVA444P9
Definition: pixfmt.h:442
AV_PIX_FMT_YUV420P12
#define AV_PIX_FMT_YUV420P12
Definition: pixfmt.h:410
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
left neighbouring block used for prediction in the Snow codec (full bitstream description in snow.txt)
Definition: snow.txt:386
classic_shift_luma_table_size
#define classic_shift_luma_table_size
Definition: huffyuvdec.c:47
AV_RL32
unsigned int AV_RL32 (little-endian 32-bit read, referenced through the bytestream read/write template macros)
Definition: bytestream.h:92
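AV_RL32 reads a 32-bit value stored little-endian, independent of host byte order and alignment. Illustrative only:

    #include "libavutil/intreadwrite.h"

    static uint32_t read_le32_example(void)
    {
        static const uint8_t buf[4] = { 0x78, 0x56, 0x34, 0x12 };
        return AV_RL32(buf);    /* 0x12345678 on every platform */
    }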
BITS_LEFT
#define BITS_LEFT(name, gb)
Definition: get_bits.h:191
AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_YUV422P14
Definition: pixfmt.h:415
B
#define B
Definition: huffyuvdsp.h:32
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
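The LLVidDSPContext filled in by ff_llviddsp_init() provides the prediction primitives used to reconstruct planes. A hedged sketch of left prediction over a plane; the add_left_pred member and its signature are taken from lossless_videodsp.h as an assumption:

    #include "lossless_videodsp.h"

    /* Assumed member signature:
     *   int (*add_left_pred)(uint8_t *dst, const uint8_t *src, ptrdiff_t w, int acc);
     * It reconstructs dst[i] = src[i] + previous sample (mod 256) and returns
     * the last value, i.e. the running "left" predictor for the next call. */
    static void reconstruct_plane(uint8_t *dst, const uint8_t *residual,
                                  ptrdiff_t stride, int width, int height)
    {
        LLVidDSPContext dsp;
        int left = 0;

        ff_llviddsp_init(&dsp);
        for (int y = 0; y < height; y++)
            left = dsp.add_left_pred(dst + y * stride, residual + y * stride,
                                     width, left);
    }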
decode_422_bitstream
static void decode_422_bitstream(HYuvContext *s, int count)
Definition: huffyuvdec.c:620
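decode_422_bitstream() fills the decoder's temporary line buffers with samples interleaved as Y, U, Y, V for each pair of 4:2:2 pixels. The loop below is a simplified, hypothetical rendering of that idea using the generic VLC reader, not the actual macro-based, cache-optimized implementation; the buffer names, VLC handles and the VLC_BITS/max-depth values are assumptions:

    #include "get_bits.h"

    #define VLC_BITS 11   /* assumed table index width, illustrative only */

    static void decode_422_sketch(GetBitContext *gb, uint8_t *ytmp,
                                  uint8_t *utmp, uint8_t *vtmp,
                                  const VLC *yvlc, const VLC *uvlc,
                                  const VLC *vvlc, int count)
    {
        for (int i = 0; i < count; i++) {
            ytmp[2 * i    ] = get_vlc2(gb, yvlc->table, VLC_BITS, 3);
            utmp[i]         = get_vlc2(gb, uvlc->table, VLC_BITS, 3);
            ytmp[2 * i + 1] = get_vlc2(gb, yvlc->table, VLC_BITS, 3);
            vtmp[i]         = get_vlc2(gb, vvlc->table, VLC_BITS, 3);
        }
    }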
AVCodecContext
main external API structure.
Definition: avcodec.h:389
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
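SHOW_UBITS, together with OPEN_READER/UPDATE_CACHE/LAST_SKIP_BITS/CLOSE_READER from get_bits.h, forms the cached-reader interface the VLC macros build on. A minimal usage sketch that peeks and then consumes 8 bits:

    #include "get_bits.h"

    static unsigned peek_and_skip8(GetBitContext *gb)
    {
        unsigned v;
        OPEN_READER(re, gb);          /* 're' names the local cache */
        UPDATE_CACHE(re, gb);
        v = SHOW_UBITS(re, gb, 8);    /* inspect the next 8 bits */
        LAST_SKIP_BITS(re, gb, 8);    /* mark them as consumed */
        CLOSE_READER(re, gb);         /* write the position back to gb */
        return v;
    }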
VLC
Definition: vlc.h:31
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
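sign_extend() interprets the low 'bits' bits of val as a two's-complement value, which is needed when bias/offset fields are stored as raw bit fields. A few illustrative results:

    int a = sign_extend(0xFF,  8);   /* -1   */
    int b = sign_extend(0x7F,  8);   /* 127  */
    int c = sign_extend(0x180, 9);   /* -128 */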
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
lossless_videodsp.h
map
const VDPAUPixFmtMap * map
Definition: hwcontext_vdpau.c:71
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
alpha
static const int16_t alpha[]
Definition: ilbcdata.h:55
ff_huffyuv_common_init
av_cold void ff_huffyuv_common_init(AVCodecContext *avctx)
Definition: huffyuv.c:71
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
pixel *src
Definition: h264pred_template.c:418
AV_PIX_FMT_YUV411P
@ AV_PIX_FMT_YUV411P
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
Definition: pixfmt.h:73
read_len_table
static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
Definition: huffyuvdec.c:102
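read_len_table() expands the run-length coded code-length table (carried in the bitstream or built into the decoder) into n per-symbol lengths. A sketch of that loop, assuming the commonly described layout of 3-bit repeat count and 5-bit length, with repeat==0 escaping to an 8-bit count; treat the exact field widths as an assumption:

    static int read_len_table_sketch(uint8_t *dst, GetBitContext *gb, int n)
    {
        for (int i = 0; i < n;) {
            int repeat = get_bits(gb, 3);
            int val    = get_bits(gb, 5);
            if (repeat == 0)
                repeat = get_bits(gb, 8);
            if (i + repeat > n)
                return AVERROR_INVALIDDATA;   /* corrupt table */
            while (repeat--)
                dst[i++] = val;
        }
        return 0;
    }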
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which typically indicates the size in bytes of each pict...
Definition: frame.h:370
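linesize is the row stride in bytes and may exceed the visible width because of alignment padding (or be negative for bottom-up layouts), so it must be used instead of width when walking plane data. Illustrative loop for an 8-bit format:

    #include <string.h>
    #include "libavutil/frame.h"

    /* Blank the luma plane of an 8-bit frame, honouring the stride. */
    static void clear_luma(AVFrame *frame)
    {
        for (int y = 0; y < frame->height; y++)
            memset(frame->data[0] + y * (ptrdiff_t)frame->linesize[0],
                   0, frame->width);
    }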
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
AV_PIX_FMT_YUV410P
@ AV_PIX_FMT_YUV410P
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Definition: pixfmt.h:72
ff_ffvhuff_decoder
const FFCodec ff_ffvhuff_decoder
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
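av_log() and AVERROR_INVALIDDATA usually appear together in a decoder's error path: report the problem against the codec context, then return the matching error code. The helper and the 'table_size' check are illustrative:

    static int check_table_size(AVCodecContext *avctx, int table_size)
    {
        if (table_size <= 0) {
            av_log(avctx, AV_LOG_ERROR, "invalid Huffman table size %d\n",
                   table_size);
            return AVERROR_INVALIDDATA;
        }
        return 0;
    }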
h
h
Definition: vp9dsp_template.c:2038
AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUV444P14
Definition: pixfmt.h:416
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
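av_image_check_size() is the standard guard in decoder init against width/height combinations whose total byte size could overflow. Typical call:

    /* In a decoder's init function: reject dimensions that cannot be
     * addressed safely. */
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0)
        return ret;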
huffyuv.h
AV_PIX_FMT_YUVA422P
@ AV_PIX_FMT_YUVA422P
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
Definition: pixfmt.h:166
AV_PIX_FMT_YUV420P14
#define AV_PIX_FMT_YUV420P14
Definition: pixfmt.h:414
re
float re
Definition: fft.c:79
read_old_huffman_tables
static int read_old_huffman_tables(HYuvContext *s)
Definition: huffyuvdec.c:240
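read_old_huffman_tables() handles the original "classic" huffyuv streams, whose code-length tables are not carried in extradata but compiled into the decoder (classic_shift_luma / classic_shift_chroma, with classic_add_luma / classic_add_chroma supplying the symbol values). A hedged outline of expanding one built-in table with the same reader used for in-stream tables; VLC construction, error reporting and the chroma table are omitted:

    /* Sketch only: parse the built-in run-length coded length table, then the
     * resulting 256 lengths would be turned into codes and fed to the VLC
     * init helpers. */
    static int expand_classic_luma_lengths(uint8_t len[256])
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, classic_shift_luma,
                                 classic_shift_luma_table_size);
        if (ret < 0)
            return ret;
        return read_len_table(len, &gb, 256);
    }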