svq1dec.c
/*
 * SVQ1 decoder
 * ported to MPlayer by Arpi <arpi@thot.banki.hu>
 * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * Copyright (c) 2002 The Xine project
 * Copyright (c) 2002 The FFmpeg project
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information on the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "libavutil/crc.h"

#include "avcodec.h"
#include "get_bits.h"
#include "h263.h"
#include "hpeldsp.h"
#include "internal.h"
#include "mathops.h"
#include "svq1.h"

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv;

typedef struct SVQ1Context {
    HpelDSPContext hdsp;
    GetBitContext gb;
    AVFrame *prev;

    uint8_t *pkt_swapped;
    int pkt_swapped_allocated;

    int width;
    int height;
    int frame_code;
    int nonref;             // 1 if the current frame won't be referenced
} SVQ1Context;

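/* Scramble table for the embedded message string carried in some frame
 * headers: svq1_parse_string() XORs each message byte with a running seed
 * looked up in this table, chained from the length byte onwards. */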
static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};

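/* Walk the variable-size vector tree for the current block: whenever the
 * bitstream signals a split, the current vector is halved (alternating
 * between vertical and horizontal splits as the level decreases) and both
 * halves are appended to the breadth-first work list. Level 5 corresponds
 * to a full 16x16 block, level 0 to a 4x2 vector. */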
#define SVQ1_PROCESS_VECTOR()                                                \
    for (; level > 0; i++) {                                                 \
        /* process next depth */                                             \
        if (i == m) {                                                        \
            m = n;                                                           \
            if (--level == 0)                                                \
                break;                                                       \
        }                                                                    \
        /* divide block if next bit set */                                   \
        if (!get_bits1(bitbuf))                                              \
            break;                                                           \
        /* add child nodes */                                                \
        list[n++] = list[i];                                                 \
        list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level >> 1) + 1));\
    }

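/* Accumulate the selected codebook stage vectors on top of the packed
 * mean/prediction already in n1/n2: each 32-bit word holds four pixels,
 * n1 carries the odd-positioned bytes and n2 the even-positioned ones in
 * 16-bit lanes. The 0x80808080 XOR re-centres the signed codebook bytes,
 * and the bit tricks below saturate every lane to 0..255 without
 * per-pixel branches. */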
#define SVQ1_ADD_CODEBOOK()                                                  \
    /* add codebook entries to vector */                                     \
    for (j = 0; j < stages; j++) {                                           \
        n3  = codebook[entries[j]] ^ 0x80808080;                             \
        n1 += (n3 & 0xFF00FF00) >> 8;                                        \
        n2 +=  n3 & 0x00FF00FF;                                              \
    }                                                                        \
                                                                             \
    /* clip to [0..255] */                                                   \
    if (n1 & 0xFF00FF00) {                                                   \
        n3  = (n1 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;            \
        n1 += 0x7F007F00;                                                    \
        n1 |= (~n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;            \
        n1 &= n3 & 0x00FF00FF;                                               \
    }                                                                        \
                                                                             \
    if (n2 & 0xFF00FF00) {                                                   \
        n3  = (n2 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;            \
        n2 += 0x7F007F00;                                                    \
        n2 |= (~n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;            \
        n2 &= n3 & 0x00FF00FF;                                               \
    }

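/* Select the codebook for the current vector size and read one 4-bit index
 * per stage; each index is converted into an offset into the flat codebook
 * table for this level. n4 ends up holding the bias-corrected mean
 * replicated into both 16-bit halves so it can seed the packed sums above. */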
#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)                                    \
    codebook = (const uint32_t *)cbook[level];                               \
    if (stages > 0)                                                          \
        bit_cache = get_bits(bitbuf, 4 * stages);                            \
    /* calculate codebook entries for this vector */                         \
    for (j = 0; j < stages; j++) {                                           \
        entries[j] = (((bit_cache >> (4 * (stages - j - 1))) & 0xF) +        \
                      16 * j) << (level + 1);                                \
    }                                                                        \
    mean -= stages * 128;                                                    \
    n4    = (mean << 16) + mean;

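/* Decode one intra-coded 16x16 block directly into the picture: the block
 * is split into vectors as signalled in the bitstream, and each vector is
 * reconstructed as its mean plus up to six codebook stages. */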
static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                   ptrdiff_t pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int stages;
    unsigned mean;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], 0, width);
            continue;   /* skip vector */
        }

        if ((stages > 0 && level >= 4)) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }
        av_assert0(stages >= 0);

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], mean, width);
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);

            for (y = 0; y < height; y++) {
                for (x = 0; x < width / 4; x++, codebook++) {
                    n1 = n4;
                    n2 = n4;
                    SVQ1_ADD_CODEBOOK()
                    /* store result */
                    dst[x] = n1 << 8 | n2;
                }
                dst += pitch / 4;
            }
        }
    }

    return 0;
}

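/* Decode the residual for an inter-coded block: same vector tree and
 * codebook scheme as the intra case, but the decoded mean and stages are
 * added to the motion-compensated prediction already present in the
 * destination, then saturated. */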
static int svq1_decode_block_non_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                       ptrdiff_t pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int stages;
    unsigned mean;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1)
            continue;   /* skip vector */

        if ((stages > 0 && level >= 4)) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }
        av_assert0(stages >= 0);

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);

        for (y = 0; y < height; y++) {
            for (x = 0; x < width / 4; x++, codebook++) {
                n3 = dst[x];
                /* add mean value to vector */
                n1 = n4 + ((n3 & 0xFF00FF00) >> 8);
                n2 = n4 + (n3 & 0x00FF00FF);
                SVQ1_ADD_CODEBOOK()
                /* store result */
                dst[x] = n1 << 8 | n2;
            }
            dst += pitch / 4;
        }
    }
    return 0;
}

static int svq1_decode_motion_vector(GetBitContext *bitbuf, svq1_pmv *mv,
                                     svq1_pmv **pmv)
{
    int diff;
    int i;

    for (i = 0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if (diff < 0)
            return AVERROR_INVALIDDATA;
        else if (diff) {
            if (get_bits1(bitbuf))
                diff = -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = sign_extend(diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y), 6);
        else
            mv->x = sign_extend(diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x), 6);
    }

    return 0;
}

static void svq1_skip_block(uint8_t *current, uint8_t *previous,
                            ptrdiff_t pitch, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y * pitch];
    dst = current;

    for (i = 0; i < 16; i++) {
        memcpy(dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}

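/* Single-vector inter block: decode one motion vector predicted from the
 * neighbouring blocks, update the predictor array, clip the vector to the
 * picture borders and run half-pel motion compensation for the whole
 * 16x16 block. */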
static int svq1_motion_inter_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[x / 8 + 2];
        pmv[2] = &motion[x / 8 + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    motion[0].x         =
    motion[x / 8 + 2].x =
    motion[x / 8 + 3].x = mv.x;
    motion[0].y         =
    motion[x / 8 + 2].y =
    motion[x / 8 + 3].y = mv.y;

    mv.x = av_clip(mv.x, -2 * x, 2 * (width  - x - 16));
    mv.y = av_clip(mv.y, -2 * y, 2 * (height - y - 16));

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1)) * pitch];
    dst = current;

    hdsp->put_pixels_tab[0][(mv.y & 1) << 1 | (mv.x & 1)](dst, src, pitch, 16);

    return 0;
}

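/* Four-vector inter block: each 8x8 quadrant gets its own motion vector.
 * The vectors are decoded in sequence, each predicted from previously
 * decoded neighbours, and half-pel motion compensation is applied per
 * quadrant. */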
static int svq1_motion_inter_4v_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                      uint8_t *current, uint8_t *previous,
                                      ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                      int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 3];
    }
    result = svq1_decode_motion_vector(bitbuf, &motion[0], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector(bitbuf, &motion[(x / 8) + 2], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector(bitbuf, pmv[3], pmv);
    if (result)
        return result;

    /* form predictions */
    for (i = 0; i < 4; i++) {
        int mvx = pmv[i]->x + (i  & 1) * 16;
        int mvy = pmv[i]->y + (i >> 1) * 16;

        // FIXME: clipping or padding?
        mvx = av_clip(mvx, -2 * x, 2 * (width  - x - 8));
        mvy = av_clip(mvy, -2 * y, 2 * (height - y - 8));

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1)) * pitch];
        dst = current;

        hdsp->put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst, src, pitch, 8);

        /* select next block */
        if (i & 1)
            current += 8 * (pitch - 1);
        else
            current += 8;
    }

    return 0;
}

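/* Decode one 16x16 block of a delta (P) frame: read the block type and
 * dispatch to skip, single-vector inter, four-vector inter or intra
 * handling. Inter blocks are followed by a coded residual. */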
static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
                                   GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   ptrdiff_t pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x         =
        motion[0].y         =
        motion[x / 8 + 2].x =
        motion[x / 8 + 2].y =
        motion[x / 8 + 3].x =
        motion[x / 8 + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block(current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block(hdsp, bitbuf, current, previous,
                                         pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block(hdsp, bitbuf, current, previous,
                                            pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_4v_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra(bitbuf, current, pitch);
        break;
    }

    return result;
}

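/* Unscramble the embedded message string from the frame header: the first
 * byte is the length, each following byte is XORed with a seed chained
 * through string_table, and the result is NUL-terminated. */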
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
{
    uint8_t seed;
    int i;

    out[0] = get_bits(bitbuf, 8);
    seed   = string_table[out[0]];

    for (i = 1; i <= out[0]; i++) {
        out[i] = get_bits(bitbuf, 8) ^ seed;
        seed   = string_table[out[i] ^ seed];
    }
    out[i] = 0;
}

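/* Parse the frame header: temporal reference, frame type, then (for intra
 * frames only) an optional packet checksum and embedded message, followed
 * by the frame size, either from a 3-bit code or as explicit 12-bit
 * width/height, plus a few unknown extension fields. */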
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
{
    SVQ1Context *s = avctx->priv_data;
    GetBitContext *bitbuf = &s->gb;
    int frame_size_code;
    int width  = s->width;
    int height = s->height;

    skip_bits(bitbuf, 8); /* temporal_reference */

    /* frame type */
    s->nonref = 0;
    switch (get_bits(bitbuf, 2)) {
    case 0:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case 2:
        s->nonref = 1;
    case 1:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type.\n");
        return AVERROR_INVALIDDATA;
    }

    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        /* unknown fields */
        if (s->frame_code == 0x50 || s->frame_code == 0x60) {
            int csum = get_bits(bitbuf, 16);

            csum = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), av_bswap16(csum), bitbuf->buffer, bitbuf->size_in_bits >> 3));

            ff_dlog(avctx, "%s checksum (%02x) for packet data\n",
                    (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->frame_code ^ 0x10) >= 0x50) {
            uint8_t msg[257];

            svq1_parse_string(bitbuf, msg);

            av_log(avctx, AV_LOG_INFO,
                   "embedded message:\n%s\n", ((char *)msg) + 1);
        }

        skip_bits(bitbuf, 2);
        skip_bits(bitbuf, 2);
        skip_bits1(bitbuf);

        /* load frame size */
        frame_size_code = get_bits(bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            width  = get_bits(bitbuf, 12);
            height = get_bits(bitbuf, 12);

            if (!width || !height)
                return AVERROR_INVALIDDATA;
        } else {
            /* get width, height from table */
            width  = ff_svq1_frame_size_table[frame_size_code][0];
            height = ff_svq1_frame_size_table[frame_size_code][1];
        }
    }

    /* unknown fields */
    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);    /* use packet checksum if (1) */
        skip_bits1(bitbuf);    /* component checksums after image data if (1) */

        if (get_bits(bitbuf, 2) != 0)
            return AVERROR_INVALIDDATA;
    }

    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 4);
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 2);

        if (skip_1stop_8data_bits(bitbuf) < 0)
            return AVERROR_INVALIDDATA;
    }

    s->width  = width;
    s->height = height;
    return 0;
}

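/* Top-level frame decoding: validate the 22-bit frame code, undo the header
 * byte swapping applied when the frame code is not 0x20, parse the header,
 * then decode the Y, U and V planes block by block (intra blocks for
 * keyframes, motion-compensated delta blocks otherwise). */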
static int svq1_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    SVQ1Context *s     = avctx->priv_data;
    AVFrame *cur       = data;
    uint8_t *current;
    int result, i, x, y, width, height;
    svq1_pmv *pmv;
    int ret;

    /* initialize bit buffer */
    ret = init_get_bits8(&s->gb, buf, buf_size);
    if (ret < 0)
        return ret;

    /* decode frame header */
    s->frame_code = get_bits(&s->gb, 22);

    if ((s->frame_code & ~0x70) || !(s->frame_code & 0x60))
        return AVERROR_INVALIDDATA;

    /* swap some header bytes (why?) */
    if (s->frame_code != 0x20) {
        uint32_t *src;

        if (buf_size < 9 * 4) {
            av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
            return AVERROR_INVALIDDATA;
        }

        av_fast_padded_malloc(&s->pkt_swapped,
                              &s->pkt_swapped_allocated,
                              buf_size);
        if (!s->pkt_swapped)
            return AVERROR(ENOMEM);

        memcpy(s->pkt_swapped, buf, buf_size);
        buf = s->pkt_swapped;
        init_get_bits(&s->gb, buf, buf_size * 8);
        skip_bits(&s->gb, 22);

        src = (uint32_t *)(s->pkt_swapped + 4);

        for (i = 0; i < 4; i++)
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
    }

    result = svq1_decode_frame_header(avctx, cur);
    if (result != 0) {
        ff_dlog(avctx, "Error in svq1_decode_frame_header %i\n", result);
        return result;
    }

    result = ff_set_dimensions(avctx, s->width, s->height);
    if (result < 0)
        return result;

    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->nonref) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         cur->pict_type != AV_PICTURE_TYPE_I) ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    result = ff_get_buffer(avctx, cur, s->nonref ? 0 : AV_GET_BUFFER_FLAG_REF);
    if (result < 0)
        return result;

    pmv = av_malloc((FFALIGN(s->width, 16) / 8 + 3) * sizeof(*pmv));
    if (!pmv)
        return AVERROR(ENOMEM);

    /* decode y, u and v components */
    for (i = 0; i < 3; i++) {
        int linesize = cur->linesize[i];
        if (i == 0) {
            width  = FFALIGN(s->width,  16);
            height = FFALIGN(s->height, 16);
        } else {
            if (avctx->flags & AV_CODEC_FLAG_GRAY)
                break;
            width  = FFALIGN(s->width  / 4, 16);
            height = FFALIGN(s->height / 4, 16);
        }

        current = cur->data[i];

        if (cur->pict_type == AV_PICTURE_TYPE_I) {
            /* keyframe */
            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_block_intra(&s->gb, &current[x],
                                                     linesize);
                    if (result) {
                        av_log(avctx, AV_LOG_ERROR,
                               "Error in svq1_decode_block %i (keyframe)\n",
                               result);
                        goto err;
                    }
                }
                current += 16 * linesize;
            }
        } else {
            /* delta frame */
            uint8_t *previous = s->prev->data[i];
            if (!previous ||
                s->prev->width != s->width || s->prev->height != s->height) {
                av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
                result = AVERROR_INVALIDDATA;
                goto err;
            }

            memset(pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));

            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_delta_block(avctx, &s->hdsp,
                                                     &s->gb, &current[x],
                                                     previous, linesize,
                                                     pmv, x, y, width, height);
                    if (result != 0) {
                        ff_dlog(avctx,
                                "Error in svq1_decode_delta_block %i\n",
                                result);
                        goto err;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16 * linesize;
            }
        }
    }

    if (!s->nonref) {
        av_frame_unref(s->prev);
        result = av_frame_ref(s->prev, cur);
        if (result < 0)
            goto err;
    }

    *got_frame = 1;
    result     = buf_size;

err:
    av_free(pmv);
    return result;
}

static av_cold int svq1_decode_init(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;
    int i;
    int offset = 0;

    s->prev = av_frame_alloc();
    if (!s->prev)
        return AVERROR(ENOMEM);

    s->width  = avctx->width  + 3 & ~3;
    s->height = avctx->height + 3 & ~3;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);

    INIT_VLC_STATIC(&svq1_block_type, 2, 4,
                    &ff_svq1_block_type_vlc[0][1], 2, 1,
                    &ff_svq1_block_type_vlc[0][0], 2, 1, 6);

    INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
                    &ff_mvtab[0][1], 2, 1,
                    &ff_mvtab[0][0], 2, 1, 176);

    for (i = 0; i < 6; i++) {
        static const uint8_t sizes[2][6] = { { 14, 10, 14, 18, 16, 18 },
                                             { 10, 10, 14, 14, 14, 16 } };
        static VLC_TYPE table[168][2];
        svq1_intra_multistage[i].table           = &table[offset];
        svq1_intra_multistage[i].table_allocated = sizes[0][i];
        offset                                  += sizes[0][i];
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
        svq1_inter_multistage[i].table           = &table[offset];
        svq1_inter_multistage[i].table_allocated = sizes[1][i];
        offset                                  += sizes[1][i];
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
    }

    INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
                    &ff_svq1_intra_mean_vlc[0][1], 4, 2,
                    &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);

    INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
                    &ff_svq1_inter_mean_vlc[0][1], 4, 2,
                    &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);

    return 0;
}

static av_cold int svq1_decode_end(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_free(&s->prev);
    av_freep(&s->pkt_swapped);
    s->pkt_swapped_allocated = 0;

    return 0;
}

static void svq1_flush(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_unref(s->prev);
}

AVCodec ff_svq1_decoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_decode_init,
    .close          = svq1_decode_end,
    .decode         = svq1_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
    .flush          = svq1_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};