/*
 * svq3.c -- FFmpeg SVQ3 decoder.
 * (This listing was extracted from a generated documentation page; the
 * page header carried no code.)
 */
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "libavutil/crc.h"
47 #include "libavutil/mem_internal.h"
48 
49 #include "internal.h"
50 #include "avcodec.h"
51 #include "mpegutils.h"
52 #include "h264dec.h"
53 #include "h264data.h"
54 #include "golomb.h"
55 #include "hpeldsp.h"
56 #include "mathops.h"
57 #include "rectangle.h"
58 #include "tpeldsp.h"
59 
60 #if CONFIG_ZLIB
61 #include <zlib.h>
62 #endif
63 
64 #include "svq1.h"
65 
66 /**
67  * @file
68  * svq3 decoder.
69  */
70 
/**
 * Reference-counted picture storage used by the SVQ3 decoder.
 *
 * NOTE(review): at least one member is missing from this view (the source
 * numbering jumps from 71 to 73); code below dereferences pic->f, so an
 * AVFrame pointer presumably belongs here -- confirm against upstream.
 */
typedef struct SVQ3Frame {

    /* per-list motion vectors at 4x4-block resolution; *_buf is the raw
     * allocation, motion_val is the pointer actually indexed (presumably
     * offset into the buffer -- TODO confirm) */
    int16_t (*motion_val_buf[2])[2];
    int16_t (*motion_val[2])[2];

    /* per-macroblock type array; *_buf is the allocation, mb_type the
     * usable pointer */
    uint32_t *mb_type_buf, *mb_type;
} SVQ3Frame;
79 
typedef struct SVQ3Context {
    /* NOTE(review): many members are missing from this view (the original
     * line numbering jumps repeatedly between fields); only the members
     * visible here are documented. Confirm the full layout upstream. */

    uint8_t *slice_buf;          /* reassembled slice payload (av_fast_padded_malloc'd) */
    unsigned slice_buf_size;     /* current allocation size of slice_buf */
    uint32_t watermark_key;      /* XOR key applied to watermarked slice data */
    int qscale;                  /* current quantizer index */
    int cbp;                     /* coded block pattern of the current MB */

    int mb_x, mb_y;              /* current macroblock position, in MB units */
    int mb_xy;                   /* linear macroblock index */
    int b_stride;                /* stride of the 4x4-block-resolution arrays */

    uint32_t *mb2br_xy;          /* MB index -> offset into intra4x4_pred_mode storage */

    unsigned int top_samples_available;   /* bitmask of available above samples */

    uint8_t *edge_emu_buffer;    /* scratch for edge-emulated motion compensation */

    /* per-MB caches laid out in the H.264 scan8 pattern */
    DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
    DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
    DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];        /* residual coefficients */
    DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];  /* intra16x16 luma DC plane */
    DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
    uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
    int block_offset[2 * (16 * 3)];       /* per-block pixel offsets into the MB */
} SVQ3Context;
146 
/* inter motion-vector precision/prediction modes signalled per MB */
#define FULLPEL_MODE  1   /* full-pel motion vectors */
#define HALFPEL_MODE  2   /* half-pel motion vectors */
#define THIRDPEL_MODE 3   /* third-pel motion vectors */
#define PREDICT_MODE  4   /* MVs derived by scaling the next frame's MVs (see svq3_mc_dir) */
151 
/* "dual" coefficient scan for 4x4 blocks, taken from an older H.264
 * draft. Each entry is x + y * 4 within the 4x4 grid.
 * NOTE(review): the original ASCII-art diagram of the scan path was
 * mangled by extraction and has been dropped here. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
167 
/* scan order for the 16 luma DC coefficients of an intra16x16 MB;
 * entries are offsets of the form col * 16 + row * 64 into the
 * macroblock coefficient buffer */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
174 
/* (x, y) pairs enumerating a 5x5 grid in anti-diagonal order; indexed
 * by the luma intra-prediction VLC (0..24) in svq3_decode_mb */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
186 
/* intra 4x4 mode-candidate table: svq3_pred_1[top + 1][left + 1] lists
 * up to 5 candidate prediction modes (-1 = unused entry), selected via
 * svq3_pred_0[vlc] in svq3_decode_mb; top/left are the neighbours'
 * decoded modes (-1 when unavailable, hence the +1 bias) */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
201 
/* (run, level) pairs for the first 16 DCT VLC codes; indexed as
 * svq3_dct_tables[intra][vlc] in svq3_decode_block, where intra is
 * derived from the block type (codes >= 16 use escape formulas) */
static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
211 
/* dequantization multipliers for qp 0..31; values grow roughly
 * geometrically (about a factor of 2 every 6 steps) */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
218 
219 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
220 {
221  const unsigned qmul = svq3_dequant_coeff[qp];
222 #define stride 16
223  int i;
224  int temp[16];
225  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
226 
227  for (i = 0; i < 4; i++) {
228  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
229  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
230  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
231  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
232 
233  temp[4 * i + 0] = z0 + z3;
234  temp[4 * i + 1] = z1 + z2;
235  temp[4 * i + 2] = z1 - z2;
236  temp[4 * i + 3] = z0 - z3;
237  }
238 
239  for (i = 0; i < 4; i++) {
240  const int offset = x_offset[i];
241  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
242  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
243  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
244  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
245 
246  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
247  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
248  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
249  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
250  }
251 }
252 #undef stride
253 
254 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
255  int stride, int qp, int dc)
256 {
257  const int qmul = svq3_dequant_coeff[qp];
258  int i;
259 
260  if (dc) {
261  dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
262  : qmul * (block[0] >> 3) / 2);
263  block[0] = 0;
264  }
265 
266  for (i = 0; i < 4; i++) {
267  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
268  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
269  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
270  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
271 
272  block[0 + 4 * i] = z0 + z3;
273  block[1 + 4 * i] = z1 + z2;
274  block[2 + 4 * i] = z1 - z2;
275  block[3 + 4 * i] = z0 - z3;
276  }
277 
278  for (i = 0; i < 4; i++) {
279  const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
280  const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
281  const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
282  const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
283  const int rr = (dc + 0x80000u);
284 
285  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
286  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
287  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
288  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
289  }
290 
291  memset(block, 0, 16 * sizeof(int16_t));
292 }
293 
/**
 * Decode the (run, level) coefficient sequence of one block into 'block'.
 *
 * @param index first coefficient position to decode (e.g. 1 to skip DC)
 * @param type  block type: selects the scan pattern, the VLC tables and
 *              the scan limits
 * @return 0 on success, -1 on a corrupt bitstream
 */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    /* NOTE(review): the initializer rows of scan_patterns are missing
     * from this view (source line 298 was dropped); upstream maps each
     * block type to its coefficient scan table -- confirm upstream. */
    static const uint8_t *const scan_patterns[4] = {
    };

    int run, level, sign, limit;
    unsigned vlc;
    const int intra = 3 * type >> 2;   /* 0 for types 0/1, 1 for type 2, 2 for type 3 */
    const uint8_t *const scan = scan_patterns[type];

    /* type 3 (chroma DC) scans only 4 coefficients; type 2 loops over
     * two halves of the block (see the break below) */
    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        /* a zero code terminates this run of coefficients */
        for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;     /* corrupt golomb code */

            sign = (vlc & 1) ? 0 : -1;   /* even codes encode negative levels */
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                /* chroma DC: small dedicated code table */
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    /* escape coding beyond the 16-entry table */
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;     /* run overruns the scan limit */

            block[scan[index]] = (level ^ sign) - sign;   /* apply sign */
        }

        if (type != 2) {
            break;   /* only type 2 continues into the second half */
        }
    }

    return 0;
}
352 
353 static av_always_inline int
354 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
355  int i, int list, int part_width)
356 {
357  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
358 
359  if (topright_ref != PART_NOT_AVAILABLE) {
360  *C = s->mv_cache[list][i - 8 + part_width];
361  return topright_ref;
362  } else {
363  *C = s->mv_cache[list][i - 8 - 1];
364  return s->ref_cache[list][i - 8 - 1];
365  }
366 }
367 
368 /**
369  * Get the predicted MV.
370  * @param n the block index
371  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
372  * @param mx the x component of the predicted motion vector
373  * @param my the y component of the predicted motion vector
374  */
375 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
376  int part_width, int list,
377  int ref, int *const mx, int *const my)
378 {
379  const int index8 = scan8[n];
380  const int top_ref = s->ref_cache[list][index8 - 8];
381  const int left_ref = s->ref_cache[list][index8 - 1];
382  const int16_t *const A = s->mv_cache[list][index8 - 1];
383  const int16_t *const B = s->mv_cache[list][index8 - 8];
384  const int16_t *C;
385  int diagonal_ref, match_count;
386 
387 /* mv_cache
388  * B . . A T T T T
389  * U . . L . . , .
390  * U . . L . . . .
391  * U . . L . . , .
392  * . . . L . . . .
393  */
394 
395  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
396  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
397  if (match_count > 1) { //most common
398  *mx = mid_pred(A[0], B[0], C[0]);
399  *my = mid_pred(A[1], B[1], C[1]);
400  } else if (match_count == 1) {
401  if (left_ref == ref) {
402  *mx = A[0];
403  *my = A[1];
404  } else if (top_ref == ref) {
405  *mx = B[0];
406  *my = B[1];
407  } else {
408  *mx = C[0];
409  *my = C[1];
410  }
411  } else {
412  if (top_ref == PART_NOT_AVAILABLE &&
413  diagonal_ref == PART_NOT_AVAILABLE &&
414  left_ref != PART_NOT_AVAILABLE) {
415  *mx = A[0];
416  *my = A[1];
417  } else {
418  *mx = mid_pred(A[0], B[0], C[0]);
419  *my = mid_pred(A[1], B[1], C[1]);
420  }
421  }
422 }
423 
/**
 * Motion-compensate one partition (luma and, unless decoding gray-only,
 * chroma) from a reference picture into the current picture.
 *
 * @param x, y          top-left corner of the partition, in pixels
 * @param width, height partition size in pixels
 * @param mx, my        motion vector relative to (x, y), full-pel units
 * @param dxy           sub-pel interpolation selector for the dsp tables
 * @param thirdpel      nonzero to use the third-pel dsp functions
 * @param dir           0: predict from last_pic, 1: from next_pic
 * @param avg           nonzero to average into dest instead of storing
 */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize  = 2 - (width >> 3); // 16->0, 8->1, 4->2
    int linesize   = s->cur_pic->f->linesize[0];
    int uvlinesize = s->cur_pic->f->linesize[1];

    mx += x;
    my += y;

    /* reference block (partially) outside the frame: clip the position
     * and use the edge emulation buffer */
    if (mx < 0 || mx >= s->h_edge_pos - width  - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx  = av_clip(mx, -16, s->h_edge_pos - width  + 15);
        my  = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = s->cur_pic->f->data[0] + x  + y  * linesize;
    src  = pic->f->data[0]        + mx + my * linesize;

    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                 linesize, linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = s->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
                                                       height);

    if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* chroma: halve position and size. Note '+' binds tighter than
         * '>>': these compute (mx + (mx < x)) >> 1, not mx + ((..) >> 1) */
        mx        = mx + (mx < (int) x) >> 1;
        my        = my + (my < (int) y) >> 1;
        width     = width  >> 1;
        height    = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
            src  = pic->f->data[i]        + mx       + my       * uvlinesize;

            if (emu) {
                s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
                                         uvlinesize, uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = s->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               uvlinesize,
                                                               height);
        }
    }
}
498 
/**
 * Decode and apply motion compensation for every partition of the
 * current macroblock in one direction.
 *
 * @param size partition-size code (determines part_width/part_height)
 * @param mode FULLPEL/HALFPEL/THIRDPEL/PREDICT_MODE
 * @param dir  0: forward (last_pic), 1: backward (next_pic)
 * @param avg  nonzero to average into the destination
 * @return 0 on success, -1 on corrupt MV data
 */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    /* derive partition width/height from the size code */
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    /* PREDICT_MODE widens the clip range: scaled MVs may point outside */
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    /* clip bounds in the 1/6-pel domain used by the predictor */
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width)  - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * s->mb_x + (j >> 2)) +
                             (4 * s->mb_y + (i >> 2)) * s->b_stride;
            int dxy;
            x = 16 * s->mb_x + j;
            y = 16 * s->mb_y + i;
            /* scan8-compatible index of this 4x4 block within the MB */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* direct-style prediction: scale the co-located MV of
                 * the next frame by the temporal distance (x2, then
                 * rounded halving via '+ 1 >> 1') */
                mx = s->next_pic->motion_val[0][b_xy][0] * 2;
                my = s->next_pic->motion_val[0][b_xy][1] * 2;

                if (dir == 0) {
                    mx = mx * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * s->frame_num_offset /
                         s->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                    my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
                         s->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = get_interleaved_se_golomb(&s->gb_slice);
                dx = get_interleaved_se_golomb(&s->gb_slice);

                if (dx != (int16_t)dx || dy != (int16_t)dy) {
                    av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector; the 0x30000/0x60000 biases keep the
             * dividend non-negative so the unsigned division is exact for
             * negative MVs */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;   /* 1/6-pel -> 1/3-pel, add diff */
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x30000) / 3 - 0x10000;   /* full-pel part */
                fy  = (unsigned)(my + 0x30000) / 3 - 0x10000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);        /* fractional part */

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;   /* store back in 1/6-pel units */
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
                my  = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
                my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache so later partitions can predict from us */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, s->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
611 
/* NOTE(review): the first line of this function's signature is missing
 * from this view (source line 612 was dropped); the body reads the
 * SVQ3Context, so the first parameter is presumably the context --
 * confirm the exact prototype against upstream. */
                                   int mb_type, const int *block_offset,
                                   int linesize, uint8_t *dest_y)
{
    int i;
    /* add the luma residual of every coded 4x4 block; for intra (16x16)
     * MBs svq3_add_idct_c is called with dc=1 (DC coded separately) */
    if (!IS_INTRA4x4(mb_type)) {
        for (i = 0; i < 16; i++)
            if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                uint8_t *const ptr = dest_y + block_offset[i];
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
                                s->qscale, IS_INTRA(mb_type) ? 1 : 0);
            }
    }
}
626 
/* NOTE(review): the first line of this function's signature is missing
 * from this view (source line 627 was dropped); the body reads the
 * SVQ3Context -- confirm the exact prototype against upstream. */
                                      int mb_type,
                                      const int *block_offset,
                                      int linesize,
                                      uint8_t *dest_y)
{
    int i;
    int qscale = s->qscale;

    if (IS_INTRA4x4(mb_type)) {
        /* predict and reconstruct each 4x4 luma block in scan order */
        for (i = 0; i < 16; i++) {
            uint8_t *const ptr = dest_y + block_offset[i];
            const int dir = s->intra4x4_pred_mode_cache[scan8[i]];

            uint8_t *topright;
            int nnz, tr;
            if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                /* these modes read above-right samples; when they are
                 * unavailable, replicate the rightmost above sample */
                const int topright_avail = (s->topright_samples_available << i) & 0x8000;
                av_assert2(s->mb_y || linesize <= block_offset[i]);
                if (!topright_avail) {
                    tr       = ptr[3 - linesize] * 0x01010101u;
                    topright = (uint8_t *)&tr;
                } else
                    topright = ptr + 4 - linesize;
            } else
                topright = NULL;

            s->hpc.pred4x4[dir](ptr, topright, linesize);
            nnz = s->non_zero_count_cache[scan8[i]];
            if (nnz) {
                svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
            }
        }
    } else {
        /* intra 16x16: whole-MB prediction, then dequantize/transform
         * the separately-coded luma DC plane into s->mb */
        s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
        svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
    }
}
665 
/* NOTE(review): this function's signature line is missing from this view
 * (source line 666 was dropped); the body reads only the SVQ3Context --
 * confirm the exact prototype against upstream. It reconstructs the
 * current macroblock: intra prediction (if any), luma residuals, then
 * chroma residuals. */
{
    const int mb_x    = s->mb_x;
    const int mb_y    = s->mb_y;
    const int mb_xy   = s->mb_xy;
    const int mb_type = s->cur_pic->mb_type[mb_xy];
    uint8_t *dest_y, *dest_cb, *dest_cr;
    int linesize, uvlinesize;
    int i, j;
    const int *block_offset = &s->block_offset[0];
    const int block_h       = 16 >> 1;   /* 8 chroma rows per MB */

    linesize   = s->cur_pic->f->linesize[0];
    uvlinesize = s->cur_pic->f->linesize[1];

    dest_y  = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
    dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
    dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;

    s->vdsp.prefetch(dest_y  + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
    s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);

    if (IS_INTRA(mb_type)) {
        /* chroma intra prediction, then luma prediction */
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
        s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);

        hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
    }

    hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);

    if (s->cbp & 0x30) {
        /* chroma residuals: DC dequant/transform first, then the coded
         * 4x4 AC blocks of each plane */
        uint8_t *dest[2] = { dest_cb, dest_cr };
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
                                               s->dequant4_coeff[4][0]);
        s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
                                               s->dequant4_coeff[4][0]);
        for (j = 1; j < 3; j++) {
            for (i = j * 16; i < j * 16 + 4; i++)
                if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
                    uint8_t *const ptr = dest[j - 1] + block_offset[i];
                    svq3_add_idct_c(ptr, s->mb + i * 16,
                                    uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
                }
        }
    }
}
713 
/**
 * Decode one macroblock: mode selection, motion vectors or intra modes,
 * coded block pattern, quantizer delta and residual coefficients.
 *
 * @param mb_type macroblock type code from the bitstream
 *                (0: skip, 1..7: inter, 8/33: intra4x4, else intra16x16)
 * @return 0 on success, negative on a corrupt bitstream
 */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = s->mb_xy;
    const int b_xy  = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;

    s->top_samples_available      = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
    s->left_samples_available     = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
    s->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (s->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* zero-MV copy from the reference(s) */
            svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (s->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B-frame direct: reuse the co-located next-frame partitioning */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        /* choose MV precision from the per-frame flags plus one bit */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
            /* NOTE(review): the selected statement is missing from this
             * view (source line 749 was dropped); presumably
             * 'mode = THIRDPEL_MODE;' -- confirm against upstream */
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&s->gb_slice))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here (scan8 layout, N = not
         * available):
         *   ????????
         *   ???11111
         *   N??11111
         *   N??11111
         *   N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left column: copy neighbour MVs unless the left MB is intra */
            if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
                              s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (s->mb_y > 0) {
                /* top row, plus top-right and top-left corners */
                memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
                       s->cur_pic->motion_val[m][b_xy - s->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&s->ref_cache[m][scan8[0] - 1 * 8],
                       (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (s->mb_x < s->mb_width - 1) {
                    AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
                         s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (s->mb_x > 0) {
                    AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* only B frames need the second (backward) list */
            if (s->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            /* mb_type 1: backward only, 2: forward only, 3: bidirectional */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        int8_t *i4x4       = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
        int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;

        memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* seed the prediction-mode cache with the neighbours' modes */
            if (s->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
                if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    s->left_samples_available = 0x5F5F;
            }
            if (s->mb_y > 0) {
                s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
                s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
                s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
                s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];

                if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    s->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks (two per VLC) */
            for (i = 0; i < 16; i += 2) {
                vlc = get_interleaved_ue_golomb(&s->gb_slice);

                if (vlc >= 25U) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &s->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {        /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* store the right/bottom edge modes for future neighbours */
        AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
        i4x4[4] = i4x4_cache[7 + 8 * 3];
        i4x4[5] = i4x4_cache[7 + 8 * 2];
        i4x4[6] = i4x4_cache[7 + 8 * 1];

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
                                             s->avctx, s->top_samples_available,
                                             s->left_samples_available);

            s->top_samples_available  = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
            s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            s->top_samples_available  = 0x33FF;
            s->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;   /* remap to H.264 mode numbering */

        if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                                     s->left_samples_available, dir, 0)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return s->intra16x16_pred_mode;
        }

        cbp     = ff_h264_i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* intra MBs in P/B frames: zero the motion vectors for neighbours */
    if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (s->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
        memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    /* coded block pattern (intra16x16 derives it from the MB type) */
    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
            av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
        /* NOTE(review): the ':' branch of this ternary is missing from
         * this view (source line 937 was dropped); presumably the inter
         * cbp mapping table -- confirm against upstream */
    }
    if (IS_INTRA16x16(mb_type) ||
        (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        s->qscale += get_interleaved_se_golomb(&s->gb_slice);

        if (s->qscale > 31u) {
            av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        /* separately-coded luma DC plane */
        AV_ZERO128(s->mb_luma_dc[0] + 0);
        AV_ZERO128(s->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;   /* skip DC coeff */
        const int type  = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        /* luma residuals: one cbp bit per 8x8 quadrant */
        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    s->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        /* chroma residuals: DC blocks, then (optionally) AC blocks */
        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        s->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
                            av_log(s->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    s->cbp                     = cbp;
    s->cur_pic->mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
                                                            s->left_samples_available, DC_PRED8x8, 1);

    return 0;
}
1012 
1014 {
1015  SVQ3Context *s = avctx->priv_data;
1016  const int mb_xy = s->mb_xy;
1017  int i, header;
1018  unsigned slice_id;
1019 
1020  header = get_bits(&s->gb, 8);
1021 
1022  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1023  /* TODO: what? */
1024  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1025  return -1;
1026  } else {
1027  int slice_bits, slice_bytes, slice_length;
1028  int length = header >> 5 & 3;
1029 
1030  slice_length = show_bits(&s->gb, 8 * length);
1031  slice_bits = slice_length * 8;
1032  slice_bytes = slice_length + length - 1;
1033 
1034  skip_bits(&s->gb, 8);
1035 
1036  av_fast_padded_malloc(&s->slice_buf, &s->slice_buf_size, slice_bytes);
1037  if (!s->slice_buf)
1038  return AVERROR(ENOMEM);
1039 
1040  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1041  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1042  return AVERROR_INVALIDDATA;
1043  }
1044  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1045 
1046  if (s->watermark_key) {
1047  uint32_t header = AV_RL32(&s->slice_buf[1]);
1048  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1049  }
1050  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1051 
1052  if (length > 0) {
1053  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1054  }
1055  skip_bits_long(&s->gb, slice_bytes * 8);
1056  }
1057 
1058  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1059  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1060  return -1;
1061  }
1062 
1063  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1064 
1065  if ((header & 0x9F) == 2) {
1066  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1067  get_bits(&s->gb_slice, i);
1068  } else if (get_bits1(&s->gb_slice)) {
1069  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1070  return AVERROR_PATCHWELCOME;
1071  }
1072 
1073  s->slice_num = get_bits(&s->gb_slice, 8);
1074  s->qscale = get_bits(&s->gb_slice, 5);
1075  s->adaptive_quant = get_bits1(&s->gb_slice);
1076 
1077  /* unknown fields */
1078  skip_bits1(&s->gb_slice);
1079 
1080  if (s->has_watermark)
1081  skip_bits1(&s->gb_slice);
1082 
1083  skip_bits1(&s->gb_slice);
1084  skip_bits(&s->gb_slice, 2);
1085 
1086  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1087  return AVERROR_INVALIDDATA;
1088 
1089  /* reset intra predictors and invalidate motion vector references */
1090  if (s->mb_x > 0) {
1091  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1092  -1, 4 * sizeof(int8_t));
1093  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1094  -1, 8 * sizeof(int8_t) * s->mb_x);
1095  }
1096  if (s->mb_y > 0) {
1097  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1098  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1099 
1100  if (s->mb_x > 0)
1101  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1102  }
1103 
1104  return 0;
1105 }
1106 
1108 {
1109  int q, x;
1110  const int max_qp = 51;
1111 
1112  for (q = 0; q < max_qp + 1; q++) {
1113  int shift = ff_h264_quant_div6[q] + 2;
1114  int idx = ff_h264_quant_rem6[q];
1115  for (x = 0; x < 16; x++)
1116  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1117  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1118  }
1119 }
1120 
1122 {
1123  SVQ3Context *s = avctx->priv_data;
1124  int m, x, y;
1125  unsigned char *extradata;
1126  unsigned char *extradata_end;
1127  unsigned int size;
1128  int marker_found = 0;
1129  int ret;
1130 
1131  s->cur_pic = &s->frames[0];
1132  s->last_pic = &s->frames[1];
1133  s->next_pic = &s->frames[2];
1134 
1135  s->cur_pic->f = av_frame_alloc();
1136  s->last_pic->f = av_frame_alloc();
1137  s->next_pic->f = av_frame_alloc();
1138  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1139  return AVERROR(ENOMEM);
1140 
1141  ff_h264dsp_init(&s->h264dsp, 8, 1);
1142  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1143  ff_videodsp_init(&s->vdsp, 8);
1144 
1145 
1146  avctx->bits_per_raw_sample = 8;
1147 
1148  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1149  ff_tpeldsp_init(&s->tdsp);
1150 
1151  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1152  avctx->color_range = AVCOL_RANGE_JPEG;
1153 
1154  s->avctx = avctx;
1155  s->halfpel_flag = 1;
1156  s->thirdpel_flag = 1;
1157  s->has_watermark = 0;
1158 
1159  /* prowl for the "SEQH" marker in the extradata */
1160  extradata = (unsigned char *)avctx->extradata;
1161  extradata_end = avctx->extradata + avctx->extradata_size;
1162  if (extradata) {
1163  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1164  if (!memcmp(extradata, "SEQH", 4)) {
1165  marker_found = 1;
1166  break;
1167  }
1168  extradata++;
1169  }
1170  }
1171 
1172  /* if a match was found, parse the extra data */
1173  if (marker_found) {
1174  GetBitContext gb;
1175  int frame_size_code;
1176  int unk0, unk1, unk2, unk3, unk4;
1177  int w,h;
1178 
1179  size = AV_RB32(&extradata[4]);
1180  if (size > extradata_end - extradata - 8)
1181  return AVERROR_INVALIDDATA;
1182  init_get_bits(&gb, extradata + 8, size * 8);
1183 
1184  /* 'frame size code' and optional 'width, height' */
1185  frame_size_code = get_bits(&gb, 3);
1186  switch (frame_size_code) {
1187  case 0:
1188  w = 160;
1189  h = 120;
1190  break;
1191  case 1:
1192  w = 128;
1193  h = 96;
1194  break;
1195  case 2:
1196  w = 176;
1197  h = 144;
1198  break;
1199  case 3:
1200  w = 352;
1201  h = 288;
1202  break;
1203  case 4:
1204  w = 704;
1205  h = 576;
1206  break;
1207  case 5:
1208  w = 240;
1209  h = 180;
1210  break;
1211  case 6:
1212  w = 320;
1213  h = 240;
1214  break;
1215  case 7:
1216  w = get_bits(&gb, 12);
1217  h = get_bits(&gb, 12);
1218  break;
1219  }
1220  ret = ff_set_dimensions(avctx, w, h);
1221  if (ret < 0)
1222  return ret;
1223 
1224  s->halfpel_flag = get_bits1(&gb);
1225  s->thirdpel_flag = get_bits1(&gb);
1226 
1227  /* unknown fields */
1228  unk0 = get_bits1(&gb);
1229  unk1 = get_bits1(&gb);
1230  unk2 = get_bits1(&gb);
1231  unk3 = get_bits1(&gb);
1232 
1233  s->low_delay = get_bits1(&gb);
1234 
1235  /* unknown field */
1236  unk4 = get_bits1(&gb);
1237 
1238  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1239  unk0, unk1, unk2, unk3, unk4);
1240 
1241  if (skip_1stop_8data_bits(&gb) < 0)
1242  return AVERROR_INVALIDDATA;
1243 
1244  s->has_watermark = get_bits1(&gb);
1245  avctx->has_b_frames = !s->low_delay;
1246  if (s->has_watermark) {
1247 #if CONFIG_ZLIB
1248  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1249  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1250  int u1 = get_interleaved_ue_golomb(&gb);
1251  int u2 = get_bits(&gb, 8);
1252  int u3 = get_bits(&gb, 2);
1253  int u4 = get_interleaved_ue_golomb(&gb);
1254  unsigned long buf_len = watermark_width *
1255  watermark_height * 4;
1256  int offset = get_bits_count(&gb) + 7 >> 3;
1257  uint8_t *buf;
1258 
1259  if (watermark_height <= 0 ||
1260  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
1261  return AVERROR_INVALIDDATA;
1262 
1263  buf = av_malloc(buf_len);
1264  if (!buf)
1265  return AVERROR(ENOMEM);
1266 
1267  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1268  watermark_width, watermark_height);
1269  av_log(avctx, AV_LOG_DEBUG,
1270  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1271  u1, u2, u3, u4, offset);
1272  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1273  size - offset) != Z_OK) {
1274  av_log(avctx, AV_LOG_ERROR,
1275  "could not uncompress watermark logo\n");
1276  av_free(buf);
1277  return -1;
1278  }
1279  s->watermark_key = av_bswap16(av_crc(av_crc_get_table(AV_CRC_16_CCITT), 0, buf, buf_len));
1280 
1281  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1282  av_log(avctx, AV_LOG_DEBUG,
1283  "watermark key %#"PRIx32"\n", s->watermark_key);
1284  av_free(buf);
1285 #else
1286  av_log(avctx, AV_LOG_ERROR,
1287  "this svq3 file contains watermark which need zlib support compiled in\n");
1288  return AVERROR(ENOSYS);
1289 #endif
1290  }
1291  }
1292 
1293  s->mb_width = (avctx->width + 15) / 16;
1294  s->mb_height = (avctx->height + 15) / 16;
1295  s->mb_stride = s->mb_width + 1;
1296  s->mb_num = s->mb_width * s->mb_height;
1297  s->b_stride = 4 * s->mb_width;
1298  s->h_edge_pos = s->mb_width * 16;
1299  s->v_edge_pos = s->mb_height * 16;
1300 
1301  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1302  if (!s->intra4x4_pred_mode)
1303  return AVERROR(ENOMEM);
1304 
1305  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1306  sizeof(*s->mb2br_xy));
1307  if (!s->mb2br_xy)
1308  return AVERROR(ENOMEM);
1309 
1310  for (y = 0; y < s->mb_height; y++)
1311  for (x = 0; x < s->mb_width; x++) {
1312  const int mb_xy = x + y * s->mb_stride;
1313 
1314  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1315  }
1316 
1318 
1319  return 0;
1320 }
1321 
1322 static void free_picture(SVQ3Frame *pic)
1323 {
1324  int i;
1325  for (i = 0; i < 2; i++) {
1326  av_freep(&pic->motion_val_buf[i]);
1327  }
1328  av_freep(&pic->mb_type_buf);
1329 
1330  av_frame_unref(pic->f);
1331 }
1332 
1333 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1334 {
1335  SVQ3Context *s = avctx->priv_data;
1336  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1337  const int b4_stride = s->mb_width * 4 + 1;
1338  const int b4_array_size = b4_stride * s->mb_height * 4;
1339  int ret;
1340 
1341  if (!pic->motion_val_buf[0]) {
1342  int i;
1343 
1344  pic->mb_type_buf = av_calloc(big_mb_num + s->mb_stride, sizeof(uint32_t));
1345  if (!pic->mb_type_buf)
1346  return AVERROR(ENOMEM);
1347  pic->mb_type = pic->mb_type_buf + 2 * s->mb_stride + 1;
1348 
1349  for (i = 0; i < 2; i++) {
1350  pic->motion_val_buf[i] = av_calloc(b4_array_size + 4, 2 * sizeof(int16_t));
1351  if (!pic->motion_val_buf[i]) {
1352  ret = AVERROR(ENOMEM);
1353  goto fail;
1354  }
1355 
1356  pic->motion_val[i] = pic->motion_val_buf[i] + 4;
1357  }
1358  }
1359 
1360  ret = ff_get_buffer(avctx, pic->f,
1361  (s->pict_type != AV_PICTURE_TYPE_B) ?
1363  if (ret < 0)
1364  goto fail;
1365 
1366  if (!s->edge_emu_buffer) {
1367  s->edge_emu_buffer = av_calloc(pic->f->linesize[0], 17);
1368  if (!s->edge_emu_buffer)
1369  return AVERROR(ENOMEM);
1370  }
1371 
1372  return 0;
1373 fail:
1374  free_picture(pic);
1375  return ret;
1376 }
1377 
1378 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1379  int *got_frame, AVPacket *avpkt)
1380 {
1381  SVQ3Context *s = avctx->priv_data;
1382  int buf_size = avpkt->size;
1383  int left;
1384  int ret, m, i;
1385 
1386  /* special case for last picture */
1387  if (buf_size == 0) {
1388  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1389  ret = av_frame_ref(data, s->next_pic->f);
1390  if (ret < 0)
1391  return ret;
1392  s->last_frame_output = 1;
1393  *got_frame = 1;
1394  }
1395  return 0;
1396  }
1397 
1398  s->mb_x = s->mb_y = s->mb_xy = 0;
1399 
1400  ret = init_get_bits8(&s->gb, avpkt->data, avpkt->size);
1401  if (ret < 0)
1402  return ret;
1403 
1404  if (svq3_decode_slice_header(avctx))
1405  return -1;
1406 
1407  s->pict_type = s->slice_type;
1408 
1409  if (s->pict_type != AV_PICTURE_TYPE_B)
1410  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1411 
1412  av_frame_unref(s->cur_pic->f);
1413 
1414  /* for skipping the frame */
1415  s->cur_pic->f->pict_type = s->pict_type;
1416  s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1417 
1418  ret = get_buffer(avctx, s->cur_pic);
1419  if (ret < 0)
1420  return ret;
1421 
1422  for (i = 0; i < 16; i++) {
1423  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1424  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1425  }
1426  for (i = 0; i < 16; i++) {
1427  s->block_offset[16 + i] =
1428  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1429  s->block_offset[48 + 16 + i] =
1430  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1431  }
1432 
1433  if (s->pict_type != AV_PICTURE_TYPE_I) {
1434  if (!s->last_pic->f->data[0]) {
1435  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1436  av_frame_unref(s->last_pic->f);
1437  ret = get_buffer(avctx, s->last_pic);
1438  if (ret < 0)
1439  return ret;
1440  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1441  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1442  s->last_pic->f->linesize[1]);
1443  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1444  s->last_pic->f->linesize[2]);
1445  }
1446 
1447  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1448  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1449  av_frame_unref(s->next_pic->f);
1450  ret = get_buffer(avctx, s->next_pic);
1451  if (ret < 0)
1452  return ret;
1453  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1454  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1455  s->next_pic->f->linesize[1]);
1456  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1457  s->next_pic->f->linesize[2]);
1458  }
1459  }
1460 
1461  if (avctx->debug & FF_DEBUG_PICT_INFO)
1462  av_log(s->avctx, AV_LOG_DEBUG,
1463  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1464  av_get_picture_type_char(s->pict_type),
1465  s->halfpel_flag, s->thirdpel_flag,
1466  s->adaptive_quant, s->qscale, s->slice_num);
1467 
1468  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1469  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1470  avctx->skip_frame >= AVDISCARD_ALL)
1471  return 0;
1472 
1473  if (s->next_p_frame_damaged) {
1474  if (s->pict_type == AV_PICTURE_TYPE_B)
1475  return 0;
1476  else
1477  s->next_p_frame_damaged = 0;
1478  }
1479 
1480  if (s->pict_type == AV_PICTURE_TYPE_B) {
1481  s->frame_num_offset = s->slice_num - s->prev_frame_num;
1482 
1483  if (s->frame_num_offset < 0)
1484  s->frame_num_offset += 256;
1485  if (s->frame_num_offset == 0 ||
1486  s->frame_num_offset >= s->prev_frame_num_offset) {
1487  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1488  return -1;
1489  }
1490  } else {
1491  s->prev_frame_num = s->frame_num;
1492  s->frame_num = s->slice_num;
1493  s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1494 
1495  if (s->prev_frame_num_offset < 0)
1496  s->prev_frame_num_offset += 256;
1497  }
1498 
1499  for (m = 0; m < 2; m++) {
1500  int i;
1501  for (i = 0; i < 4; i++) {
1502  int j;
1503  for (j = -1; j < 4; j++)
1504  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1505  if (i < 3)
1506  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1507  }
1508  }
1509 
1510  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1511  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1512  unsigned mb_type;
1513  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1514 
1515  if ((get_bits_left(&s->gb_slice)) <= 7) {
1516  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1517  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1518 
1519  if (svq3_decode_slice_header(avctx))
1520  return -1;
1521  }
1522  if (s->slice_type != s->pict_type) {
1523  avpriv_request_sample(avctx, "non constant slice type");
1524  }
1525  /* TODO: support s->mb_skip_run */
1526  }
1527 
1528  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1529 
1530  if (s->pict_type == AV_PICTURE_TYPE_I)
1531  mb_type += 8;
1532  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1533  mb_type += 4;
1534  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1535  av_log(s->avctx, AV_LOG_ERROR,
1536  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1537  return -1;
1538  }
1539 
1540  if (mb_type != 0 || s->cbp)
1541  hl_decode_mb(s);
1542 
1543  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1544  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1545  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1546  }
1547 
1548  ff_draw_horiz_band(avctx, s->cur_pic->f,
1549  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1550  16 * s->mb_y, 16, PICT_FRAME, 0,
1551  s->low_delay);
1552  }
1553 
1554  left = buf_size*8 - get_bits_count(&s->gb_slice);
1555 
1556  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1557  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1558  //av_hex_dump(stderr, buf+buf_size-8, 8);
1559  }
1560 
1561  if (left < 0) {
1562  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1563  return -1;
1564  }
1565 
1566  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1567  ret = av_frame_ref(data, s->cur_pic->f);
1568  else if (s->last_pic->f->data[0])
1569  ret = av_frame_ref(data, s->last_pic->f);
1570  if (ret < 0)
1571  return ret;
1572 
1573  /* Do not output the last pic after seeking. */
1574  if (s->last_pic->f->data[0] || s->low_delay)
1575  *got_frame = 1;
1576 
1577  if (s->pict_type != AV_PICTURE_TYPE_B) {
1578  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1579  } else {
1580  av_frame_unref(s->cur_pic->f);
1581  }
1582 
1583  return buf_size;
1584 }
1585 
1587 {
1588  SVQ3Context *s = avctx->priv_data;
1589 
1590  for (int i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
1591  free_picture(&s->frames[i]);
1592  av_frame_free(&s->frames[i].f);
1593  }
1594  av_freep(&s->slice_buf);
1595  av_freep(&s->intra4x4_pred_mode);
1596  av_freep(&s->edge_emu_buffer);
1597  av_freep(&s->mb2br_xy);
1598 
1599  return 0;
1600 }
1601 
1603  .name = "svq3",
1604  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1605  .type = AVMEDIA_TYPE_VIDEO,
1606  .id = AV_CODEC_ID_SVQ3,
1607  .priv_data_size = sizeof(SVQ3Context),
1609  .close = svq3_decode_end,
1611  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1614  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1615  AV_PIX_FMT_NONE},
1616  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
1617 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
SVQ3Context::frame_num
int frame_num
Definition: svq3.c:108
SVQ3Context::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: svq3.c:135
IS_INTRA4x4
#define IS_INTRA4x4(a)
Definition: mpegutils.h:74
svq3_dequant_coeff
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:212
AVCodec
AVCodec.
Definition: codec.h:202
SVQ3Context::next_pic
SVQ3Frame * next_pic
Definition: svq3.c:90
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:292
SVQ3Context::slice_type
enum AVPictureType slice_type
Definition: svq3.c:114
SVQ3Context::gb_slice
GetBitContext gb_slice
Definition: svq3.c:93
SVQ3Context::vdsp
VideoDSPContext vdsp
Definition: svq3.c:87
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
SVQ3Context::slice_num
int slice_num
Definition: svq3.c:105
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:96
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:850
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
svq3_decode_slice_header
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1013
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
mem_internal.h
SVQ3Context::avctx
AVCodecContext * avctx
Definition: svq3.c:81
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
SVQ3Context::mb_num
int mb_num
Definition: svq3.c:120
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
SVQ3Context::v_edge_pos
int v_edge_pos
Definition: svq3.c:103
AVPictureType
AVPictureType
Definition: avutil.h:272
ff_h264_chroma_qp
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
mv
static const int8_t mv[256][2]
Definition: 4xm.c:79
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
SVQ3Context::left_samples_available
unsigned int left_samples_available
Definition: svq3.c:133
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:220
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:145
ff_h264_golomb_to_inter_cbp
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
SVQ3Context::h_edge_pos
int h_edge_pos
Definition: svq3.c:102
index
fg index
Definition: ffmpeg_filter.c:167
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:597
IMbInfo::cbp
uint8_t cbp
Definition: h264data.h:36
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:373
DC_PRED
@ DC_PRED
Definition: vp9.h:48
MB_TYPE_INTRA4x4
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:50
SVQ3Context::slice_buf
uint8_t * slice_buf
Definition: svq3.c:94
data
const char data[16]
Definition: mxf.c:143
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:53
SVQ3Context::mb
int16_t mb[16 *48 *2]
Definition: svq3.c:139
PREDICT_MODE
#define PREDICT_MODE
Definition: svq3.c:150
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_golomb_to_intra4x4_cbp
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
SVQ3Context::frame_num_offset
int frame_num_offset
Definition: svq3.c:109
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:51
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:660
SVQ3Context::slice_buf_size
unsigned slice_buf_size
Definition: svq3.c:95
SVQ3Context::last_frame_output
int last_frame_output
Definition: svq3.c:104
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1303
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:338
get_buffer
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1333
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
init
static int init
Definition: av_tx.c:47
A
#define A(x)
Definition: vp56_arith.h:28
crc.h
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:468
golomb.h
exp golomb vlc stuff
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:380
SVQ3Context::last_pic
SVQ3Frame * last_pic
Definition: svq3.c:91
SVQ3Context::qscale
int qscale
Definition: svq3.c:106
U
#define U(x)
Definition: vp56_arith.h:37
SVQ3Context::topright_samples_available
unsigned int topright_samples_available
Definition: svq3.c:132
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1673
fail
#define fail()
Definition: checkasm.h:127
GetBitContext
Definition: get_bits.h:62
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
SVQ3Context::tdsp
TpelDSPContext tdsp
Definition: svq3.c:86
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
SVQ3Context::thirdpel_flag
int thirdpel_flag
Definition: svq3.c:97
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
SVQ3Context::intra4x4_pred_mode_cache
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:128
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
SVQ3Context::gb
GetBitContext gb
Definition: svq3.c:92
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
SVQ3Context::frames
SVQ3Frame frames[3]
Definition: svq3.c:144
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
SVQ3Context::cbp
int cbp
Definition: svq3.c:107
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:678
FULLPEL_MODE
#define FULLPEL_MODE
Definition: svq3.c:147
SVQ3Context::mb_y
int mb_y
Definition: svq3.c:117
SVQ3Context::mb_x
int mb_x
Definition: svq3.c:117
SVQ3Context::adaptive_quant
int adaptive_quant
Definition: svq3.c:100
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:679
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
TpelDSPContext
thirdpel DSP context
Definition: tpeldsp.h:42
SVQ3Context::pict_type
enum AVPictureType pict_type
Definition: svq3.c:113
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
svq3_mc_dir
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:499
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:361
ff_tpeldsp_init
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
QP_MAX_NUM
#define QP_MAX_NUM
Definition: h264.h:27
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
h264data.h
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1425
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
svq3_pred_motion
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:375
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:80
SVQ3Context::top_samples_available
unsigned int top_samples_available
Definition: svq3.c:131
IS_INTRA
#define IS_INTRA(x, y)
AV_CODEC_ID_SVQ3
@ AV_CODEC_ID_SVQ3
Definition: codec_id.h:73
SVQ3Context::b_stride
int b_stride
Definition: svq3.c:121
SVQ3Context::prev_frame_num_offset
int prev_frame_num_offset
Definition: svq3.c:110
SVQ3Frame::mb_type_buf
uint32_t * mb_type_buf
Definition: svq3.c:77
SVQ3Context::h264dsp
H264DSPContext h264dsp
Definition: svq3.c:83
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
IMbInfo::pred_mode
uint8_t pred_mode
Definition: h264data.h:35
if
if(ret)
Definition: filter_design.txt:179
SVQ3Context::next_p_frame_damaged
int next_p_frame_damaged
Definition: svq3.c:101
SVQ3Frame::motion_val
int16_t(*[2] motion_val)[2]
Definition: svq3.c:75
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:967
SVQ3Context::mb_width
int mb_width
Definition: svq3.c:119
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
SVQ3Context::mb2br_xy
uint32_t * mb2br_xy
Definition: svq3.c:123
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:499
src
#define src
Definition: vp8dsp.c:255
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
ff_h264_chroma_dc_scan
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
SVQ3Context
Definition: svq3.c:80
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
SVQ3Context::mb_luma_dc
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:140
tpeldsp.h
hl_decode_mb_idct_luma
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:612
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
H264DSPContext
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
SVQ3Context::intra16x16_pred_mode
int intra16x16_pred_mode
Definition: svq3.c:126
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
SVQ3Context::hpc
H264PredContext hpc
Definition: svq3.c:84
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1652
init_dequant4_coeff_table
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1107
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
svq3_fetch_diagonal_mv
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:354
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
AVPacket::size
int size
Definition: packet.h:374
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:325
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
rectangle.h
hl_decode_mb
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:666
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:303
free_picture
static void free_picture(SVQ3Frame *pic)
Definition: svq3.c:1322
size
int size
Definition: twinvq_data.h:10344
SVQ3Frame::mb_type
uint32_t * mb_type
Definition: svq3.c:77
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:61
avg
#define avg(a, b, c, d)
Definition: colorspacedsp_template.c:28
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
av_crc_get_table
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
Definition: crc.c:374
av_bswap16
#define av_bswap16
Definition: bswap.h:31
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
ff_h264_quant_rem6
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:539
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:75
hl_decode_mb_predict_luma
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:627
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
SVQ3Frame::motion_val_buf
int16_t(*[2] motion_val_buf)[2]
Definition: svq3.c:74
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:680
SVQ3Context::prev_frame_num
int prev_frame_num
Definition: svq3.c:111
svq3_add_idct_c
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:254
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:83
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
h264dec.h
svq3_decode_frame
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1378
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
svq3_luma_dc_dequant_idct_c
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:219
stride
#define stride
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AV_CRC_16_CCITT
@ AV_CRC_16_CCITT
Definition: crc.h:51
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:484
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:447
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:50
SVQ3Frame
Definition: svq3.c:71
THIRDPEL_MODE
#define THIRDPEL_MODE
Definition: svq3.c:149
SVQ3Context::mv_cache
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:137
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:435
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
SVQ3Context::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:141
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264dec.h:410
AVCodecContext::height
int height
Definition: avcodec.h:556
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:271
svq3_decode_mb
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:714
svq3_scan
static const uint8_t svq3_scan[16]
Definition: svq3.c:161
avcodec.h
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:66
SVQ3Context::halfpel_flag
int halfpel_flag
Definition: svq3.c:96
mid_pred
#define mid_pred
Definition: mathops.h:97
svq3_pred_1
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:187
ret
ret
Definition: filter_design.txt:187
ff_svq3_decoder
const AVCodec ff_svq3_decoder
Definition: svq3.c:1602
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
SVQ3Context::mb_height
int mb_height
Definition: svq3.c:119
SVQ3Context::hdsp
HpelDSPContext hdsp
Definition: svq3.c:85
SVQ3Context::low_delay
int low_delay
Definition: svq3.c:115
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
svq3_decode_block
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:294
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
B
#define B
Definition: huffyuvdsp.h:32
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:855
AVCodecContext
main external API structure.
Definition: avcodec.h:383
ff_h264_dequant4_coeff_init
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
SVQ3Frame::f
AVFrame * f
Definition: svq3.c:72
SVQ3Context::block_offset
int block_offset[2 *(16 *3)]
Definition: svq3.c:143
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:413
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
av_crc
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
Definition: crc.c:392
mode
mode
Definition: ebur128.h:83
ff_h264_check_intra4x4_pred_mode
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_h264_i_mb_type_info
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
svq1.h
SVQ3Context::chroma_pred_mode
int chroma_pred_mode
Definition: svq3.c:125
SVQ3Context::watermark_key
uint32_t watermark_key
Definition: svq3.c:99
SVQ3Context::mb_xy
int mb_xy
Definition: svq3.c:118
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:664
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
temp
else temp
Definition: vf_mcdeint.c:248
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:82
luma_dc_zigzag_scan
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:168
av_clip_uint8
#define av_clip_uint8
Definition: common.h:102
ff_h264_quant_div6
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
VideoDSPContext
Definition: videodsp.h:41
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1302
H264PredContext
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
shift
static int shift(int a, int b)
Definition: sonic.c:83
svq3_mc_dir_part
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:424
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:78
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:86
svq3_decode_end
static av_cold int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1586
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:1023
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:37
SVQ3Context::dequant4_coeff
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:142
SVQ3Context::ref_cache
int8_t ref_cache[2][5 *8]
Definition: svq3.c:138
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
SVQ3Context::mb_stride
int mb_stride
Definition: svq3.c:120
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
int32_t
int32_t
Definition: audioconvert.c:56
hpeldsp.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
svq3_decode_init
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1121
h
h
Definition: vp9dsp_template.c:2038
svq3_dct_tables
static const struct @130 svq3_dct_tables[2][16]
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
int
int
Definition: ffmpeg_filter.c:153
SVQ3Context::cur_pic
SVQ3Frame * cur_pic
Definition: svq3.c:89
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
SVQ3Context::has_watermark
int has_watermark
Definition: svq3.c:98
SVQ3Context::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: svq3.c:129
svq3_pred_0
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:175
HALFPEL_MODE
#define HALFPEL_MODE
Definition: svq3.c:148
ff_h264_check_intra_pred_mode
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179