FFmpeg
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264dec.h"
50 #include "h264data.h"
51 #include "golomb.h"
52 #include "hpeldsp.h"
53 #include "mathops.h"
54 #include "rectangle.h"
55 #include "tpeldsp.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 
63 /**
64  * @file
65  * svq3 decoder.
66  */
67 
68 typedef struct SVQ3Frame {
70 
72  int16_t (*motion_val[2])[2];
73 
75  uint32_t *mb_type;
76 
77 
79  int8_t *ref_index[2];
80 } SVQ3Frame;
81 
82 typedef struct SVQ3Context {
84 
90 
101  uint32_t watermark_key;
103  int buf_size;
110  int qscale;
111  int cbp;
116 
120 
121  int mb_x, mb_y;
122  int mb_xy;
125  int b_stride;
126 
127  uint32_t *mb2br_xy;
128 
131 
134 
135  unsigned int top_samples_available;
138 
140 
141  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
142  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
143  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
144  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
146  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
147  int block_offset[2 * (16 * 3)];
148 } SVQ3Context;
149 
150 #define FULLPEL_MODE 1
151 #define HALFPEL_MODE 2
152 #define THIRDPEL_MODE 3
153 #define PREDICT_MODE 4
154 
155 /* dual scan (from some older H.264 draft)
156  * o-->o-->o o
157  * | /|
158  * o o o / o
159  * | / | |/ |
160  * o o o o
161  * /
162  * o-->o-->o-->o
163  */
/* Coefficient scan order used by SVQ3 for 4x4 blocks (the "dual scan"
 * sketched in the comment above); each entry is an x + y * 4 position
 * within the 4x4 block. */
164 static const uint8_t svq3_scan[16] = {
165  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
166  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
167  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
168  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
169 };
170 
/* Scan order for the 16 luma DC coefficients; each entry is an
 * x * 16 + y * 64 offset into the coefficient buffer.
 * NOTE(review): the scan_patterns[] initializer that selects this table
 * is missing from this listing -- verify usage against upstream svq3.c. */
171 static const uint8_t luma_dc_zigzag_scan[16] = {
172  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
173  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
174  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
175  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
176 };
177 
/* Maps the luma intra-prediction VLC (0..24, see svq3_decode_mb) to a
 * pair of indices into the svq3_pred_1 preference lists below; the pairs
 * enumerate a 5x5 grid along its anti-diagonals. */
178 static const uint8_t svq3_pred_0[25][2] = {
179  { 0, 0 },
180  { 1, 0 }, { 0, 1 },
181  { 0, 2 }, { 1, 1 }, { 2, 0 },
182  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
183  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
184  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
185  { 2, 4 }, { 3, 3 }, { 4, 2 },
186  { 4, 3 }, { 3, 4 },
187  { 4, 4 }
188 };
189 
/* svq3_pred_1[top + 1][left + 1] is an ordered list of candidate 4x4
 * intra prediction modes given the modes of the top and left neighbour
 * blocks (-1 meaning "no neighbour"); it is indexed by the ranks taken
 * from svq3_pred_0.  A selected -1 entry is invalid and makes
 * svq3_decode_mb fail with "weird prediction". */
190 static const int8_t svq3_pred_1[6][6][5] = {
191  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
192  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
193  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
194  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
195  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
196  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
197  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
198  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
199  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
200  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
201  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
202  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
203 };
204 
205 static const struct {
208 } svq3_dct_tables[2][16] = {
209  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
210  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
211  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
212  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
213 };
214 
/* Dequantization multipliers indexed by qscale (0..31); applied in the
 * IDCT helpers below as (coeff * qmul + 0x80000) >> 20, i.e. 20-bit
 * fixed point with round-to-nearest. */
215 static const uint32_t svq3_dequant_coeff[32] = {
216  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
217  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
218  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
219  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
220 };
221 
222 static int svq3_decode_end(AVCodecContext *avctx);
223 
224 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
225 {
226  const unsigned qmul = svq3_dequant_coeff[qp];
227 #define stride 16
228  int i;
229  int temp[16];
230  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
231 
232  for (i = 0; i < 4; i++) {
233  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
234  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
235  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
236  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
237 
238  temp[4 * i + 0] = z0 + z3;
239  temp[4 * i + 1] = z1 + z2;
240  temp[4 * i + 2] = z1 - z2;
241  temp[4 * i + 3] = z0 - z3;
242  }
243 
244  for (i = 0; i < 4; i++) {
245  const int offset = x_offset[i];
246  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
247  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
248  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
249  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
250 
251  output[stride * 0 + offset] = (int)((z0 + z3) * qmul + 0x80000) >> 20;
252  output[stride * 2 + offset] = (int)((z1 + z2) * qmul + 0x80000) >> 20;
253  output[stride * 8 + offset] = (int)((z1 - z2) * qmul + 0x80000) >> 20;
254  output[stride * 10 + offset] = (int)((z0 - z3) * qmul + 0x80000) >> 20;
255  }
256 }
257 #undef stride
258 
259 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
260  int stride, int qp, int dc)
261 {
262  const int qmul = svq3_dequant_coeff[qp];
263  int i;
264 
265  if (dc) {
266  dc = 13 * 13 * (dc == 1 ? 1538U* block[0]
267  : qmul * (block[0] >> 3) / 2);
268  block[0] = 0;
269  }
270 
271  for (i = 0; i < 4; i++) {
272  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
273  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
274  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
275  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
276 
277  block[0 + 4 * i] = z0 + z3;
278  block[1 + 4 * i] = z1 + z2;
279  block[2 + 4 * i] = z1 - z2;
280  block[3 + 4 * i] = z0 - z3;
281  }
282 
283  for (i = 0; i < 4; i++) {
284  const unsigned z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
285  const unsigned z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
286  const unsigned z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
287  const unsigned z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
288  const int rr = (dc + 0x80000u);
289 
290  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((int)((z0 + z3) * qmul + rr) >> 20));
291  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((int)((z1 + z2) * qmul + rr) >> 20));
292  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((int)((z1 - z2) * qmul + rr) >> 20));
293  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((int)((z0 - z3) * qmul + rr) >> 20));
294  }
295 
296  memset(block, 0, 16 * sizeof(int16_t));
297 }
298 
/*
 * Decode run/level residual data for one coefficient block.
 * index is the first scan position to fill; type selects the scan
 * pattern and code interpretation (type 3 takes the chroma-DC branch,
 * type 2 additionally continues into a second pass over scan positions
 * 16..23; intra = 3 * type >> 2 selects the VLC table).  Coefficients
 * are stored as block[scan[index]] = (level ^ sign) - sign.
 * Returns 0 on success, -1 on an invalid code or a run overshooting the
 * scan limit.
 *
 * NOTE(review): the scan_patterns[] initializer (original line 303) is
 * missing from this listing (doc-dump artifact) -- verify against
 * upstream svq3.c.
 */
299 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
300  int index, const int type)
301 {
302  static const uint8_t *const scan_patterns[4] = {
304  };
305 
306  int run, level, sign, limit;
307  unsigned vlc;
308  const int intra = 3 * type >> 2;
309  const uint8_t *const scan = scan_patterns[type];
310 
311  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
312  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
313  if ((int32_t)vlc < 0)
314  return -1;
315 
/* The low bit of the code carries the sign: odd -> positive. */
316  sign = (vlc & 1) ? 0 : -1;
317  vlc = vlc + 1 >> 1;
318 
319  if (type == 3) {
320  if (vlc < 3) {
321  run = 0;
322  level = vlc;
323  } else if (vlc < 4) {
324  run = 1;
325  level = 1;
326  } else {
327  run = vlc & 0x3;
328  level = (vlc + 9 >> 2) - run;
329  }
330  } else {
/* Small codes come from the run/level tables; larger ones are
 * decomposed arithmetically (escape coding). */
331  if (vlc < 16U) {
332  run = svq3_dct_tables[intra][vlc].run;
333  level = svq3_dct_tables[intra][vlc].level;
334  } else if (intra) {
335  run = vlc & 0x7;
336  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
337  } else {
338  run = vlc & 0xF;
339  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
340  }
341  }
342 
343 
/* Reject runs that step past the end of the current scan segment. */
344  if ((index += run) >= limit)
345  return -1;
346 
347  block[scan[index]] = (level ^ sign) - sign;
348  }
349 
350  if (type != 2) {
351  break;
352  }
353  }
354 
355  return 0;
356 }
357 
358 static av_always_inline int
359 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
360  int i, int list, int part_width)
361 {
362  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
363 
364  if (topright_ref != PART_NOT_AVAILABLE) {
365  *C = s->mv_cache[list][i - 8 + part_width];
366  return topright_ref;
367  } else {
368  *C = s->mv_cache[list][i - 8 - 1];
369  return s->ref_cache[list][i - 8 - 1];
370  }
371 }
372 
373 /**
374  * Get the predicted MV.
375  * @param n the block index
376  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
377  * @param mx the x component of the predicted motion vector
378  * @param my the y component of the predicted motion vector
379  */
/* NOTE(review): the opening line of this signature (original line 380,
 * carrying the function name svq3_pred_motion and its leading
 * parameters) is missing from this listing -- verify against upstream
 * svq3.c. */
381  int part_width, int list,
382  int ref, int *const mx, int *const my)
383 {
384  const int index8 = scan8[n];
385  const int top_ref = s->ref_cache[list][index8 - 8];
386  const int left_ref = s->ref_cache[list][index8 - 1];
387  const int16_t *const A = s->mv_cache[list][index8 - 1];
388  const int16_t *const B = s->mv_cache[list][index8 - 8];
389  const int16_t *C;
390  int diagonal_ref, match_count;
391 
392 /* mv_cache
393  * B . . A T T T T
394  * U . . L . . , .
395  * U . . L . . . .
396  * U . . L . . , .
397  * . . . L . . . .
398  */
399 
400  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
401  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
/* Two or more neighbours share this ref: component-wise median of the
 * left (A), top (B) and diagonal (C) vectors. */
402  if (match_count > 1) { //most common
403  *mx = mid_pred(A[0], B[0], C[0]);
404  *my = mid_pred(A[1], B[1], C[1]);
405  } else if (match_count == 1) {
/* Exactly one neighbour matches: take its vector directly. */
406  if (left_ref == ref) {
407  *mx = A[0];
408  *my = A[1];
409  } else if (top_ref == ref) {
410  *mx = B[0];
411  *my = B[1];
412  } else {
413  *mx = C[0];
414  *my = C[1];
415  }
416  } else {
/* No match: use the left vector when it is the only available
 * neighbour, otherwise fall back to the median. */
417  if (top_ref == PART_NOT_AVAILABLE &&
418  diagonal_ref == PART_NOT_AVAILABLE &&
419  left_ref != PART_NOT_AVAILABLE) {
420  *mx = A[0];
421  *my = A[1];
422  } else {
423  *mx = mid_pred(A[0], B[0], C[0]);
424  *my = mid_pred(A[1], B[1], C[1]);
425  }
426  }
427 }
428 
/*
 * Motion-compensate one partition of size width x height at pixel
 * position (x, y): copy or average (avg) the displaced area from the
 * last (dir == 0) or next (dir != 0) reference frame into cur_pic.
 * (mx, my) is the displacement, dxy the sub-pel index into the DSP
 * function tables; thirdpel selects the tpel functions instead of the
 * hpel ones.  Chroma planes are processed at half resolution unless
 * AV_CODEC_FLAG_GRAY is set.  Out-of-frame references are handled via
 * emulated_edge_mc into edge_emu_buffer.
 */
429 static inline void svq3_mc_dir_part(SVQ3Context *s,
430  int x, int y, int width, int height,
431  int mx, int my, int dxy,
432  int thirdpel, int dir, int avg)
433 {
434  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
435  uint8_t *src, *dest;
436  int i, emu = 0;
437  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
438  int linesize = s->cur_pic->f->linesize[0];
439  int uvlinesize = s->cur_pic->f->linesize[1];
440 
441  mx += x;
442  my += y;
443 
/* Source area touches (or crosses) the frame border: clamp and use the
 * edge-emulation buffer below. */
444  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
445  my < 0 || my >= s->v_edge_pos - height - 1) {
446  emu = 1;
447  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
448  my = av_clip(my, -16, s->v_edge_pos - height + 15);
449  }
450 
451  /* form component predictions */
452  dest = s->cur_pic->f->data[0] + x + y * linesize;
453  src = pic->f->data[0] + mx + my * linesize;
454 
455  if (emu) {
456  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
457  linesize, linesize,
458  width + 1, height + 1,
459  mx, my, s->h_edge_pos, s->v_edge_pos);
460  src = s->edge_emu_buffer;
461  }
462  if (thirdpel)
463  (avg ? s->tdsp.avg_tpel_pixels_tab
464  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
465  width, height);
466  else
467  (avg ? s->hdsp.avg_pixels_tab
468  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
469  height);
470 
/* Chroma: halve position and size (rounding the position down via the
 * comparison trick) and repeat for both chroma planes. */
471  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
472  mx = mx + (mx < (int) x) >> 1;
473  my = my + (my < (int) y) >> 1;
474  width = width >> 1;
475  height = height >> 1;
476  blocksize++;
477 
478  for (i = 1; i < 3; i++) {
479  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
480  src = pic->f->data[i] + mx + my * uvlinesize;
481 
482  if (emu) {
483  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
484  uvlinesize, uvlinesize,
485  width + 1, height + 1,
486  mx, my, (s->h_edge_pos >> 1),
487  s->v_edge_pos >> 1);
488  src = s->edge_emu_buffer;
489  }
490  if (thirdpel)
491  (avg ? s->tdsp.avg_tpel_pixels_tab
492  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
493  uvlinesize,
494  width, height);
495  else
496  (avg ? s->hdsp.avg_pixels_tab
497  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
498  uvlinesize,
499  height);
500  }
501  }
502 }
503 
/*
 * Decode the motion vectors of one inter macroblock and run motion
 * compensation for every partition.  size selects the partition
 * dimensions; mode is FULLPEL/HALFPEL/THIRDPEL or PREDICT_MODE (in
 * which case the vectors are derived by scaling the co-located vectors
 * of the next frame instead of reading a differential from the
 * bitstream); dir selects the reference direction, avg blending.
 * Predictors are handled in 1/6-pel units (hence the * 6 scaling and
 * / 3 or / 6 reductions).  Returns 0 on success, -1 on an invalid MV
 * differential.
 */
504 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
505  int dir, int avg)
506 {
507  int i, j, k, mx, my, dx, dy, x, y;
508  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
509  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
510  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
511  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
512  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
513 
514  for (i = 0; i < 16; i += part_height)
515  for (j = 0; j < 16; j += part_width) {
516  const int b_xy = (4 * s->mb_x + (j >> 2)) +
517  (4 * s->mb_y + (i >> 2)) * s->b_stride;
518  int dxy;
519  x = 16 * s->mb_x + j;
520  y = 16 * s->mb_y + i;
521  k = (j >> 2 & 1) + (i >> 1 & 2) +
522  (j >> 1 & 4) + (i & 8);
523 
524  if (mode != PREDICT_MODE) {
525  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
526  } else {
/* PREDICT_MODE: scale the co-located vector of the next frame by the
 * temporal distance ratio (rounded). */
527  mx = s->next_pic->motion_val[0][b_xy][0] * 2;
528  my = s->next_pic->motion_val[0][b_xy][1] * 2;
529 
530  if (dir == 0) {
531  mx = mx * s->frame_num_offset /
532  s->prev_frame_num_offset + 1 >> 1;
533  my = my * s->frame_num_offset /
534  s->prev_frame_num_offset + 1 >> 1;
535  } else {
536  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
537  s->prev_frame_num_offset + 1 >> 1;
538  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
539  s->prev_frame_num_offset + 1 >> 1;
540  }
541  }
542 
543  /* clip motion vector prediction to frame border */
544  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
545  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
546 
547  /* get (optional) motion vector differential */
548  if (mode == PREDICT_MODE) {
549  dx = dy = 0;
550  } else {
551  dy = get_interleaved_se_golomb(&s->gb_slice);
552  dx = get_interleaved_se_golomb(&s->gb_slice);
553 
/* Differentials must fit in 16 bits. */
554  if (dx != (int16_t)dx || dy != (int16_t)dy) {
555  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
556  return -1;
557  }
558  }
559 
560  /* compute motion vector */
561  if (mode == THIRDPEL_MODE) {
562  int fx, fy;
563  mx = (mx + 1 >> 1) + dx;
564  my = (my + 1 >> 1) + dy;
565  fx = (unsigned)(mx + 0x30000) / 3 - 0x10000;
566  fy = (unsigned)(my + 0x30000) / 3 - 0x10000;
567  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
568 
569  svq3_mc_dir_part(s, x, y, part_width, part_height,
570  fx, fy, dxy, 1, dir, avg);
571  mx += mx;
572  my += my;
573  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
574  mx = (unsigned)(mx + 1 + 0x30000) / 3 + dx - 0x10000;
575  my = (unsigned)(my + 1 + 0x30000) / 3 + dy - 0x10000;
576  dxy = (mx & 1) + 2 * (my & 1);
577 
578  svq3_mc_dir_part(s, x, y, part_width, part_height,
579  mx >> 1, my >> 1, dxy, 0, dir, avg);
580  mx *= 3;
581  my *= 3;
582  } else {
583  mx = (unsigned)(mx + 3 + 0x60000) / 6 + dx - 0x10000;
584  my = (unsigned)(my + 3 + 0x60000) / 6 + dy - 0x10000;
585 
586  svq3_mc_dir_part(s, x, y, part_width, part_height,
587  mx, my, 0, 0, dir, avg);
588  mx *= 6;
589  my *= 6;
590  }
591 
592  /* update mv_cache */
593  if (mode != PREDICT_MODE) {
594  int32_t mv = pack16to32(mx, my);
595 
596  if (part_height == 8 && i < 8) {
597  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
598 
599  if (part_width == 8 && j < 8)
600  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
601  }
602  if (part_width == 8 && j < 8)
603  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
604  if (part_width == 4 || part_height == 4)
605  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
606  }
607 
608  /* write back motion vectors */
609  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
610  part_width >> 2, part_height >> 2, s->b_stride,
611  pack16to32(mx, my), 4);
612  }
613 
614  return 0;
615 }
616 
/* Add the residual inverse transform of every non-empty 4x4 luma block
 * to dest_y.  Skipped for INTRA4x4 macroblocks, whose residuals are
 * already added during prediction in hl_decode_mb_predict_luma.
 * NOTE(review): the first line of this signature (original line 617,
 * presumably "static void hl_decode_mb_idct_luma(SVQ3Context *s," per
 * upstream svq3.c) is missing from this listing. */
618  int mb_type, const int *block_offset,
619  int linesize, uint8_t *dest_y)
620 {
621  int i;
622  if (!IS_INTRA4x4(mb_type)) {
623  for (i = 0; i < 16; i++)
624  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
625  uint8_t *const ptr = dest_y + block_offset[i];
626  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
627  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
628  }
629  }
630 }
631 
/* Perform luma intra prediction for one macroblock.  For INTRA4x4,
 * predicts and immediately adds the residual of each 4x4 block (scan8
 * order); otherwise runs the 16x16 predictor and the luma DC transform.
 * NOTE(review): the first line of this signature (original line 632,
 * presumably "static void hl_decode_mb_predict_luma(SVQ3Context *s," per
 * upstream svq3.c) is missing from this listing. */
633  int mb_type,
634  const int *block_offset,
635  int linesize,
636  uint8_t *dest_y)
637 {
638  int i;
639  int qscale = s->qscale;
640 
641  if (IS_INTRA4x4(mb_type)) {
642  for (i = 0; i < 16; i++) {
643  uint8_t *const ptr = dest_y + block_offset[i];
644  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
645 
646  uint8_t *topright;
647  int nnz, tr;
/* Down-left/vertical-left modes need top-right samples; when those are
 * unavailable, replicate the last available top pixel. */
648  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
649  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
650  av_assert2(s->mb_y || linesize <= block_offset[i]);
651  if (!topright_avail) {
652  tr = ptr[3 - linesize] * 0x01010101u;
653  topright = (uint8_t *)&tr;
654  } else
655  topright = ptr + 4 - linesize;
656  } else
657  topright = NULL;
658 
659  s->hpc.pred4x4[dir](ptr, topright, linesize);
660  nnz = s->non_zero_count_cache[scan8[i]];
661  if (nnz) {
662  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
663  }
664  }
665  } else {
666  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
667  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
668  }
669 }
670 
/* Reconstruct the current macroblock into cur_pic: chroma intra
 * prediction, luma prediction (intra), luma residual IDCT, then chroma
 * DC dequant + residual addition when cbp signals coded chroma.
 * NOTE(review): the signature line of this function (original line 671,
 * presumably "static void hl_decode_mb(SVQ3Context *s)" per upstream
 * svq3.c) is missing from this listing. */
672 {
673  const int mb_x = s->mb_x;
674  const int mb_y = s->mb_y;
675  const int mb_xy = s->mb_xy;
676  const int mb_type = s->cur_pic->mb_type[mb_xy];
677  uint8_t *dest_y, *dest_cb, *dest_cr;
678  int linesize, uvlinesize;
679  int i, j;
680  const int *block_offset = &s->block_offset[0];
681  const int block_h = 16 >> 1;
682 
683  linesize = s->cur_pic->f->linesize[0];
684  uvlinesize = s->cur_pic->f->linesize[1];
685 
686  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
687  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
688  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
689 
690  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
691  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
692 
693  if (IS_INTRA(mb_type)) {
694  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
695  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
696 
697  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
698  }
699 
700  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
701 
/* cbp bits 4-5 signal coded chroma blocks. */
702  if (s->cbp & 0x30) {
703  uint8_t *dest[2] = { dest_cb, dest_cr };
704  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
705  s->dequant4_coeff[4][0]);
706  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
707  s->dequant4_coeff[4][0]);
708  for (j = 1; j < 3; j++) {
709  for (i = j * 16; i < j * 16 + 4; i++)
710  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
711  uint8_t *const ptr = dest[j - 1] + block_offset[i];
712  svq3_add_idct_c(ptr, s->mb + i * 16,
713  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
714  }
715  }
716  }
717 }
718 
/*
 * Decode one macroblock of the given coded type and reconstruct its
 * prediction data (motion vectors / intra modes) and residuals.
 * mb_type: 0 = skip/direct, 1..7 = inter, 8 and 33 = intra 4x4
 * variants, otherwise intra 16x16 (mb_type - 8 indexes
 * ff_h264_i_mb_type_info).  Returns 0 on success, -1 (or a negative
 * error) on a bitstream error.
 */
719 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
720 {
721  int i, j, k, m, dir, mode;
722  int cbp = 0;
723  uint32_t vlc;
724  int8_t *top, *left;
725  const int mb_xy = s->mb_xy;
726  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
727 
728  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
729  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
730  s->topright_samples_available = 0xFFFF;
731 
732  if (mb_type == 0) { /* SKIP */
733  if (s->pict_type == AV_PICTURE_TYPE_P ||
734  s->next_pic->mb_type[mb_xy] == -1) {
735  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
736  0, 0, 0, 0, 0, 0);
737 
738  if (s->pict_type == AV_PICTURE_TYPE_B)
739  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
740  0, 0, 0, 0, 1, 1);
741 
742  mb_type = MB_TYPE_SKIP;
743  } else {
/* B-frame direct mode: derive both directions from the next frame. */
744  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
745  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
746  return -1;
747  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
748  return -1;
749 
750  mb_type = MB_TYPE_16x16;
751  }
752  } else if (mb_type < 8) { /* INTER */
753  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
/* NOTE(review): the body of this branch (original line 754, presumably
 * "mode = THIRDPEL_MODE;" per upstream svq3.c) is missing from this
 * listing. */
755  else if (s->halfpel_flag &&
756  s->thirdpel_flag == !get_bits1(&s->gb_slice))
757  mode = HALFPEL_MODE;
758  else
759  mode = FULLPEL_MODE;
760 
761  /* fill caches */
762  /* note ref_cache should contain here:
763  * ????????
764  * ???11111
765  * N??11111
766  * N??11111
767  * N??11111
768  */
769 
770  for (m = 0; m < 2; m++) {
771  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
772  for (i = 0; i < 4; i++)
773  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
774  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
775  } else {
776  for (i = 0; i < 4; i++)
777  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
778  }
779  if (s->mb_y > 0) {
780  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
781  s->cur_pic->motion_val[m][b_xy - s->b_stride],
782  4 * 2 * sizeof(int16_t));
783  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
784  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
785 
786  if (s->mb_x < s->mb_width - 1) {
787  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
788  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
789  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
790  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
791  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
792  } else
793  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
794  if (s->mb_x > 0) {
795  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
796  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
797  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
798  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
799  } else
800  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
801  } else
802  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
803  PART_NOT_AVAILABLE, 8);
804 
805  if (s->pict_type != AV_PICTURE_TYPE_B)
806  break;
807  }
808 
809  /* decode motion vector(s) and form prediction(s) */
810  if (s->pict_type == AV_PICTURE_TYPE_P) {
811  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
812  return -1;
813  } else { /* AV_PICTURE_TYPE_B */
814  if (mb_type != 2) {
815  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
816  return -1;
817  } else {
818  for (i = 0; i < 4; i++)
819  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
820  0, 4 * 2 * sizeof(int16_t));
821  }
822  if (mb_type != 1) {
823  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
824  return -1;
825  } else {
826  for (i = 0; i < 4; i++)
827  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
828  0, 4 * 2 * sizeof(int16_t));
829  }
830  }
831 
832  mb_type = MB_TYPE_16x16;
833  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
834  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
835  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
836 
837  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
838 
839  if (mb_type == 8) {
840  if (s->mb_x > 0) {
841  for (i = 0; i < 4; i++)
842  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
843  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
844  s->left_samples_available = 0x5F5F;
845  }
846  if (s->mb_y > 0) {
847  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
848  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
849  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
850  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
851 
852  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
853  s->top_samples_available = 0x33FF;
854  }
855 
856  /* decode prediction codes for luma blocks */
857  for (i = 0; i < 16; i += 2) {
858  vlc = get_interleaved_ue_golomb(&s->gb_slice);
859 
860  if (vlc >= 25U) {
861  av_log(s->avctx, AV_LOG_ERROR,
862  "luma prediction:%"PRIu32"\n", vlc);
863  return -1;
864  }
865 
866  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
867  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
868 
/* One VLC yields the modes of two consecutive 4x4 blocks via the
 * svq3_pred_0 / svq3_pred_1 tables. */
869  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
870  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
871 
872  if (left[1] == -1 || left[2] == -1) {
873  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
874  return -1;
875  }
876  }
877  } else { /* mb_type == 33, DC_128_PRED block type */
878  for (i = 0; i < 4; i++)
879  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
880  }
881 
882  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
883  i4x4[4] = i4x4_cache[7 + 8 * 3];
884  i4x4[5] = i4x4_cache[7 + 8 * 2];
885  i4x4[6] = i4x4_cache[7 + 8 * 1];
886 
887  if (mb_type == 8) {
888  ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
889  s->avctx, s->top_samples_available,
890  s->left_samples_available);
891 
892  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
893  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
894  } else {
895  for (i = 0; i < 4; i++)
896  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
897 
898  s->top_samples_available = 0x33FF;
899  s->left_samples_available = 0x5F5F;
900  }
901 
902  mb_type = MB_TYPE_INTRA4x4;
903  } else { /* INTRA16x16 */
904  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
905  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
906 
907  if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
908  s->left_samples_available, dir, 0)) < 0) {
909  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
910  return s->intra16x16_pred_mode;
911  }
912 
913  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
914  mb_type = MB_TYPE_INTRA16x16;
915  }
916 
917  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
918  for (i = 0; i < 4; i++)
919  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
920  0, 4 * 2 * sizeof(int16_t));
921  if (s->pict_type == AV_PICTURE_TYPE_B) {
922  for (i = 0; i < 4; i++)
923  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
924  0, 4 * 2 * sizeof(int16_t));
925  }
926  }
927  if (!IS_INTRA4x4(mb_type)) {
928  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
929  }
930  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
931  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
932  }
933 
934  if (!IS_INTRA16x16(mb_type) &&
935  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
936  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
937  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
938  return -1;
939  }
940 
941  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
/* NOTE(review): the inter arm of this conditional expression (original
 * line 942) is missing from this listing -- verify against upstream
 * svq3.c. */
943  }
944  if (IS_INTRA16x16(mb_type) ||
945  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
946  s->qscale += get_interleaved_se_golomb(&s->gb_slice);
947 
948  if (s->qscale > 31u) {
949  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
950  return -1;
951  }
952  }
953  if (IS_INTRA16x16(mb_type)) {
954  AV_ZERO128(s->mb_luma_dc[0] + 0);
955  AV_ZERO128(s->mb_luma_dc[0] + 8);
956  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
957  av_log(s->avctx, AV_LOG_ERROR,
958  "error while decoding intra luma dc\n");
959  return -1;
960  }
961  }
962 
963  if (cbp) {
964  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
965  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
966 
/* Luma residuals: one bit of cbp per 8x8 quadrant. */
967  for (i = 0; i < 4; i++)
968  if ((cbp & (1 << i))) {
969  for (j = 0; j < 4; j++) {
970  k = index ? (1 * (j & 1) + 2 * (i & 1) +
971  2 * (j & 2) + 4 * (i & 2))
972  : (4 * i + j);
973  s->non_zero_count_cache[scan8[k]] = 1;
974 
975  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
976  av_log(s->avctx, AV_LOG_ERROR,
977  "error while decoding block\n");
978  return -1;
979  }
980  }
981  }
982 
983  if ((cbp & 0x30)) {
984  for (i = 1; i < 3; ++i)
985  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
986  av_log(s->avctx, AV_LOG_ERROR,
987  "error while decoding chroma dc block\n");
988  return -1;
989  }
990 
991  if ((cbp & 0x20)) {
992  for (i = 1; i < 3; i++) {
993  for (j = 0; j < 4; j++) {
994  k = 16 * i + j;
995  s->non_zero_count_cache[scan8[k]] = 1;
996 
997  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
998  av_log(s->avctx, AV_LOG_ERROR,
999  "error while decoding chroma ac block\n");
1000  return -1;
1001  }
1002  }
1003  }
1004  }
1005  }
1006  }
1007 
1008  s->cbp = cbp;
1009  s->cur_pic->mb_type[mb_xy] = mb_type;
1010 
1011  if (IS_INTRA(mb_type))
1012  s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1013  s->left_samples_available, DC_PRED8x8, 1);
1014 
1015  return 0;
1016 }
1017 
/*
 * Parse one SVQ3 slice header.
 *
 * Reads the 8-bit slice marker from s->gb, copies the slice payload into
 * s->slice_buf (de-watermarking it if a watermark key is set), and
 * initializes s->gb_slice over it.  Fills in slice_type, slice_num,
 * qscale, adaptive_quant and resets the intra predictors / motion
 * vector references for the new slice.
 *
 * Returns 0 on success, -1 or a negative AVERROR code on failure.
 *
 * NOTE(review): the original signature line (1018,
 * "static int svq3_decode_slice_header(AVCodecContext *avctx)") was
 * dropped by the documentation extraction; see the cross-reference
 * entry for svq3_decode_slice_header below.
 */
1019 {
1020  SVQ3Context *s = avctx->priv_data;
1021  const int mb_xy = s->mb_xy;
1022  int i, header;
1023  unsigned slice_id;
1024 
1025  header = get_bits(&s->gb, 8);
1026 
 /* low 5 bits select the header type (1 or 2); bits 5-6 give the
  * byte count of the slice-length field and must be non-zero */
1027  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1028  /* TODO: what? */
1029  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1030  return -1;
1031  } else {
1032  int slice_bits, slice_bytes, slice_length;
1033  int length = header >> 5 & 3;
1034 
1035  slice_length = show_bits(&s->gb, 8 * length);
1036  slice_bits = slice_length * 8;
1037  slice_bytes = slice_length + length - 1;
1038 
1039  skip_bits(&s->gb, 8);
1040 
1041  av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1042  if (!s->slice_buf)
1043  return AVERROR(ENOMEM);
1044 
 /* 8LL widens the product so oversized slice_bytes cannot overflow int */
1045  if (slice_bytes * 8LL > get_bits_left(&s->gb)) {
1046  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1047  return AVERROR_INVALIDDATA;
1048  }
1049  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1050 
 /* undo the watermark obfuscation of the 32 bits at offset 1 */
1051  if (s->watermark_key) {
1052  uint32_t header = AV_RL32(&s->slice_buf[1]);
1053  AV_WL32(&s->slice_buf[1], header ^ s->watermark_key);
1054  }
1055  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1056 
1057  if (length > 0) {
1058  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1059  }
1060  skip_bits_long(&s->gb, slice_bytes * 8);
1061  }
1062 
 /* slice_id indexes ff_h264_golomb_to_pict_type; only 0..2 are valid */
1063  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1064  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1065  return -1;
1066  }
1067 
1068  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1069 
1070  if ((header & 0x9F) == 2) {
 /* header type 2 carries an explicit macroblock address field */
1071  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1072  get_bits(&s->gb_slice, i);
1073  } else if (get_bits1(&s->gb_slice)) {
1074  avpriv_report_missing_feature(s->avctx, "Media key encryption");
1075  return AVERROR_PATCHWELCOME;
1076  }
1077 
1078  s->slice_num = get_bits(&s->gb_slice, 8);
1079  s->qscale = get_bits(&s->gb_slice, 5);
1080  s->adaptive_quant = get_bits1(&s->gb_slice);
1081 
1082  /* unknown fields */
1083  skip_bits1(&s->gb_slice);
1084 
1085  if (s->has_watermark)
1086  skip_bits1(&s->gb_slice);
1087 
1088  skip_bits1(&s->gb_slice);
1089  skip_bits(&s->gb_slice, 2);
1090 
1091  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1092  return AVERROR_INVALIDDATA;
1093 
1094  /* reset intra predictors and invalidate motion vector references */
1095  if (s->mb_x > 0) {
1096  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1097  -1, 4 * sizeof(int8_t));
1098  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1099  -1, 8 * sizeof(int8_t) * s->mb_x);
1100  }
1101  if (s->mb_y > 0) {
1102  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1103  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1104 
1105  if (s->mb_x > 0)
1106  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1107  }
1108 
1109  return 0;
1110 }
1111 
/*
 * Build the 4x4 dequantization table for every QP in 0..51 from the
 * shared H.264 base tables, pre-scaled by 16 for the SVQ3 IDCT.
 * The index expression transposes the x coordinate ((x>>2) | ((x<<2)&0xF))
 * so the table is stored in the scan order the decoder consumes.
 *
 * NOTE(review): the original signature line (1112,
 * "static void init_dequant4_coeff_table(SVQ3Context *s)") was dropped
 * by the documentation extraction.
 */
1113 {
1114  int q, x;
1115  const int max_qp = 51;
1116 
1117  for (q = 0; q < max_qp + 1; q++) {
 /* qp = 6*div6 + rem6: div6 gives the shift, rem6 picks the base row */
1118  int shift = ff_h264_quant_div6[q] + 2;
1119  int idx = ff_h264_quant_rem6[q];
1120  for (x = 0; x < 16; x++)
1121  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1122  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1123  }
1124 }
1125 
/*
 * Decoder init: allocate the three reference picture slots, set up the
 * H.264/hpel/tpel/video DSP helpers, then locate and parse the "SEQH"
 * header inside the Quicktime ImageDescription extradata (frame size,
 * half/third-pel flags, low-delay flag and the optional zlib-compressed
 * watermark whose checksum becomes the slice de-obfuscation key).
 * Finally computes the macroblock geometry and the mb -> intra-pred-mode
 * index map.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 *
 * NOTE(review): the original signature line (1126, the
 * "static av_cold int svq3_decode_init(AVCodecContext *avctx)" header)
 * was dropped by the documentation extraction.
 */
1127 {
1128  SVQ3Context *s = avctx->priv_data;
1129  int m, x, y;
1130  unsigned char *extradata;
1131  unsigned char *extradata_end;
1132  unsigned int size;
1133  int marker_found = 0;
1134  int ret;
1135 
1136  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1137  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1138  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1139  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1140  ret = AVERROR(ENOMEM);
1141  goto fail;
1142  }
1143 
1144  s->cur_pic->f = av_frame_alloc();
1145  s->last_pic->f = av_frame_alloc();
1146  s->next_pic->f = av_frame_alloc();
 /* NOTE(review): this path returns directly instead of "goto fail" like
  * the other error paths; relies on the caller invoking the close
  * callback to release what was already allocated — confirm the codec
  * is flagged for init cleanup */
1147  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1148  return AVERROR(ENOMEM);
1149 
1150  ff_h264dsp_init(&s->h264dsp, 8, 1);
1151  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1152  ff_videodsp_init(&s->vdsp, 8);
1153 
1154 
1155  avctx->bits_per_raw_sample = 8;
1156 
1157  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1158  ff_tpeldsp_init(&s->tdsp);
1159 
1160  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1161  avctx->color_range = AVCOL_RANGE_JPEG;
1162 
1163  s->avctx = avctx;
1164  s->halfpel_flag = 1;
1165  s->thirdpel_flag = 1;
1166  s->has_watermark = 0;
1167 
1168  /* prowl for the "SEQH" marker in the extradata */
1169  extradata = (unsigned char *)avctx->extradata;
1170  extradata_end = avctx->extradata + avctx->extradata_size;
1171  if (extradata) {
1172  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1173  if (!memcmp(extradata, "SEQH", 4)) {
1174  marker_found = 1;
1175  break;
1176  }
1177  extradata++;
1178  }
1179  }
1180 
1181  /* if a match was found, parse the extra data */
1182  if (marker_found) {
1183  GetBitContext gb;
1184  int frame_size_code;
1185  int unk0, unk1, unk2, unk3, unk4;
1186  int w,h;
1187 
 /* big-endian atom size immediately after "SEQH"; must fit in extradata */
1188  size = AV_RB32(&extradata[4]);
1189  if (size > extradata_end - extradata - 8) {
 /* NOTE(review): original line 1190 (presumably "ret = AVERROR_INVALIDDATA;")
  * was dropped by the documentation extraction */
1191  goto fail;
1192  }
1193  init_get_bits(&gb, extradata + 8, size * 8);
1194 
1195  /* 'frame size code' and optional 'width, height' */
1196  frame_size_code = get_bits(&gb, 3);
1197  switch (frame_size_code) {
1198  case 0:
1199  w = 160;
1200  h = 120;
1201  break;
1202  case 1:
1203  w = 128;
1204  h = 96;
1205  break;
1206  case 2:
1207  w = 176;
1208  h = 144;
1209  break;
1210  case 3:
1211  w = 352;
1212  h = 288;
1213  break;
1214  case 4:
1215  w = 704;
1216  h = 576;
1217  break;
1218  case 5:
1219  w = 240;
1220  h = 180;
1221  break;
1222  case 6:
1223  w = 320;
1224  h = 240;
1225  break;
1226  case 7:
 /* code 7: explicit 12-bit width and height follow */
1227  w = get_bits(&gb, 12);
1228  h = get_bits(&gb, 12);
1229  break;
1230  }
1231  ret = ff_set_dimensions(avctx, w, h);
1232  if (ret < 0)
1233  goto fail;
1234 
1235  s->halfpel_flag = get_bits1(&gb);
1236  s->thirdpel_flag = get_bits1(&gb);
1237 
1238  /* unknown fields */
1239  unk0 = get_bits1(&gb);
1240  unk1 = get_bits1(&gb);
1241  unk2 = get_bits1(&gb);
1242  unk3 = get_bits1(&gb);
1243 
1244  s->low_delay = get_bits1(&gb);
1245 
1246  /* unknown field */
1247  unk4 = get_bits1(&gb);
1248 
1249  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1250  unk0, unk1, unk2, unk3, unk4);
1251 
1252  if (skip_1stop_8data_bits(&gb) < 0) {
 /* NOTE(review): original line 1253 (presumably "ret = AVERROR_INVALIDDATA;")
  * was dropped by the documentation extraction */
1254  goto fail;
1255  }
1256 
1257  s->has_watermark = get_bits1(&gb);
1258  avctx->has_b_frames = !s->low_delay;
1259  if (s->has_watermark) {
1260 #if CONFIG_ZLIB
1261  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1262  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1263  int u1 = get_interleaved_ue_golomb(&gb);
1264  int u2 = get_bits(&gb, 8);
1265  int u3 = get_bits(&gb, 2);
1266  int u4 = get_interleaved_ue_golomb(&gb);
1267  unsigned long buf_len = watermark_width *
1268  watermark_height * 4;
 /* byte offset of the compressed logo within the SEQH payload */
1269  int offset = get_bits_count(&gb) + 7 >> 3;
1270  uint8_t *buf;
1271 
 /* reject sizes whose width*height*4 product would overflow */
1272  if (watermark_height <= 0 ||
1273  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1274  ret = -1;
1275  goto fail;
1276  }
1277 
1278  buf = av_malloc(buf_len);
1279  if (!buf) {
1280  ret = AVERROR(ENOMEM);
1281  goto fail;
1282  }
1283  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1284  watermark_width, watermark_height);
1285  av_log(avctx, AV_LOG_DEBUG,
1286  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1287  u1, u2, u3, u4, offset);
1288  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1289  size - offset) != Z_OK) {
1290  av_log(avctx, AV_LOG_ERROR,
1291  "could not uncompress watermark logo\n");
1292  av_free(buf);
1293  ret = -1;
1294  goto fail;
1295  }
 /* checksum of the decoded logo, doubled into both halves of the
  * 32-bit key used to de-obfuscate slice headers */
1296  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1297  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1298  av_log(avctx, AV_LOG_DEBUG,
1299  "watermark key %#"PRIx32"\n", s->watermark_key);
1300  av_free(buf);
1301 #else
1302  av_log(avctx, AV_LOG_ERROR,
1303  "this svq3 file contains watermark which need zlib support compiled in\n");
1304  ret = -1;
1305  goto fail;
1306 #endif
1307  }
1308  }
1309 
 /* macroblock grid geometry; mb_stride has one spare column for edges */
1310  s->mb_width = (avctx->width + 15) / 16;
1311  s->mb_height = (avctx->height + 15) / 16;
1312  s->mb_stride = s->mb_width + 1;
1313  s->mb_num = s->mb_width * s->mb_height;
1314  s->b_stride = 4 * s->mb_width;
1315  s->h_edge_pos = s->mb_width * 16;
1316  s->v_edge_pos = s->mb_height * 16;
1317 
1318  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1319  if (!s->intra4x4_pred_mode)
1320  return AVERROR(ENOMEM);
1321 
1322  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1323  sizeof(*s->mb2br_xy));
1324  if (!s->mb2br_xy)
1325  return AVERROR(ENOMEM);
1326 
1327  for (y = 0; y < s->mb_height; y++)
1328  for (x = 0; x < s->mb_width; x++) {
1329  const int mb_xy = x + y * s->mb_stride;
1330 
 /* two-row ring buffer: pred-mode storage repeats every 2*mb_stride MBs */
1331  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1332  }
1333 
 /* NOTE(review): original line 1334 (presumably the call to
  * init_dequant4_coeff_table(s)) was dropped by the documentation
  * extraction */
1335 
1336  return 0;
1337 fail:
1338  svq3_decode_end(avctx);
1339  return ret;
1340 }
1341 
/*
 * Release the per-picture side data of an SVQ3Frame and unreference its
 * AVFrame.  The SVQ3Frame struct itself and pic->f are NOT freed here;
 * svq3_decode_end owns those.
 */
1342 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1343 {
1344  int i;
1345  for (i = 0; i < 2; i++) {
 /* NOTE(review): original lines 1346-1347 were dropped by the
  * documentation extraction — presumably av_buffer_unref() of
  * pic->motion_val_buf[i] and pic->ref_index_buf[i]; confirm against
  * the upstream source */
1348  }
 /* NOTE(review): original line 1349 also dropped — presumably
  * av_buffer_unref(&pic->mb_type_buf) */
1350 
1351  av_frame_unref(pic->f);
1352 }
1353 
/*
 * Acquire a decoded-picture buffer for pic and lazily allocate its
 * per-picture side data (mb_type, motion vectors, reference indices)
 * plus the shared edge emulation buffer.  The side data is allocated
 * only once per SVQ3Frame and reused across frames.
 *
 * Returns 0 on success, a negative AVERROR code on failure; on failure
 * the picture's side data is released via free_picture().
 */
1354 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1355 {
1356  SVQ3Context *s = avctx->priv_data;
1357  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1358  const int mb_array_size = s->mb_stride * s->mb_height;
1359  const int b4_stride = s->mb_width * 4 + 1;
1360  const int b4_array_size = b4_stride * s->mb_height * 4;
1361  int ret;
1362 
 /* first use of this SVQ3Frame: allocate its side-data buffers */
1363  if (!pic->motion_val_buf[0]) {
1364  int i;
1365 
1366  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1367  if (!pic->mb_type_buf)
1368  return AVERROR(ENOMEM);
 /* offset past the top/left guard rows so neighbor lookups stay in-bounds */
1369  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1370 
1371  for (i = 0; i < 2; i++) {
1372  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1373  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1374  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1375  ret = AVERROR(ENOMEM);
1376  goto fail;
1377  }
1378 
1379  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1380  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1381  }
1382  }
1383 
1384  ret = ff_get_buffer(avctx, pic->f,
1385  (s->pict_type != AV_PICTURE_TYPE_B) ?
 /* NOTE(review): original line 1386 was dropped by the documentation
  * extraction — presumably "AV_GET_BUFFER_FLAG_REF : 0);" (reference
  * pictures are kept, B-frames are not); confirm against upstream */
1387  if (ret < 0)
1388  goto fail;
1389 
1390  if (!s->edge_emu_buffer) {
 /* one 17-line strip at the frame's luma stride, shared by all pictures */
1391  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1392  if (!s->edge_emu_buffer)
1393  return AVERROR(ENOMEM);
1394  }
1395 
1396  return 0;
1397 fail:
1398  free_picture(avctx, pic);
1399  return ret;
1400 }
1401 
/*
 * Decode one SVQ3 frame from avpkt into *data (an AVFrame).
 *
 * Handles: the end-of-stream flush of the pending next_pic, slice header
 * parsing, reference-frame bookkeeping (last/next/cur rotation around
 * B-frames), grey-frame synthesis for missing references, the
 * macroblock decode loop, and delayed output ordering when B-frames are
 * present (!low_delay).
 *
 * Returns the number of bytes consumed (buf_size), 0 for skipped/flushed
 * frames, or a negative error code.
 */
1402 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1403  int *got_frame, AVPacket *avpkt)
1404 {
1405  SVQ3Context *s = avctx->priv_data;
1406  int buf_size = avpkt->size;
1407  int left;
1408  uint8_t *buf;
1409  int ret, m, i;
1410 
1411  /* special case for last picture */
 /* empty packet = flush: emit the buffered next_pic exactly once */
1412  if (buf_size == 0) {
1413  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1414  ret = av_frame_ref(data, s->next_pic->f);
1415  if (ret < 0)
1416  return ret;
1417  s->last_frame_output = 1;
1418  *got_frame = 1;
1419  }
1420  return 0;
1421  }
1422 
1423  s->mb_x = s->mb_y = s->mb_xy = 0;
1424 
 /* watermarked streams are de-obfuscated in place, so work on a copy */
1425  if (s->watermark_key) {
1426  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1427  if (!s->buf)
1428  return AVERROR(ENOMEM);
1429  memcpy(s->buf, avpkt->data, buf_size);
1430  buf = s->buf;
1431  } else {
1432  buf = avpkt->data;
1433  }
1434 
1435  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1436  if (ret < 0)
1437  return ret;
1438 
1439  if (svq3_decode_slice_header(avctx))
1440  return -1;
1441 
1442  s->pict_type = s->slice_type;
1443 
 /* non-B frame: previous "next" becomes the backward reference */
1444  if (s->pict_type != AV_PICTURE_TYPE_B)
1445  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1446 
1447  av_frame_unref(s->cur_pic->f);
1448 
1449  /* for skipping the frame */
1450  s->cur_pic->f->pict_type = s->pict_type;
1451  s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1452 
1453  ret = get_buffer(avctx, s->cur_pic);
1454  if (ret < 0)
1455  return ret;
1456 
 /* per-block pixel offsets in scan8 order; [0..15] luma 4x4,
  * [48+...] the 8x8/DC variants at doubled line stride */
1457  for (i = 0; i < 16; i++) {
1458  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1459  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1460  }
1461  for (i = 0; i < 16; i++) {
1462  s->block_offset[16 + i] =
1463  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1464  s->block_offset[48 + 16 + i] =
1465  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1466  }
1467 
 /* synthesize grey reference frames when decoding starts mid-stream */
1468  if (s->pict_type != AV_PICTURE_TYPE_I) {
1469  if (!s->last_pic->f->data[0]) {
1470  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1471  av_frame_unref(s->last_pic->f);
1472  ret = get_buffer(avctx, s->last_pic);
1473  if (ret < 0)
1474  return ret;
1475  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1476  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1477  s->last_pic->f->linesize[1]);
1478  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1479  s->last_pic->f->linesize[2]);
1480  }
1481 
1482  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1483  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1484  av_frame_unref(s->next_pic->f);
1485  ret = get_buffer(avctx, s->next_pic);
1486  if (ret < 0)
1487  return ret;
1488  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1489  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1490  s->next_pic->f->linesize[1]);
1491  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1492  s->next_pic->f->linesize[2]);
1493  }
1494  }
1495 
1496  if (avctx->debug & FF_DEBUG_PICT_INFO)
1497  av_log(s->avctx, AV_LOG_DEBUG,
1498  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1499  av_get_picture_type_char(s->pict_type),
1500  s->halfpel_flag, s->thirdpel_flag,
1501  s->adaptive_quant, s->qscale, s->slice_num);
1502 
 /* honor the user's skip_frame discard level */
1503  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1504  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1505  avctx->skip_frame >= AVDISCARD_ALL)
1506  return 0;
1507 
1508  if (s->next_p_frame_damaged) {
1509  if (s->pict_type == AV_PICTURE_TYPE_B)
1510  return 0;
1511  else
1512  s->next_p_frame_damaged = 0;
1513  }
1514 
 /* frame-number bookkeeping; slice_num wraps at 256 */
1515  if (s->pict_type == AV_PICTURE_TYPE_B) {
1516  s->frame_num_offset = s->slice_num - s->prev_frame_num;
1517 
1518  if (s->frame_num_offset < 0)
1519  s->frame_num_offset += 256;
1520  if (s->frame_num_offset == 0 ||
1521  s->frame_num_offset >= s->prev_frame_num_offset) {
1522  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1523  return -1;
1524  }
1525  } else {
1526  s->prev_frame_num = s->frame_num;
1527  s->frame_num = s->slice_num;
1528  s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1529 
1530  if (s->prev_frame_num_offset < 0)
1531  s->prev_frame_num_offset += 256;
1532  }
1533 
 /* mark all reference-cache entries usable, then flag the column right
  * of the MB as unavailable for the first three rows */
1534  for (m = 0; m < 2; m++) {
1535  int i;
1536  for (i = 0; i < 4; i++) {
1537  int j;
1538  for (j = -1; j < 4; j++)
1539  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1540  if (i < 3)
1541  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1542  }
1543  }
1544 
 /* main macroblock loop */
1545  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1546  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1547  unsigned mb_type;
1548  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1549 
 /* near the end of the slice data: either a new slice header
  * follows or only padding bits remain */
1550  if ((get_bits_left(&s->gb_slice)) <= 7) {
1551  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1552  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1553 
1554  if (svq3_decode_slice_header(avctx))
1555  return -1;
1556  }
1557  if (s->slice_type != s->pict_type) {
1558  avpriv_request_sample(avctx, "non constant slice type");
1559  }
1560  /* TODO: support s->mb_skip_run */
1561  }
1562 
1563  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1564 
 /* remap the per-picture-type mb_type ranges onto a common scale */
1565  if (s->pict_type == AV_PICTURE_TYPE_I)
1566  mb_type += 8;
1567  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1568  mb_type += 4;
1569  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1570  av_log(s->avctx, AV_LOG_ERROR,
1571  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1572  return -1;
1573  }
1574 
1575  if (mb_type != 0 || s->cbp)
1576  hl_decode_mb(s);
1577 
1578  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1579  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1580  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1581  }
1582 
1583  ff_draw_horiz_band(avctx, s->cur_pic->f,
1584  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1585  16 * s->mb_y, 16, PICT_FRAME, 0,
1586  s->low_delay);
1587  }
1588 
1589  left = buf_size*8 - get_bits_count(&s->gb_slice);
1590 
1591  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1592  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1593  //av_hex_dump(stderr, buf+buf_size-8, 8);
1594  }
1595 
1596  if (left < 0) {
1597  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1598  return -1;
1599  }
1600 
 /* B-frames and low-delay streams output immediately; otherwise the
  * previous reference is emitted (one-frame reorder delay) */
1601  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1602  ret = av_frame_ref(data, s->cur_pic->f);
1603  else if (s->last_pic->f->data[0])
1604  ret = av_frame_ref(data, s->last_pic->f);
 /* if neither branch ran, ret still holds 0 from get_buffer() above */
1605  if (ret < 0)
1606  return ret;
1607 
1608  /* Do not output the last pic after seeking. */
1609  if (s->last_pic->f->data[0] || s->low_delay)
1610  *got_frame = 1;
1611 
1612  if (s->pict_type != AV_PICTURE_TYPE_B) {
1613  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1614  } else {
1615  av_frame_unref(s->cur_pic->f);
1616  }
1617 
1618  return buf_size;
1619 }
1620 
/*
 * Decoder teardown: release the side data and AVFrames of all three
 * picture slots, the picture structs themselves, and every auxiliary
 * buffer owned by the context.  Safe to call with partially initialized
 * state (also used as the init-failure cleanup path via "goto fail").
 *
 * NOTE(review): the original signature line (1621, the
 * "static av_cold int svq3_decode_end(AVCodecContext *avctx)" header)
 * was dropped by the documentation extraction.
 */
1622 {
1623  SVQ3Context *s = avctx->priv_data;
1624 
1625  free_picture(avctx, s->cur_pic);
1626  free_picture(avctx, s->next_pic);
1627  free_picture(avctx, s->last_pic);
1628  av_frame_free(&s->cur_pic->f);
1629  av_frame_free(&s->next_pic->f);
1630  av_frame_free(&s->last_pic->f);
1631  av_freep(&s->cur_pic);
1632  av_freep(&s->next_pic);
1633  av_freep(&s->last_pic);
1634  av_freep(&s->slice_buf);
1635  av_freep(&s->intra4x4_pred_mode);
1636  av_freep(&s->edge_emu_buffer);
1637  av_freep(&s->mb2br_xy);
1638 
1639 
1640  av_freep(&s->buf);
1641  s->buf_size = 0;
1642 
1643  return 0;
1644 }
1645 
/*
 * Public codec registration for the SVQ3 decoder.
 *
 * NOTE(review): the documentation extraction dropped the struct's
 * opening line (1646, "AVCodec ff_svq3_decoder = {") and several
 * hyperlinked member lines (1652 .init, 1654 .decode, 1656-1657 the
 * capabilities continuation); confirm the full initializer against the
 * upstream source.
 */
1647  .name = "svq3",
1648  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1649  .type = AVMEDIA_TYPE_VIDEO,
1650  .id = AV_CODEC_ID_SVQ3,
1651  .priv_data_size = sizeof(SVQ3Context),
1653  .close = svq3_decode_end,
1655  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1658  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1659  AV_PIX_FMT_NONE},
1660 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
SVQ3Context::frame_num
int frame_num
Definition: svq3.c:112
SVQ3Context::edge_emu_buffer
uint8_t * edge_emu_buffer
Definition: svq3.c:139
IS_INTRA4x4
#define IS_INTRA4x4(a)
Definition: mpegutils.h:75
svq3_dequant_coeff
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:215
AVCodec
AVCodec.
Definition: avcodec.h:3481
SVQ3Context::next_pic
SVQ3Frame * next_pic
Definition: svq3.c:92
skip_bits_long
static void skip_bits_long(GetBitContext *s, int n)
Skips the specified number of bits.
Definition: get_bits.h:291
SVQ3Context::slice_type
enum AVPictureType slice_type
Definition: svq3.c:118
SVQ3Context::gb_slice
GetBitContext gb_slice
Definition: svq3.c:95
SVQ3Context::vdsp
VideoDSPContext vdsp
Definition: svq3.c:89
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
SVQ3Context::slice_num
int slice_num
Definition: svq3.c:109
level
uint8_t level
Definition: svq3.c:207
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:849
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
DC_PRED8x8
#define DC_PRED8x8
Definition: h264pred.h:68
svq3_decode_slice_header
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1018
AV_WL32
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
SVQ3Context::avctx
AVCodecContext * avctx
Definition: svq3.c:83
DC_128_PRED
@ DC_128_PRED
Definition: vp9.h:58
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
SVQ3Context::mb_num
int mb_num
Definition: svq3.c:124
n
int n
Definition: avisynth_c.h:760
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
SVQ3Context::v_edge_pos
int v_edge_pos
Definition: svq3.c:107
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
AVPictureType
AVPictureType
Definition: avutil.h:272
ff_h264_chroma_qp
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
mv
static const int8_t mv[256][2]
Definition: 4xm.c:77
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
SVQ3Context::left_samples_available
unsigned int left_samples_available
Definition: svq3.c:137
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
svq3_dct_tables
static const struct @148 svq3_dct_tables[2][16]
get_interleaved_ue_golomb
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:143
ff_h264_golomb_to_inter_cbp
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
SVQ3Context::h_edge_pos
int h_edge_pos
Definition: svq3.c:106
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:522
IMbInfo::cbp
uint8_t cbp
Definition: h264data.h:36
internal.h
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
DC_PRED
@ DC_PRED
Definition: vp9.h:48
MB_TYPE_INTRA4x4
#define MB_TYPE_INTRA4x4
Definition: mpegutils.h:51
SVQ3Context::slice_buf
uint8_t * slice_buf
Definition: svq3.c:96
data
const char data[16]
Definition: mxf.c:91
VERT_LEFT_PRED
@ VERT_LEFT_PRED
Definition: vp9.h:53
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:54
SVQ3Context::mb
int16_t mb[16 *48 *2]
Definition: svq3.c:143
PREDICT_MODE
#define PREDICT_MODE
Definition: svq3.c:153
av_mallocz_array
void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.c:191
free_picture
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1342
av_buffer_allocz
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
AV_WN32A
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
ff_h264_golomb_to_intra4x4_cbp
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
SVQ3Context::frame_num_offset
int frame_num_offset
Definition: svq3.c:113
mpegutils.h
MB_TYPE_INTRA16x16
#define MB_TYPE_INTRA16x16
Definition: mpegutils.h:52
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:659
SVQ3Context::last_frame_output
int last_frame_output
Definition: svq3.c:108
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2651
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
get_buffer
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1354
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
A
#define A(x)
Definition: vp56_arith.h:28
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
golomb.h
exp golomb vlc stuff
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
SVQ3Context::last_pic
SVQ3Frame * last_pic
Definition: svq3.c:93
SVQ3Context::qscale
int qscale
Definition: svq3.c:110
U
#define U(x)
Definition: vp56_arith.h:37
SVQ3Context::topright_samples_available
unsigned int topright_samples_available
Definition: svq3.c:136
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3040
fail
#define fail()
Definition: checkasm.h:120
GetBitContext
Definition: get_bits.h:61
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
SVQ3Context::slice_size
int slice_size
Definition: svq3.c:97
SVQ3Context::tdsp
TpelDSPContext tdsp
Definition: svq3.c:88
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
SVQ3Context::thirdpel_flag
int thirdpel_flag
Definition: svq3.c:99
ff_h264_golomb_to_pict_type
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
src
#define src
Definition: vp8dsp.c:254
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
SVQ3Context::intra4x4_pred_mode_cache
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:132
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
SVQ3Context::gb
GetBitContext gb
Definition: svq3.c:94
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
buf
void * buf
Definition: avisynth_c.h:766
av_cold
#define av_cold
Definition: attributes.h:84
SVQ3Context::cbp
int cbp
Definition: svq3.c:111
FULLPEL_MODE
#define FULLPEL_MODE
Definition: svq3.c:150
SVQ3Context::mb_y
int mb_y
Definition: svq3.c:121
SVQ3Context::mb_x
int mb_x
Definition: svq3.c:121
SVQ3Context::adaptive_quant
int adaptive_quant
Definition: svq3.c:104
decode
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:42
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:1667
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1855
SVQ3Context::buf_size
int buf_size
Definition: svq3.c:103
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
TpelDSPContext
thirdpel DSP context
Definition: tpeldsp.h:42
SVQ3Context::pict_type
enum AVPictureType pict_type
Definition: svq3.c:117
AV_ZERO32
#define AV_ZERO32(d)
Definition: intreadwrite.h:629
svq3_mc_dir
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:504
AV_GET_BUFFER_FLAG_REF
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1176
ff_tpeldsp_init
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
QP_MAX_NUM
#define QP_MAX_NUM
Definition: h264.h:27
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
h264data.h
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2796
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
svq3_pred_motion
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:380
IS_SKIP
#define IS_SKIP(a)
Definition: mpegutils.h:81
SVQ3Context::top_samples_available
unsigned int top_samples_available
Definition: svq3.c:135
IS_INTRA
#define IS_INTRA(x, y)
SVQ3Frame::motion_val_buf
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:71
AV_CODEC_ID_SVQ3
@ AV_CODEC_ID_SVQ3
Definition: avcodec.h:241
SVQ3Context::b_stride
int b_stride
Definition: svq3.c:125
SVQ3Context::prev_frame_num_offset
int prev_frame_num_offset
Definition: svq3.c:114
SVQ3Context::h264dsp
H264DSPContext h264dsp
Definition: svq3.c:85
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
int32_t
int32_t
Definition: audio_convert.c:194
IMbInfo::pred_mode
uint8_t pred_mode
Definition: h264data.h:35
if
if(ret)
Definition: filter_design.txt:179
SVQ3Context::next_p_frame_damaged
int next_p_frame_damaged
Definition: svq3.c:105
SVQ3Frame::motion_val
int16_t(*[2] motion_val)[2]
Definition: svq3.c:72
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
run
uint8_t run
Definition: svq3.c:206
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2200
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
SVQ3Context::mb_width
int mb_width
Definition: svq3.c:123
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
SVQ3Context::mb2br_xy
uint32_t * mb2br_xy
Definition: svq3.c:127
SVQ3Frame::ref_index_buf
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:78
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
mathops.h
list
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list
Definition: filter_design.txt:25
ff_h264_chroma_dc_scan
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
SVQ3Context
Definition: svq3.c:82
AV_ZERO128
#define AV_ZERO128(d)
Definition: intreadwrite.h:637
SVQ3Frame::mb_type_buf
AVBufferRef * mb_type_buf
Definition: svq3.c:74
SVQ3Context::mb_luma_dc
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:144
tpeldsp.h
index
int index
Definition: gxfenc.c:89
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
hl_decode_mb_idct_luma
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:617
HpelDSPContext
Half-pel DSP context.
Definition: hpeldsp.h:45
H264DSPContext
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
SVQ3Context::intra16x16_pred_mode
int intra16x16_pred_mode
Definition: svq3.c:130
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: avcodec.h:810
SVQ3Context::hpc
H264PredContext hpc
Definition: svq3.c:86
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1965
init_dequant4_coeff_table
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1112
ff_zigzag_scan
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:981
svq3_fetch_diagonal_mv
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:359
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:883
AVPacket::size
int size
Definition: avcodec.h:1478
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
rectangle.h
hl_decode_mb
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:671
get_interleaved_se_golomb
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:301
size
int size
Definition: twinvq_data.h:11134
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:92
avpriv_report_missing_feature
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:62
avg
#define avg(a, b, c, d)
Definition: colorspacedsp_template.c:28
header
static const uint8_t header[24]
Definition: sdr2.c:67
height
#define height
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
ff_h264_quant_rem6
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
IS_INTRA16x16
#define IS_INTRA16x16(a)
Definition: mpegutils.h:76
hl_decode_mb_predict_luma
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:632
input
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
Definition: filter_design.txt:172
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
pack16to32
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:660
SVQ3Context::prev_frame_num
int prev_frame_num
Definition: svq3.c:115
svq3_add_idct_c
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:259
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:112
h264dec.h
svq3_decode_frame
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1402
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
svq3_luma_dc_dequant_idct_c
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:224
stride
#define stride
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1666
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
SVQ3Frame
Definition: svq3.c:68
THIRDPEL_MODE
#define THIRDPEL_MODE
Definition: svq3.c:152
SVQ3Context::mv_cache
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:141
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
av_always_inline
#define av_always_inline
Definition: attributes.h:43
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
AV_COPY32
#define AV_COPY32(d, s)
Definition: intreadwrite.h:601
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
SVQ3Context::non_zero_count_cache
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:145
PART_NOT_AVAILABLE
#define PART_NOT_AVAILABLE
Definition: h264dec.h:391
AVCodecContext::height
int height
Definition: avcodec.h:1738
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
svq3_decode_mb
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:719
svq3_scan
static const uint8_t svq3_scan[16]
Definition: svq3.c:164
avcodec.h
ff_h264dsp_init
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
SVQ3Context::halfpel_flag
int halfpel_flag
Definition: svq3.c:98
mid_pred
#define mid_pred
Definition: mathops.h:97
svq3_pred_1
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:190
ret
ret
Definition: filter_design.txt:187
SVQ3Frame::mb_type
uint32_t * mb_type
Definition: svq3.c:75
SVQ3Context::mb_height
int mb_height
Definition: svq3.c:123
SVQ3Context::hdsp
HpelDSPContext hdsp
Definition: svq3.c:87
ff_svq3_decoder
AVCodec ff_svq3_decoder
Definition: svq3.c:1646
SVQ3Context::low_delay
int low_delay
Definition: svq3.c:119
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:790
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
svq3_decode_block
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:299
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:88
B
#define B
Definition: huffyuvdsp.h:32
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:854
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
ff_h264_dequant4_coeff_init
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
SVQ3Frame::f
AVFrame * f
Definition: svq3.c:69
SVQ3Context::block_offset
int block_offset[2 *(16 *3)]
Definition: svq3.c:147
ff_h264_pred_init
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
SVQ3Frame::ref_index
int8_t * ref_index[2]
Definition: svq3.c:79
mode
mode
Definition: ebur128.h:83
ff_h264_check_intra4x4_pred_mode
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:131
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
ff_h264_i_mb_type_info
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:827
svq1.h
SVQ3Context::chroma_pred_mode
int chroma_pred_mode
Definition: svq3.c:129
SVQ3Context::watermark_key
uint32_t watermark_key
Definition: svq3.c:101
SVQ3Context::mb_xy
int mb_xy
Definition: svq3.c:122
scan8
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:644
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
temp
else temp
Definition: vf_mcdeint.c:256
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1006
luma_dc_zigzag_scan
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:171
ff_h264_quant_div6
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
VideoDSPContext
Definition: videodsp.h:41
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:2650
H264PredContext
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
shift
static int shift(int a, int b)
Definition: sonic.c:82
svq3_mc_dir_part
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:429
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
IS_INTER
#define IS_INTER(a)
Definition: mpegutils.h:79
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:81
svq3_decode_end
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1621
AVCodecContext::frame_number
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2256
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:39
SVQ3Context::dequant4_coeff
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:146
SVQ3Context::ref_cache
int8_t ref_cache[2][5 *8]
Definition: svq3.c:142
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:1592
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:500
SVQ3Context::mb_stride
int mb_stride
Definition: svq3.c:124
DIAG_DOWN_LEFT_PRED
@ DIAG_DOWN_LEFT_PRED
Definition: vp9.h:49
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
hpeldsp.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:975
ff_svq1_packet_checksum
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
length
const char int length
Definition: avisynth_c.h:860
svq3_decode_init
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1126
h
h
Definition: vp9dsp_template.c:2038
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: avcodec.h:807
int
int
Definition: ffmpeg_filter.c:191
SVQ3Context::buf
uint8_t * buf
Definition: svq3.c:102
SVQ3Context::cur_pic
SVQ3Frame * cur_pic
Definition: svq3.c:91
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
SVQ3Context::has_watermark
int has_watermark
Definition: svq3.c:100
SVQ3Context::intra4x4_pred_mode
int8_t * intra4x4_pred_mode
Definition: svq3.c:133
svq3_pred_0
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:178
HALFPEL_MODE
#define HALFPEL_MODE
Definition: svq3.c:151
ff_h264_check_intra_pred_mode
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:179