FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264dec.h"
50 #include "h264data.h"
51 #include "golomb.h"
52 #include "hpeldsp.h"
53 #include "mathops.h"
54 #include "rectangle.h"
55 #include "tpeldsp.h"
56 
57 #if CONFIG_ZLIB
58 #include <zlib.h>
59 #endif
60 
61 #include "svq1.h"
62 
63 /**
64  * @file
65  * svq3 decoder.
66  */
67 
/* Per-picture state kept by the SVQ3 decoder.
 * NOTE(review): this text is a Doxygen dump; the member lines originally at
 * upstream lines 69/71/74/78 (the AVFrame pointer and the buffer references
 * backing motion_val/mb_type/ref_index) were dropped by the extraction —
 * confirm against upstream FFmpeg libavcodec/svq3.c before relying on this. */
68 typedef struct SVQ3Frame {
70 
 /* per-list motion vectors, one (x, y) pair per 4x4 block */
72  int16_t (*motion_val[2])[2];
73 
 /* per-macroblock type flags (MB_TYPE_* bits, see svq3_decode_mb) */
75  uint32_t *mb_type;
76 
77 
 /* per-list reference indices */
79  int8_t *ref_index[2];
80 } SVQ3Frame;
81 
/* Top-level decoder state.
 * NOTE(review): this is a Doxygen dump and most members were dropped by the
 * extraction — the code below also references (via s->...) the codec/bitstream
 * contexts, DSP contexts (hdsp/tdsp/vdsp/hpc/h264dsp), the cur/last/next
 * picture pointers, slice state, edge positions and several prediction caches.
 * Compare against upstream FFmpeg libavcodec/svq3.c. */
82 typedef struct SVQ3Context {
84 
90 
 /* key XORed over watermarked slice data (see svq3_decode_slice_header) */
101  uint32_t watermark_key;
103  int buf_size;
 /* current quantizer; validated to 0..31 in svq3_decode_mb */
110  int qscale;
 /* coded block pattern of the current macroblock */
111  int cbp;
116 
120 
 /* current macroblock position */
121  int mb_x, mb_y;
122  int mb_xy;
 /* stride of the per-4x4-block (motion vector) grid */
125  int b_stride;
126 
 /* macroblock index -> offset into the intra4x4_pred_mode array */
127  uint32_t *mb2br_xy;
128 
131 
134 
135  unsigned int top_samples_available;
138 
140 
 /* per-list MV / reference caches laid out on the scan8 grid */
141  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
142  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
 /* residual coefficients for the whole macroblock (luma followed by chroma) */
143  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
144  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
146  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
147  int block_offset[2 * (16 * 3)];
148 } SVQ3Context;
149 
150 #define FULLPEL_MODE 1
151 #define HALFPEL_MODE 2
152 #define THIRDPEL_MODE 3
153 #define PREDICT_MODE 4
154 
155 /* dual scan (from some older H.264 draft)
156  * o-->o-->o   o
157  *         | /|
158  * o   o   o / o
159  * | / |   |/ |
160  * o   o   o   o
161  *           /
162  * o-->o-->o-->o
163  */
/* SVQ3-specific coefficient scan order; each entry is an x + 4*y position in
 * a 4x4 block. Presumably selected through the scan_patterns table in
 * svq3_decode_block (whose initializer was dropped in this dump) — verify
 * against upstream. */
164 static const uint8_t svq3_scan[16] = {
165  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
166  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
167  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
168  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
169 };
170 
/* Zigzag scan of the 16 luma DC slots; entries are offsets into the
 * 16x16-organized coefficient array (16 per block column, 64 per block row).
 * Presumably referenced via scan_patterns in svq3_decode_block (initializer
 * dropped in this dump). */
171 static const uint8_t luma_dc_zigzag_scan[16] = {
172  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
173  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
174  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
175  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
176 };
177 
/* Maps the decoded luma-prediction VLC (0..24) to a pair of intra prediction
 * code indices; consumed as svq3_pred_0[vlc][0] / [1] in svq3_decode_mb.
 * The entries enumerate (a, b) pairs in anti-diagonal order over a 5x5 grid. */
178 static const uint8_t svq3_pred_0[25][2] = {
179  { 0, 0 },
180  { 1, 0 }, { 0, 1 },
181  { 0, 2 }, { 1, 1 }, { 2, 0 },
182  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
183  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
184  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
185  { 2, 4 }, { 3, 3 }, { 4, 2 },
186  { 4, 3 }, { 3, 4 },
187  { 4, 4 }
188 };
189 
/* svq3_pred_1[top + 1][left + 1][code] yields the 4x4 intra prediction mode
 * given the modes of the top and left neighbours (the +1 shift makes room for
 * -1 = "unavailable"). -1 entries mark invalid combinations, which
 * svq3_decode_mb rejects as "weird prediction". */
190 static const int8_t svq3_pred_1[6][6][5] = {
191  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
192  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
193  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
194  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
195  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
196  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
197  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
198  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
199  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
200  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
201  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
202  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
203 };
204 
/* Run/level tables for short DCT coefficient VLCs, indexed by the 'intra'
 * selector computed in svq3_decode_block.
 * NOTE(review): the two member declaration lines (upstream 206-207,
 * presumably "int8_t run;" and "int8_t level;" given the .run/.level accesses
 * in svq3_decode_block) were dropped by the extraction. */
205 static const struct {
208 } svq3_dct_tables[2][16] = {
209  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
210  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
211  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
212  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
213 };
214 
/* Dequantization multipliers indexed by quantizer (0..31); applied with a
 * 20-bit rounding shift in svq3_luma_dc_dequant_idct_c and svq3_add_idct_c. */
215 static const uint32_t svq3_dequant_coeff[32] = {
216  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
217  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
218  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
219  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
220 };
221 
222 static int svq3_decode_end(AVCodecContext *avctx);
223 
224 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
225 {
226  const int qmul = svq3_dequant_coeff[qp];
227 #define stride 16
228  int i;
229  int temp[16];
230  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
231 
232  for (i = 0; i < 4; i++) {
233  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
234  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
235  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
236  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
237 
238  temp[4 * i + 0] = z0 + z3;
239  temp[4 * i + 1] = z1 + z2;
240  temp[4 * i + 2] = z1 - z2;
241  temp[4 * i + 3] = z0 - z3;
242  }
243 
244  for (i = 0; i < 4; i++) {
245  const int offset = x_offset[i];
246  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
247  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
248  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
249  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
250 
251  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
252  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
253  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
254  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
255  }
256 }
257 #undef stride
258 
259 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
260  int stride, int qp, int dc)
261 {
262  const int qmul = svq3_dequant_coeff[qp];
263  int i;
264 
265  if (dc) {
266  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
267  : qmul * (block[0] >> 3) / 2);
268  block[0] = 0;
269  }
270 
271  for (i = 0; i < 4; i++) {
272  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
273  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
274  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
275  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
276 
277  block[0 + 4 * i] = z0 + z3;
278  block[1 + 4 * i] = z1 + z2;
279  block[2 + 4 * i] = z1 - z2;
280  block[3 + 4 * i] = z0 - z3;
281  }
282 
283  for (i = 0; i < 4; i++) {
284  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
285  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
286  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
287  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
288  const int rr = (dc + 0x80000);
289 
290  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
291  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
292  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
293  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
294  }
295 
296  memset(block, 0, 16 * sizeof(int16_t));
297 }
298 
299 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
300  int index, const int type)
301 {
302  static const uint8_t *const scan_patterns[4] = {
304  };
305 
306  int run, level, sign, limit;
307  unsigned vlc;
308  const int intra = 3 * type >> 2;
309  const uint8_t *const scan = scan_patterns[type];
310 
311  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
312  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
313  if ((int32_t)vlc < 0)
314  return -1;
315 
316  sign = (vlc & 1) ? 0 : -1;
317  vlc = vlc + 1 >> 1;
318 
319  if (type == 3) {
320  if (vlc < 3) {
321  run = 0;
322  level = vlc;
323  } else if (vlc < 4) {
324  run = 1;
325  level = 1;
326  } else {
327  run = vlc & 0x3;
328  level = (vlc + 9 >> 2) - run;
329  }
330  } else {
331  if (vlc < 16U) {
332  run = svq3_dct_tables[intra][vlc].run;
333  level = svq3_dct_tables[intra][vlc].level;
334  } else if (intra) {
335  run = vlc & 0x7;
336  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
337  } else {
338  run = vlc & 0xF;
339  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
340  }
341  }
342 
343 
344  if ((index += run) >= limit)
345  return -1;
346 
347  block[scan[index]] = (level ^ sign) - sign;
348  }
349 
350  if (type != 2) {
351  break;
352  }
353  }
354 
355  return 0;
356 }
357 
358 static av_always_inline int
359 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
360  int i, int list, int part_width)
361 {
362  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
363 
364  if (topright_ref != PART_NOT_AVAILABLE) {
365  *C = s->mv_cache[list][i - 8 + part_width];
366  return topright_ref;
367  } else {
368  *C = s->mv_cache[list][i - 8 - 1];
369  return s->ref_cache[list][i - 8 - 1];
370  }
371 }
372 
373 /**
374  * Get the predicted MV.
375  * @param n the block index
376  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
377  * @param mx the x component of the predicted motion vector
378  * @param my the y component of the predicted motion vector
379  */
 /* NOTE(review): the opening signature line (upstream line 380) was dropped
  * by the Doxygen extraction; judging by the call in svq3_mc_dir it is
  * presumably "static ... void svq3_pred_motion(const SVQ3Context *s, int n,"
  * — confirm against upstream. */
381  int part_width, int list,
382  int ref, int *const mx, int *const my)
383 {
384  const int index8 = scan8[n];
385  const int top_ref = s->ref_cache[list][index8 - 8];
386  const int left_ref = s->ref_cache[list][index8 - 1];
387  const int16_t *const A = s->mv_cache[list][index8 - 1];
388  const int16_t *const B = s->mv_cache[list][index8 - 8];
389  const int16_t *C;
390  int diagonal_ref, match_count;
391 
392 /* mv_cache
393  * B . . A T T T T
394  * U . . L . . , .
395  * U . . L . . . .
396  * U . . L . . , .
397  * . . . L . . . .
398  */
399 
400  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
401  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
 /* two or more neighbours share this ref: take the component-wise median */
402  if (match_count > 1) { //most common
403  *mx = mid_pred(A[0], B[0], C[0]);
404  *my = mid_pred(A[1], B[1], C[1]);
405  } else if (match_count == 1) {
 /* exactly one neighbour matches: copy its MV */
406  if (left_ref == ref) {
407  *mx = A[0];
408  *my = A[1];
409  } else if (top_ref == ref) {
410  *mx = B[0];
411  *my = B[1];
412  } else {
413  *mx = C[0];
414  *my = C[1];
415  }
416  } else {
 /* no match: use the left MV when it is the only available neighbour,
  * otherwise the median */
417  if (top_ref == PART_NOT_AVAILABLE &&
418  diagonal_ref == PART_NOT_AVAILABLE &&
419  left_ref != PART_NOT_AVAILABLE) {
420  *mx = A[0];
421  *my = A[1];
422  } else {
423  *mx = mid_pred(A[0], B[0], C[0]);
424  *my = mid_pred(A[1], B[1], C[1]);
425  }
426  }
427 }
428 
 /* Motion-compensate one partition (luma, then both chroma planes unless
  * decoding gray-only). (mx, my) is the MV relative to (x, y); dxy selects
  * the sub-pel interpolation function; dir picks last_pic (0) or next_pic;
  * avg selects averaging (bidirectional) instead of plain put. */
429 static inline void svq3_mc_dir_part(SVQ3Context *s,
430  int x, int y, int width, int height,
431  int mx, int my, int dxy,
432  int thirdpel, int dir, int avg)
433 {
434  const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
435  uint8_t *src, *dest;
436  int i, emu = 0;
437  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
438  int linesize = s->cur_pic->f->linesize[0];
439  int uvlinesize = s->cur_pic->f->linesize[1];
440 
441  mx += x;
442  my += y;
443 
 /* reference area leaves the picture: clip and use edge emulation below */
444  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
445  my < 0 || my >= s->v_edge_pos - height - 1) {
446  emu = 1;
447  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
448  my = av_clip(my, -16, s->v_edge_pos - height + 15);
449  }
450 
451  /* form component predictions */
452  dest = s->cur_pic->f->data[0] + x + y * linesize;
453  src = pic->f->data[0] + mx + my * linesize;
454 
455  if (emu) {
 /* NOTE(review): the call line (upstream 456, presumably
  * "s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src," given the arguments
  * below and the buffer assignment that follows) was dropped by the
  * extraction — confirm against upstream. */
457  linesize, linesize,
458  width + 1, height + 1,
459  mx, my, s->h_edge_pos, s->v_edge_pos);
460  src = s->edge_emu_buffer;
461  }
462  if (thirdpel)
463  (avg ? s->tdsp.avg_tpel_pixels_tab
464  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
465  width, height);
466  else
467  (avg ? s->hdsp.avg_pixels_tab
468  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
469  height);
470 
471  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
 /* halve position and size for the 4:2:0 chroma planes, with rounding */
472  mx = mx + (mx < (int) x) >> 1;
473  my = my + (my < (int) y) >> 1;
474  width = width >> 1;
475  height = height >> 1;
476  blocksize++;
477 
478  for (i = 1; i < 3; i++) {
479  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
480  src = pic->f->data[i] + mx + my * uvlinesize;
481 
482  if (emu) {
 /* NOTE(review): upstream line 483 (the emulated_edge_mc call opening,
  * mirroring the luma case above) was dropped by the extraction. */
484  uvlinesize, uvlinesize,
485  width + 1, height + 1,
486  mx, my, (s->h_edge_pos >> 1),
487  s->v_edge_pos >> 1);
488  src = s->edge_emu_buffer;
489  }
490  if (thirdpel)
491  (avg ? s->tdsp.avg_tpel_pixels_tab
492  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
493  uvlinesize,
494  width, height);
495  else
496  (avg ? s->hdsp.avg_pixels_tab
497  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
498  uvlinesize,
499  height);
500  }
501  }
502 }
503 
 /* Predict, decode and apply the motion vectors of every partition of the
  * current macroblock for one prediction direction. Returns 0 on success,
  * -1 on an invalid MV VLC. MVs are handled in units of 1/6 pel. */
504 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
505  int dir, int avg)
506 {
507  int i, j, k, mx, my, dx, dy, x, y;
 /* derive the partition geometry from the encoded size code */
508  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
509  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
510  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
511  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
512  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
513 
514  for (i = 0; i < 16; i += part_height)
515  for (j = 0; j < 16; j += part_width) {
516  const int b_xy = (4 * s->mb_x + (j >> 2)) +
517  (4 * s->mb_y + (i >> 2)) * s->b_stride;
518  int dxy;
519  x = 16 * s->mb_x + j;
520  y = 16 * s->mb_y + i;
521  k = (j >> 2 & 1) + (i >> 1 & 2) +
522  (j >> 1 & 4) + (i & 8);
523 
524  if (mode != PREDICT_MODE) {
525  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
526  } else {
 /* PREDICT_MODE: scale the co-located MV of the next picture by the
  * temporal distance instead of decoding one */
527  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
528  my = s->next_pic->motion_val[0][b_xy][1] << 1;
529 
530  if (dir == 0) {
531  mx = mx * s->frame_num_offset /
532  s->prev_frame_num_offset + 1 >> 1;
533  my = my * s->frame_num_offset /
534  s->prev_frame_num_offset + 1 >> 1;
535  } else {
536  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
537  s->prev_frame_num_offset + 1 >> 1;
538  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
539  s->prev_frame_num_offset + 1 >> 1;
540  }
541  }
542 
543  /* clip motion vector prediction to frame border */
544  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
545  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
546 
547  /* get (optional) motion vector differential */
548  if (mode == PREDICT_MODE) {
549  dx = dy = 0;
550  } else {
 /* NOTE(review): the two read lines (upstream 551-552, presumably
  * "dy = get_interleaved_se_golomb(&s->gb_slice);" and the matching dx
  * read, given the INVALID_VLC check below) were dropped by the
  * extraction — confirm against upstream. */
553 
554  if (dx == INVALID_VLC || dy == INVALID_VLC) {
555  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
556  return -1;
557  }
558  }
559 
560  /* compute motion vector */
561  if (mode == THIRDPEL_MODE) {
562  int fx, fy;
563  mx = (mx + 1 >> 1) + dx;
564  my = (my + 1 >> 1) + dy;
565  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
566  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
567  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
568 
569  svq3_mc_dir_part(s, x, y, part_width, part_height,
570  fx, fy, dxy, 1, dir, avg);
571  mx += mx;
572  my += my;
573  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
574  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
575  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
576  dxy = (mx & 1) + 2 * (my & 1);
577 
578  svq3_mc_dir_part(s, x, y, part_width, part_height,
579  mx >> 1, my >> 1, dxy, 0, dir, avg);
580  mx *= 3;
581  my *= 3;
582  } else {
583  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
584  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
585 
586  svq3_mc_dir_part(s, x, y, part_width, part_height,
587  mx, my, 0, 0, dir, avg);
588  mx *= 6;
589  my *= 6;
590  }
591 
592  /* update mv_cache */
593  if (mode != PREDICT_MODE) {
594  int32_t mv = pack16to32(mx, my);
595 
596  if (part_height == 8 && i < 8) {
597  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
598 
599  if (part_width == 8 && j < 8)
600  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
601  }
602  if (part_width == 8 && j < 8)
603  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
604  if (part_width == 4 || part_height == 4)
605  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
606  }
607 
608  /* write back motion vectors */
609  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
610  part_width >> 2, part_height >> 2, s->b_stride,
611  pack16to32(mx, my), 4);
612  }
613 
614  return 0;
615 }
616 
 /* Add the luma residual of every coded 4x4 block for non-intra4x4
  * macroblocks (intra4x4 residuals are added during prediction).
  * NOTE(review): the opening signature line (upstream 617, presumably
  * "static void hl_decode_mb_idct_luma(SVQ3Context *s,") was dropped by the
  * extraction — confirm against upstream. */
618  int mb_type, const int *block_offset,
619  int linesize, uint8_t *dest_y)
620 {
621  int i;
622  if (!IS_INTRA4x4(mb_type)) {
623  for (i = 0; i < 16; i++)
 /* only blocks with coded coefficients or a non-zero DC */
624  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
625  uint8_t *const ptr = dest_y + block_offset[i];
626  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
627  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
628  }
629  }
630 }
631 
632 static av_always_inline int dctcoef_get(int16_t *mb, int index)
633 {
634  return AV_RN16A(mb + index);
635 }
636 
 /* Perform luma intra prediction (4x4 or 16x16) and add any residual.
  * NOTE(review): the opening signature line (upstream 637, presumably
  * "static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,")
  * was dropped by the extraction — confirm against upstream. */
638  int mb_type,
639  const int *block_offset,
640  int linesize,
641  uint8_t *dest_y)
642 {
643  int i;
644  int qscale = s->qscale;
645 
646  if (IS_INTRA4x4(mb_type)) {
647  for (i = 0; i < 16; i++) {
648  uint8_t *const ptr = dest_y + block_offset[i];
649  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
650 
651  uint8_t *topright;
652  int nnz, tr;
 /* these two modes read the top-right samples; synthesize them by
  * replicating the rightmost top sample when unavailable */
653  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
654  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
655  av_assert2(s->mb_y || linesize <= block_offset[i]);
656  if (!topright_avail) {
657  tr = ptr[3 - linesize] * 0x01010101u;
658  topright = (uint8_t *)&tr;
659  } else
660  topright = ptr + 4 - linesize;
661  } else
662  topright = NULL;
663 
664  s->hpc.pred4x4[dir](ptr, topright, linesize);
665  nnz = s->non_zero_count_cache[scan8[i]];
666  if (nnz) {
667  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
668  }
669  }
670  } else {
 /* 16x16 intra: predict the whole macroblock, then reconstruct the DC
  * coefficients into the per-block DC slots */
671  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
672  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
673  }
674 }
675 
 /* Reconstruct the current macroblock: luma/chroma intra prediction,
  * residual IDCT and chroma reconstruction.
  * NOTE(review): the function signature line (upstream 676, presumably
  * "static void hl_decode_mb(SVQ3Context *s)") was dropped by the
  * extraction — confirm against upstream. */
677 {
678  const int mb_x = s->mb_x;
679  const int mb_y = s->mb_y;
680  const int mb_xy = s->mb_xy;
681  const int mb_type = s->cur_pic->mb_type[mb_xy];
682  uint8_t *dest_y, *dest_cb, *dest_cr;
683  int linesize, uvlinesize;
684  int i, j;
685  const int *block_offset = &s->block_offset[0];
686  const int block_h = 16 >> 1;
687 
688  linesize = s->cur_pic->f->linesize[0];
689  uvlinesize = s->cur_pic->f->linesize[1];
690 
691  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
692  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
693  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
694 
695  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
696  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
697 
698  if (IS_INTRA(mb_type)) {
699  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
700  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
701 
702  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
703  }
704 
705  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
706 
 /* chroma residual present (cbp bits 4-5) */
707  if (s->cbp & 0x30) {
708  uint8_t *dest[2] = { dest_cb, dest_cr };
709  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
710  s->dequant4_coeff[4][0]);
711  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
712  s->dequant4_coeff[4][0]);
713  for (j = 1; j < 3; j++) {
714  for (i = j * 16; i < j * 16 + 4; i++)
715  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
716  uint8_t *const ptr = dest[j - 1] + block_offset[i];
717  svq3_add_idct_c(ptr, s->mb + i * 16,
718  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
719  }
720  }
721  }
722 }
723 
 /* Decode one macroblock: classify its type (SKIP / INTER / INTRA4x4 /
  * INTRA16x16), decode motion or intra prediction info, then the residual.
  * Returns 0 on success, -1 on a bitstream error.
  * NOTE(review): this Doxygen dump dropped several upstream lines inside this
  * function (863, 866, 893-895, 912, 947, 951, 962, 981, 991, 1003,
  * 1017-1018) — individual gaps are annotated below; confirm everything
  * against upstream FFmpeg svq3.c. */
724 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
725 {
726  int i, j, k, m, dir, mode;
727  int cbp = 0;
728  uint32_t vlc;
729  int8_t *top, *left;
730  const int mb_xy = s->mb_xy;
731  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
732 
733  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
734  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
735  s->topright_samples_available = 0xFFFF;
736 
737  if (mb_type == 0) { /* SKIP */
738  if (s->pict_type == AV_PICTURE_TYPE_P ||
739  s->next_pic->mb_type[mb_xy] == -1) {
740  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
741  0, 0, 0, 0, 0, 0);
742 
743  if (s->pict_type == AV_PICTURE_TYPE_B)
744  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
745  0, 0, 0, 0, 1, 1);
746 
747  mb_type = MB_TYPE_SKIP;
748  } else {
 /* B-frame skip with a usable co-located MB: temporal prediction */
749  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
750  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
751  return -1;
752  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
753  return -1;
754 
755  mb_type = MB_TYPE_16x16;
756  }
757  } else if (mb_type < 8) { /* INTER */
758  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
759  mode = THIRDPEL_MODE;
760  else if (s->halfpel_flag &&
761  s->thirdpel_flag == !get_bits1(&s->gb_slice))
762  mode = HALFPEL_MODE;
763  else
764  mode = FULLPEL_MODE;
765 
766  /* fill caches */
767  /* note ref_cache should contain here:
768  * ????????
769  * ???11111
770  * N??11111
771  * N??11111
772  * N??11111
773  */
774 
775  for (m = 0; m < 2; m++) {
 /* left neighbour MVs, valid only if the left MB was inter-coded */
776  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
777  for (i = 0; i < 4; i++)
778  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
779  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
780  } else {
781  for (i = 0; i < 4; i++)
782  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
783  }
784  if (s->mb_y > 0) {
 /* top, top-right and top-left neighbour MVs / availability flags */
785  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
786  s->cur_pic->motion_val[m][b_xy - s->b_stride],
787  4 * 2 * sizeof(int16_t));
788  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
789  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
790 
791  if (s->mb_x < s->mb_width - 1) {
792  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
793  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
794  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
795  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
796  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
797  } else
798  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
799  if (s->mb_x > 0) {
800  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
801  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
802  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
803  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
804  } else
805  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
806  } else
807  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
808  PART_NOT_AVAILABLE, 8);
809 
 /* list 1 is only needed for B pictures */
810  if (s->pict_type != AV_PICTURE_TYPE_B)
811  break;
812  }
813 
814  /* decode motion vector(s) and form prediction(s) */
815  if (s->pict_type == AV_PICTURE_TYPE_P) {
816  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
817  return -1;
818  } else { /* AV_PICTURE_TYPE_B */
819  if (mb_type != 2) {
820  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
821  return -1;
822  } else {
823  for (i = 0; i < 4; i++)
824  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
825  0, 4 * 2 * sizeof(int16_t));
826  }
827  if (mb_type != 1) {
828  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
829  return -1;
830  } else {
831  for (i = 0; i < 4; i++)
832  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
833  0, 4 * 2 * sizeof(int16_t));
834  }
835  }
836 
837  mb_type = MB_TYPE_16x16;
838  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
839  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
840  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
841 
842  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
843 
844  if (mb_type == 8) {
 /* import neighbouring prediction modes into the cache */
845  if (s->mb_x > 0) {
846  for (i = 0; i < 4; i++)
847  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
848  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
849  s->left_samples_available = 0x5F5F;
850  }
851  if (s->mb_y > 0) {
852  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
853  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
854  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
855  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
856 
857  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
858  s->top_samples_available = 0x33FF;
859  }
860 
861  /* decode prediction codes for luma blocks */
862  for (i = 0; i < 16; i += 2) {
 /* NOTE(review): the read line (upstream 863, presumably
  * "vlc = get_interleaved_ue_golomb(&s->gb_slice);" given the range
  * check below) was dropped by the extraction. */
864 
865  if (vlc >= 25U) {
 /* NOTE(review): upstream line 866 (the av_log call opening) was
  * dropped by the extraction. */
867  "luma prediction:%"PRIu32"\n", vlc);
868  return -1;
869  }
870 
871  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
872  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
873 
874  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
875  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
876 
877  if (left[1] == -1 || left[2] == -1) {
878  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
879  return -1;
880  }
881  }
882  } else { /* mb_type == 33, DC_128_PRED block type */
883  for (i = 0; i < 4; i++)
884  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
885  }
886 
 /* store this MB's bottom/right prediction modes for future neighbours */
887  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
888  i4x4[4] = i4x4_cache[7 + 8 * 3];
889  i4x4[5] = i4x4_cache[7 + 8 * 2];
890  i4x4[6] = i4x4_cache[7 + 8 * 1];
891 
892  if (mb_type == 8) {
 /* NOTE(review): upstream lines 893-895 (apparently the
  * ff_h264_check_intra4x4_pred_mode call validating the decoded modes)
  * were dropped by the extraction — confirm against upstream. */
896 
897  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
898  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
899  } else {
900  for (i = 0; i < 4; i++)
901  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
902 
903  s->top_samples_available = 0x33FF;
904  s->left_samples_available = 0x5F5F;
905  }
906 
907  mb_type = MB_TYPE_INTRA4x4;
908  } else { /* INTRA16x16 */
909  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
910  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
911 
 /* NOTE(review): upstream line 912 (apparently the opening of
  * "if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(...,") was
  * dropped by the extraction — the arguments and error path follow. */
913  s->left_samples_available, dir, 0)) < 0) {
914  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
915  return s->intra16x16_pred_mode;
916  }
917 
918  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
919  mb_type = MB_TYPE_INTRA16x16;
920  }
921 
 /* clear MVs for non-inter MBs so neighbour prediction sees zeros */
922  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
923  for (i = 0; i < 4; i++)
924  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
925  0, 4 * 2 * sizeof(int16_t));
926  if (s->pict_type == AV_PICTURE_TYPE_B) {
927  for (i = 0; i < 4; i++)
928  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
929  0, 4 * 2 * sizeof(int16_t));
930  }
931  }
932  if (!IS_INTRA4x4(mb_type)) {
933  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
934  }
935  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
936  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
937  }
938 
 /* decode the coded block pattern unless it is implied by the MB type */
939  if (!IS_INTRA16x16(mb_type) &&
940  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
941  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
942  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
943  return -1;
944  }
945 
946  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
 /* NOTE(review): upstream line 947 (presumably
  * ": ff_h264_golomb_to_inter_cbp[vlc];") was dropped by the extraction. */
948  }
949  if (IS_INTRA16x16(mb_type) ||
950  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
 /* NOTE(review): upstream line 951 (the qscale delta read, presumably
  * "s->qscale += get_interleaved_se_golomb(&s->gb_slice);") was dropped
  * by the extraction — the range check follows. */
952 
953  if (s->qscale > 31u) {
954  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
955  return -1;
956  }
957  }
958  if (IS_INTRA16x16(mb_type)) {
959  AV_ZERO128(s->mb_luma_dc[0] + 0);
960  AV_ZERO128(s->mb_luma_dc[0] + 8);
961  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
 /* NOTE(review): upstream line 962 (the av_log call opening) was
  * dropped by the extraction. */
963  "error while decoding intra luma dc\n");
964  return -1;
965  }
966  }
967 
968  if (cbp) {
969  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
970  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
971 
972  for (i = 0; i < 4; i++)
973  if ((cbp & (1 << i))) {
974  for (j = 0; j < 4; j++) {
975  k = index ? (1 * (j & 1) + 2 * (i & 1) +
976  2 * (j & 2) + 4 * (i & 2))
977  : (4 * i + j);
978  s->non_zero_count_cache[scan8[k]] = 1;
979 
980  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
 /* NOTE(review): upstream line 981 (the av_log call opening) was
  * dropped by the extraction. */
982  "error while decoding block\n");
983  return -1;
984  }
985  }
986  }
987 
988  if ((cbp & 0x30)) {
989  for (i = 1; i < 3; ++i)
990  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
 /* NOTE(review): upstream line 991 (the av_log call opening) was
  * dropped by the extraction. */
992  "error while decoding chroma dc block\n");
993  return -1;
994  }
995 
996  if ((cbp & 0x20)) {
997  for (i = 1; i < 3; i++) {
998  for (j = 0; j < 4; j++) {
999  k = 16 * i + j;
1000  s->non_zero_count_cache[scan8[k]] = 1;
1001 
1002  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
 /* NOTE(review): upstream line 1003 (the av_log call opening) was
  * dropped by the extraction. */
1004  "error while decoding chroma ac block\n");
1005  return -1;
1006  }
1007  }
1008  }
1009  }
1010  }
1011  }
1012 
1013  s->cbp = cbp;
1014  s->cur_pic->mb_type[mb_xy] = mb_type;
1015 
1016  if (IS_INTRA(mb_type))
 /* NOTE(review): upstream lines 1017-1018 (apparently the write-back of
  * the checked chroma prediction mode) were dropped by the extraction —
  * confirm against upstream. */
1019 
1020  return 0;
1021 }
1022 
1024 {
1025  SVQ3Context *s = avctx->priv_data;
1026  const int mb_xy = s->mb_xy;
1027  int i, header;
1028  unsigned slice_id;
1029 
1030  header = get_bits(&s->gb, 8);
1031 
1032  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1033  /* TODO: what? */
1034  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1035  return -1;
1036  } else {
1037  int slice_bits, slice_bytes, slice_length;
1038  int length = header >> 5 & 3;
1039 
1040  slice_length = show_bits(&s->gb, 8 * length);
1041  slice_bits = slice_length * 8;
1042  slice_bytes = slice_length + length - 1;
1043 
1044  if (8LL*slice_bytes > get_bits_left(&s->gb)) {
1045  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1046  return -1;
1047  }
1048 
1049  skip_bits(&s->gb, 8);
1050 
1052  if (!s->slice_buf)
1053  return AVERROR(ENOMEM);
1054 
1055  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1056 
1057  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1058 
1059  if (s->watermark_key) {
1060  uint32_t header = AV_RL32(&s->gb_slice.buffer[1]);
1061  AV_WL32(&s->gb_slice.buffer[1], header ^ s->watermark_key);
1062  }
1063  if (length > 0) {
1064  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1065  }
1066  skip_bits_long(&s->gb, slice_bytes * 8);
1067  }
1068 
1069  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1070  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1071  return -1;
1072  }
1073 
1074  s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1075 
1076  if ((header & 0x9F) == 2) {
1077  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1078  get_bits(&s->gb_slice, i);
1079  } else {
1080  skip_bits1(&s->gb_slice);
1081  }
1082 
1083  s->slice_num = get_bits(&s->gb_slice, 8);
1084  s->qscale = get_bits(&s->gb_slice, 5);
1085  s->adaptive_quant = get_bits1(&s->gb_slice);
1086 
1087  /* unknown fields */
1088  skip_bits1(&s->gb_slice);
1089 
1090  if (s->has_watermark)
1091  skip_bits1(&s->gb_slice);
1092 
1093  skip_bits1(&s->gb_slice);
1094  skip_bits(&s->gb_slice, 2);
1095 
1096  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1097  return AVERROR_INVALIDDATA;
1098 
1099  /* reset intra predictors and invalidate motion vector references */
1100  if (s->mb_x > 0) {
1101  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1102  -1, 4 * sizeof(int8_t));
1103  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1104  -1, 8 * sizeof(int8_t) * s->mb_x);
1105  }
1106  if (s->mb_y > 0) {
1107  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1108  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1109 
1110  if (s->mb_x > 0)
1111  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1112  }
1113 
1114  return 0;
1115 }
1116 
1118 {
1119  int q, x;
1120  const int max_qp = 51;
1121 
1122  for (q = 0; q < max_qp + 1; q++) {
1123  int shift = ff_h264_quant_div6[q] + 2;
1124  int idx = ff_h264_quant_rem6[q];
1125  for (x = 0; x < 16; x++)
1126  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1127  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1128  }
1129 }
1130 
1132 {
1133  SVQ3Context *s = avctx->priv_data;
1134  int m, x, y;
1135  unsigned char *extradata;
1136  unsigned char *extradata_end;
1137  unsigned int size;
1138  int marker_found = 0;
1139  int ret;
1140 
1141  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1142  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1143  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1144  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1145  ret = AVERROR(ENOMEM);
1146  goto fail;
1147  }
1148 
1149  s->cur_pic->f = av_frame_alloc();
1150  s->last_pic->f = av_frame_alloc();
1151  s->next_pic->f = av_frame_alloc();
1152  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1153  return AVERROR(ENOMEM);
1154 
1155  ff_h264dsp_init(&s->h264dsp, 8, 1);
1157  ff_videodsp_init(&s->vdsp, 8);
1158 
1159 
1160  avctx->bits_per_raw_sample = 8;
1161 
1162  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1163  ff_tpeldsp_init(&s->tdsp);
1164 
1165  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1166  avctx->color_range = AVCOL_RANGE_JPEG;
1167 
1168  s->avctx = avctx;
1169  s->halfpel_flag = 1;
1170  s->thirdpel_flag = 1;
1171  s->has_watermark = 0;
1172 
1173  /* prowl for the "SEQH" marker in the extradata */
1174  extradata = (unsigned char *)avctx->extradata;
1175  extradata_end = avctx->extradata + avctx->extradata_size;
1176  if (extradata) {
1177  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1178  if (!memcmp(extradata, "SEQH", 4)) {
1179  marker_found = 1;
1180  break;
1181  }
1182  extradata++;
1183  }
1184  }
1185 
1186  /* if a match was found, parse the extra data */
1187  if (marker_found) {
1188  GetBitContext gb;
1189  int frame_size_code;
1190  int unk0, unk1, unk2, unk3, unk4;
1191 
1192  size = AV_RB32(&extradata[4]);
1193  if (size > extradata_end - extradata - 8) {
1194  ret = AVERROR_INVALIDDATA;
1195  goto fail;
1196  }
1197  init_get_bits(&gb, extradata + 8, size * 8);
1198 
1199  /* 'frame size code' and optional 'width, height' */
1200  frame_size_code = get_bits(&gb, 3);
1201  switch (frame_size_code) {
1202  case 0:
1203  avctx->width = 160;
1204  avctx->height = 120;
1205  break;
1206  case 1:
1207  avctx->width = 128;
1208  avctx->height = 96;
1209  break;
1210  case 2:
1211  avctx->width = 176;
1212  avctx->height = 144;
1213  break;
1214  case 3:
1215  avctx->width = 352;
1216  avctx->height = 288;
1217  break;
1218  case 4:
1219  avctx->width = 704;
1220  avctx->height = 576;
1221  break;
1222  case 5:
1223  avctx->width = 240;
1224  avctx->height = 180;
1225  break;
1226  case 6:
1227  avctx->width = 320;
1228  avctx->height = 240;
1229  break;
1230  case 7:
1231  avctx->width = get_bits(&gb, 12);
1232  avctx->height = get_bits(&gb, 12);
1233  break;
1234  }
1235 
1236  s->halfpel_flag = get_bits1(&gb);
1237  s->thirdpel_flag = get_bits1(&gb);
1238 
1239  /* unknown fields */
1240  unk0 = get_bits1(&gb);
1241  unk1 = get_bits1(&gb);
1242  unk2 = get_bits1(&gb);
1243  unk3 = get_bits1(&gb);
1244 
1245  s->low_delay = get_bits1(&gb);
1246 
1247  /* unknown field */
1248  unk4 = get_bits1(&gb);
1249 
1250  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1251  unk0, unk1, unk2, unk3, unk4);
1252 
1253  if (skip_1stop_8data_bits(&gb) < 0) {
1254  ret = AVERROR_INVALIDDATA;
1255  goto fail;
1256  }
1257 
1258  s->has_watermark = get_bits1(&gb);
1259  avctx->has_b_frames = !s->low_delay;
1260  if (s->has_watermark) {
1261 #if CONFIG_ZLIB
1262  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1263  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1264  int u1 = get_interleaved_ue_golomb(&gb);
1265  int u2 = get_bits(&gb, 8);
1266  int u3 = get_bits(&gb, 2);
1267  int u4 = get_interleaved_ue_golomb(&gb);
1268  unsigned long buf_len = watermark_width *
1269  watermark_height * 4;
1270  int offset = get_bits_count(&gb) + 7 >> 3;
1271  uint8_t *buf;
1272 
1273  if (watermark_height <= 0 ||
1274  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1275  ret = -1;
1276  goto fail;
1277  }
1278 
1279  buf = av_malloc(buf_len);
1280  if (!buf) {
1281  ret = AVERROR(ENOMEM);
1282  goto fail;
1283  }
1284  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1285  watermark_width, watermark_height);
1286  av_log(avctx, AV_LOG_DEBUG,
1287  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1288  u1, u2, u3, u4, offset);
1289  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1290  size - offset) != Z_OK) {
1291  av_log(avctx, AV_LOG_ERROR,
1292  "could not uncompress watermark logo\n");
1293  av_free(buf);
1294  ret = -1;
1295  goto fail;
1296  }
1297  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1298  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1299  av_log(avctx, AV_LOG_DEBUG,
1300  "watermark key %#"PRIx32"\n", s->watermark_key);
1301  av_free(buf);
1302 #else
1303  av_log(avctx, AV_LOG_ERROR,
1304  "this svq3 file contains watermark which need zlib support compiled in\n");
1305  ret = -1;
1306  goto fail;
1307 #endif
1308  }
1309  }
1310 
1311  s->mb_width = (avctx->width + 15) / 16;
1312  s->mb_height = (avctx->height + 15) / 16;
1313  s->mb_stride = s->mb_width + 1;
1314  s->mb_num = s->mb_width * s->mb_height;
1315  s->b_stride = 4 * s->mb_width;
1316  s->h_edge_pos = s->mb_width * 16;
1317  s->v_edge_pos = s->mb_height * 16;
1318 
1319  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1320  if (!s->intra4x4_pred_mode)
1321  return AVERROR(ENOMEM);
1322 
1323  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1324  sizeof(*s->mb2br_xy));
1325  if (!s->mb2br_xy)
1326  return AVERROR(ENOMEM);
1327 
1328  for (y = 0; y < s->mb_height; y++)
1329  for (x = 0; x < s->mb_width; x++) {
1330  const int mb_xy = x + y * s->mb_stride;
1331 
1332  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1333  }
1334 
1336 
1337  return 0;
1338 fail:
1339  svq3_decode_end(avctx);
1340  return ret;
1341 }
1342 
1343 static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1344 {
1345  int i;
1346  for (i = 0; i < 2; i++) {
1347  av_buffer_unref(&pic->motion_val_buf[i]);
1348  av_buffer_unref(&pic->ref_index_buf[i]);
1349  }
1351 
1352  av_frame_unref(pic->f);
1353 }
1354 
1355 static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1356 {
1357  SVQ3Context *s = avctx->priv_data;
1358  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1359  const int mb_array_size = s->mb_stride * s->mb_height;
1360  const int b4_stride = s->mb_width * 4 + 1;
1361  const int b4_array_size = b4_stride * s->mb_height * 4;
1362  int ret;
1363 
1364  if (!pic->motion_val_buf[0]) {
1365  int i;
1366 
1367  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1368  if (!pic->mb_type_buf)
1369  return AVERROR(ENOMEM);
1370  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1371 
1372  for (i = 0; i < 2; i++) {
1373  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1374  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1375  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1376  ret = AVERROR(ENOMEM);
1377  goto fail;
1378  }
1379 
1380  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1381  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1382  }
1383  }
1384 
1385  ret = ff_get_buffer(avctx, pic->f,
1386  (s->pict_type != AV_PICTURE_TYPE_B) ?
1388  if (ret < 0)
1389  goto fail;
1390 
1391  if (!s->edge_emu_buffer) {
1392  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1393  if (!s->edge_emu_buffer)
1394  return AVERROR(ENOMEM);
1395  }
1396 
1397  return 0;
1398 fail:
1399  free_picture(avctx, pic);
1400  return ret;
1401 }
1402 
1403 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1404  int *got_frame, AVPacket *avpkt)
1405 {
1406  SVQ3Context *s = avctx->priv_data;
1407  int buf_size = avpkt->size;
1408  int left;
1409  uint8_t *buf;
1410  int ret, m, i;
1411 
1412  /* special case for last picture */
1413  if (buf_size == 0) {
1414  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1415  ret = av_frame_ref(data, s->next_pic->f);
1416  if (ret < 0)
1417  return ret;
1418  s->last_frame_output = 1;
1419  *got_frame = 1;
1420  }
1421  return 0;
1422  }
1423 
1424  s->mb_x = s->mb_y = s->mb_xy = 0;
1425 
1426  if (s->watermark_key) {
1427  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1428  if (!s->buf)
1429  return AVERROR(ENOMEM);
1430  memcpy(s->buf, avpkt->data, buf_size);
1431  buf = s->buf;
1432  } else {
1433  buf = avpkt->data;
1434  }
1435 
1436  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1437  if (ret < 0)
1438  return ret;
1439 
1440  if (svq3_decode_slice_header(avctx))
1441  return -1;
1442 
1443  s->pict_type = s->slice_type;
1444 
1445  if (s->pict_type != AV_PICTURE_TYPE_B)
1446  FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1447 
1448  av_frame_unref(s->cur_pic->f);
1449 
1450  /* for skipping the frame */
1451  s->cur_pic->f->pict_type = s->pict_type;
1453 
1454  ret = get_buffer(avctx, s->cur_pic);
1455  if (ret < 0)
1456  return ret;
1457 
1458  for (i = 0; i < 16; i++) {
1459  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1460  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1461  }
1462  for (i = 0; i < 16; i++) {
1463  s->block_offset[16 + i] =
1464  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1465  s->block_offset[48 + 16 + i] =
1466  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1467  }
1468 
1469  if (s->pict_type != AV_PICTURE_TYPE_I) {
1470  if (!s->last_pic->f->data[0]) {
1471  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1472  av_frame_unref(s->last_pic->f);
1473  ret = get_buffer(avctx, s->last_pic);
1474  if (ret < 0)
1475  return ret;
1476  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1477  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1478  s->last_pic->f->linesize[1]);
1479  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1480  s->last_pic->f->linesize[2]);
1481  }
1482 
1483  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1484  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1485  av_frame_unref(s->next_pic->f);
1486  ret = get_buffer(avctx, s->next_pic);
1487  if (ret < 0)
1488  return ret;
1489  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1490  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1491  s->next_pic->f->linesize[1]);
1492  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1493  s->next_pic->f->linesize[2]);
1494  }
1495  }
1496 
1497  if (avctx->debug & FF_DEBUG_PICT_INFO)
1499  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1501  s->halfpel_flag, s->thirdpel_flag,
1502  s->adaptive_quant, s->qscale, s->slice_num);
1503 
1504  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1506  avctx->skip_frame >= AVDISCARD_ALL)
1507  return 0;
1508 
1509  if (s->next_p_frame_damaged) {
1510  if (s->pict_type == AV_PICTURE_TYPE_B)
1511  return 0;
1512  else
1513  s->next_p_frame_damaged = 0;
1514  }
1515 
1516  if (s->pict_type == AV_PICTURE_TYPE_B) {
1518 
1519  if (s->frame_num_offset < 0)
1520  s->frame_num_offset += 256;
1521  if (s->frame_num_offset == 0 ||
1523  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1524  return -1;
1525  }
1526  } else {
1527  s->prev_frame_num = s->frame_num;
1528  s->frame_num = s->slice_num;
1530 
1531  if (s->prev_frame_num_offset < 0)
1532  s->prev_frame_num_offset += 256;
1533  }
1534 
1535  for (m = 0; m < 2; m++) {
1536  int i;
1537  for (i = 0; i < 4; i++) {
1538  int j;
1539  for (j = -1; j < 4; j++)
1540  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1541  if (i < 3)
1542  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1543  }
1544  }
1545 
1546  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1547  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1548  unsigned mb_type;
1549  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1550 
1551  if ((get_bits_left(&s->gb_slice)) <= 7) {
1552  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1553  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1554 
1555  if (svq3_decode_slice_header(avctx))
1556  return -1;
1557  }
1558  if (s->slice_type != s->pict_type) {
1559  avpriv_request_sample(avctx, "non constant slice type\n");
1560  }
1561  /* TODO: support s->mb_skip_run */
1562  }
1563 
1564  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1565 
1566  if (s->pict_type == AV_PICTURE_TYPE_I)
1567  mb_type += 8;
1568  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1569  mb_type += 4;
1570  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1572  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1573  return -1;
1574  }
1575 
1576  if (mb_type != 0 || s->cbp)
1577  hl_decode_mb(s);
1578 
1579  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1580  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1581  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1582  }
1583 
1584  ff_draw_horiz_band(avctx, s->cur_pic->f,
1585  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1586  16 * s->mb_y, 16, PICT_FRAME, 0,
1587  s->low_delay);
1588  }
1589 
1590  left = buf_size*8 - get_bits_count(&s->gb_slice);
1591 
1592  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1593  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1594  //av_hex_dump(stderr, buf+buf_size-8, 8);
1595  }
1596 
1597  if (left < 0) {
1598  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1599  return -1;
1600  }
1601 
1602  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1603  ret = av_frame_ref(data, s->cur_pic->f);
1604  else if (s->last_pic->f->data[0])
1605  ret = av_frame_ref(data, s->last_pic->f);
1606  if (ret < 0)
1607  return ret;
1608 
1609  /* Do not output the last pic after seeking. */
1610  if (s->last_pic->f->data[0] || s->low_delay)
1611  *got_frame = 1;
1612 
1613  if (s->pict_type != AV_PICTURE_TYPE_B) {
1614  FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1615  } else {
1616  av_frame_unref(s->cur_pic->f);
1617  }
1618 
1619  return buf_size;
1620 }
1621 
1623 {
1624  SVQ3Context *s = avctx->priv_data;
1625 
1626  free_picture(avctx, s->cur_pic);
1627  free_picture(avctx, s->next_pic);
1628  free_picture(avctx, s->last_pic);
1629  av_frame_free(&s->cur_pic->f);
1630  av_frame_free(&s->next_pic->f);
1631  av_frame_free(&s->last_pic->f);
1632  av_freep(&s->cur_pic);
1633  av_freep(&s->next_pic);
1634  av_freep(&s->last_pic);
1635  av_freep(&s->slice_buf);
1638  av_freep(&s->mb2br_xy);
1639 
1640 
1641  av_freep(&s->buf);
1642  s->buf_size = 0;
1643 
1644  return 0;
1645 }
1646 
1648  .name = "svq3",
1649  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1650  .type = AVMEDIA_TYPE_VIDEO,
1651  .id = AV_CODEC_ID_SVQ3,
1652  .priv_data_size = sizeof(SVQ3Context),
1654  .close = svq3_decode_end,
1656  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1659  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1660  AV_PIX_FMT_NONE},
1661 };
#define MB_TYPE_INTRA16x16
Definition: avcodec.h:1254
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
#define MB_TYPE_SKIP
Definition: avcodec.h:1264
discard all frames except keyframes
Definition: avcodec.h:786
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:111
static int shift(int a, int b)
Definition: sonic.c:82
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
#define C
HpelDSPContext hdsp
Definition: svq3.c:87
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:247
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:299
else temp
Definition: vf_mcdeint.c:259
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:204
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int prev_frame_num
Definition: svq3.c:115
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:380
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2413
int size
Definition: avcodec.h:1602
#define MB_TYPE_INTRA4x4
Definition: avcodec.h:1253
int mb_xy
Definition: svq3.c:122
const uint8_t * buffer
Definition: get_bits.h:56
int av_log2(unsigned v)
Definition: intmath.c:26
uint8_t * slice_buf
Definition: svq3.c:96
#define INVALID_VLC
Definition: golomb.h:38
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1904
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:120
int v_edge_pos
Definition: svq3.c:107
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:787
uint8_t run
Definition: svq3.c:206
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3077
#define FULLPEL_MODE
Definition: svq3.c:150
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
AVCodec.
Definition: avcodec.h:3600
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
int16_t mb[16 *48 *2]
Definition: svq3.c:143
Macro definitions for various function/variable attributes.
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:504
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3301
static int16_t block[64]
Definition: dct.c:113
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:984
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:100
int thirdpel_flag
Definition: svq3.c:99
void void avpriv_request_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int mb_num
Definition: svq3.c:124
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:171
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:617
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
static const struct @119 svq3_dct_tables[2][16]
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define DC_PRED8x8
Definition: h264pred.h:68
mode
Definition: f_perms.c:27
int block_offset[2 *(16 *3)]
Definition: svq3.c:147
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2917
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:359
static av_always_inline int dctcoef_get(int16_t *mb, int index)
Definition: svq3.c:632
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:383
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1791
Definition: vf_geq.c:46
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
#define DECLARE_ALIGNED(n, t, v)
Declare a variable that is aligned in memory.
Definition: mem.h:101
#define height
uint8_t * data
Definition: avcodec.h:1601
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:199
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
AVBufferRef * ref_index_buf[2]
Definition: svq3.c:78
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:113
thirdpel DSP functions
ptrdiff_t size
Definition: opengl_enc.c:101
static const uint8_t header[24]
Definition: sdr2.c:67
enum AVPictureType slice_type
Definition: svq3.c:118
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:861
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:114
int low_delay
Definition: svq3.c:119
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:724
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:568
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1998
#define HALFPEL_MODE
Definition: svq3.c:151
AVCodecContext * avctx
Definition: svq3.c:83
int8_t * intra4x4_pred_mode
Definition: svq3.c:133
#define AVERROR(e)
Definition: error.h:43
uint8_t * edge_emu_buffer
Definition: svq3.c:139
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1771
int frame_num
Definition: svq3.c:112
int mb_x
Definition: svq3.c:121
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:222
GLsizei GLsizei * length
Definition: opengl_enc.c:115
unsigned int left_samples_available
Definition: svq3.c:137
const char * name
Name of the codec implementation.
Definition: avcodec.h:3607
#define IS_SKIP(a)
Definition: mpegutils.h:83
int chroma_pred_mode
Definition: svq3.c:129
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:153
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:226
#define fail()
Definition: checkasm.h:83
unsigned int topright_samples_available
Definition: svq3.c:136
Sorenson Vector Quantizer #1 (SVQ1) video codec.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:499
Definition: svq3.c:68
useful rectangle filling function
AVBufferRef * motion_val_buf[2]
Definition: svq3.c:71
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:953
SVQ3Frame * cur_pic
Definition: svq3.c:91
Context for storing H.264 DSP functions.
Definition: h264dsp.h:42
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:146
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
int16_t(*[2] motion_val)[2]
Definition: svq3.c:72
#define FFMIN(a, b)
Definition: common.h:96
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:74
#define width
int width
picture width / height.
Definition: avcodec.h:1863
int32_t
GetBitContext gb_slice
Definition: svq3.c:95
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:282
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1131
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
H.264 / AVC / MPEG-4 part10 codec.
int b_stride
Definition: svq3.c:125
H264PredContext hpc
Definition: svq3.c:86
int n
Definition: avisynth_c.h:684
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:788
#define src
Definition: vp9dsp.c:530
int last_frame_output
Definition: svq3.c:108
int next_p_frame_damaged
Definition: svq3.c:105
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:460
#define IS_INTRA16x16(a)
Definition: mpegutils.h:78
static const int8_t mv[256][2]
Definition: 4xm.c:77
H264DSPContext h264dsp
Definition: svq3.c:85
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1647
GetBitContext gb
Definition: svq3.c:94
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
int debug
debug
Definition: avcodec.h:2916
int intra16x16_pred_mode
Definition: svq3.c:130
main external API structure.
Definition: avcodec.h:1676
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define QP_MAX_NUM
Definition: h264.h:27
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:947
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:690
GLint GLenum type
Definition: opengl_enc.c:105
static const uint8_t scan8[16 *3+3]
Definition: h264dec.h:656
int extradata_size
Definition: avcodec.h:1792
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:110
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
AVBufferRef * mb_type_buf
Definition: svq3.c:74
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:299
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:324
int mb_height
Definition: svq3.c:123
enum AVPictureType pict_type
Definition: svq3.c:117
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:292
int index
Definition: gxfenc.c:89
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264dec.h:672
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:429
uint32_t * mb_type
Definition: svq3.c:75
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:406
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1117
#define MB_TYPE_16x16
Definition: avcodec.h:1256
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:104
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1622
#define mid_pred
Definition: mathops.h:96
int8_t ref_cache[2][5 *8]
Definition: svq3.c:142
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:178
int mb_y
Definition: svq3.c:121
AVPictureType
Definition: avutil.h:266
#define IS_INTER(a)
Definition: mpegutils.h:81
int slice_num
Definition: svq3.c:109
#define u(width,...)
AVFrame * f
Definition: svq3.c:69
uint8_t * buf
Definition: svq3.c:102
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
SVQ3Frame * last_pic
Definition: svq3.c:93
VideoDSPContext vdsp
Definition: svq3.c:89
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:493
static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1343
uint32_t * mb2br_xy
Definition: svq3.c:127
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:207
Definition: vp9.h:84
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
A reference to a data buffer.
Definition: buffer.h:81
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:783
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:637
uint8_t non_zero_count_cache[15 *8]
Definition: svq3.c:145
uint8_t cbp
Definition: h264data.h:36
common internal api header.
if(ret< 0)
Definition: vf_mcdeint.c:282
static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
Definition: svq3.c:1355
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:124
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:161
int16_t mb_luma_dc[3][16 *2]
Definition: svq3.c:144
int h_edge_pos
Definition: svq3.c:106
Bi-dir predicted.
Definition: avutil.h:270
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:224
#define stride
int frame_num_offset
Definition: svq3.c:113
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:734
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:215
void * priv_data
Definition: avcodec.h:1718
#define THIRDPEL_MODE
Definition: svq3.c:152
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:135
#define IS_INTRA4x4(a)
Definition: mpegutils.h:77
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:676
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1023
#define PART_NOT_AVAILABLE
Definition: h264dec.h:396
int slice_size
Definition: svq3.c:97
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
TpelDSPContext tdsp
Definition: svq3.c:88
static const uint8_t svq3_scan[16]
Definition: svq3.c:164
#define AV_RN16A(p)
Definition: intreadwrite.h:522
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: svq3.c:132
int mb_width
Definition: svq3.c:123
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:190
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:259
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2469
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:101
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2035
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:573
#define av_always_inline
Definition: attributes.h:39
SVQ3Frame * next_pic
Definition: svq3.c:92
#define FFSWAP(type, a, b)
Definition: common.h:99
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:115
int buf_size
Definition: svq3.c:103
exp golomb vlc stuff
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1578
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1403
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1354
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:959
for(j=16;j >0;--j)
Predicted.
Definition: avutil.h:269
int halfpel_flag
Definition: svq3.c:98
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
int adaptive_quant
Definition: svq3.c:104
int8_t * ref_index[2]
Definition: svq3.c:79
int16_t mv_cache[2][5 *8][2]
Definition: svq3.c:141