svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
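/*
 * Illustrative sketch of the calling side: `codec`, `imagedesc` and
 * `imagedesc_size` are hypothetical names for the caller's own codec handle
 * and demuxed ImageDescription buffer (already stripped of the 4-byte atom
 * length, so it starts with 'S','V','Q','3'):
 *
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     avctx->extradata = av_mallocz(imagedesc_size + AV_INPUT_BUFFER_PADDING_SIZE);
 *     memcpy(avctx->extradata, imagedesc, imagedesc_size);
 *     avctx->extradata_size = imagedesc_size;
 *     if (avcodec_open2(avctx, codec, NULL) < 0)
 *         return -1;
 */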
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264.h"
50 #include "h264data.h"
51 #include "golomb.h"
52 #include "hpeldsp.h"
53 #include "mathops.h"
54 #include "rectangle.h"
55 #include "tpeldsp.h"
56 #include "vdpau_internal.h"
57 
58 #if CONFIG_ZLIB
59 #include <zlib.h>
60 #endif
61 
62 #include "svq1.h"
63 
64 /**
65  * @file
66  * svq3 decoder.
67  */
68 
69 typedef struct SVQ3Context {
70  AVCodecContext *avctx;
71 
72  H264DSPContext h264dsp;
73  H264PredContext hpc;
74  HpelDSPContext hdsp;
75  TpelDSPContext tdsp;
76  VideoDSPContext vdsp;
77 
78  H264Picture *cur_pic;
79  H264Picture *next_pic;
80  H264Picture *last_pic;
81  GetBitContext gb;
82  GetBitContext gb_slice;
83  uint8_t *slice_buf;
84  int slice_size;
85  int halfpel_flag;
86  int thirdpel_flag;
87  int has_watermark;
88  uint32_t watermark_key;
89  uint8_t *buf;
90  int buf_size;
91  int adaptive_quant;
92  int next_p_frame_damaged;
93  int h_edge_pos;
94  int v_edge_pos;
95  int last_frame_output;
96  int slice_num;
97  int qscale;
98  int cbp;
99  int frame_num;
100  int frame_num_offset;
101  int prev_frame_num_offset;
102  int prev_frame_num;
103 
104  enum AVPictureType pict_type;
105  int low_delay;
106 
107  int mb_x, mb_y;
108  int mb_xy;
109  int mb_width, mb_height;
110  int mb_stride, mb_num;
111  int b_stride;
112 
113  uint32_t *mb2br_xy;
114 
115  int chroma_pred_mode;
116  int intra16x16_pred_mode;
117 
118  int8_t intra4x4_pred_mode_cache[5 * 8];
119  int8_t *intra4x4_pred_mode;
120 
121  unsigned int top_samples_available;
122  unsigned int topright_samples_available;
123  unsigned int left_samples_available;
124 
125  uint8_t *edge_emu_buffer;
126 
127  DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
128  DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
129  DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
130  DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
131  uint8_t non_zero_count_cache[15 * 8];
132  uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
133  int block_offset[2 * (16 * 3)];
134 } SVQ3Context;
135 
136 #define FULLPEL_MODE 1
137 #define HALFPEL_MODE 2
138 #define THIRDPEL_MODE 3
139 #define PREDICT_MODE 4
140 
141 /* dual scan (from some older H.264 draft)
142  * o-->o-->o   o
143  *         |  /|
144  * o   o   o / o
145  * | / |   |/  |
146  * o   o   o   o
147  *   /
148  * o-->o-->o-->o
149  */
150 static const uint8_t svq3_scan[16] = {
151  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
152  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
153  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
154  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
155 };
156 
157 static const uint8_t luma_dc_zigzag_scan[16] = {
158  0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
159  3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
160  1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
161  3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
162 };
163 
164 static const uint8_t svq3_pred_0[25][2] = {
165  { 0, 0 },
166  { 1, 0 }, { 0, 1 },
167  { 0, 2 }, { 1, 1 }, { 2, 0 },
168  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
169  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
170  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
171  { 2, 4 }, { 3, 3 }, { 4, 2 },
172  { 4, 3 }, { 3, 4 },
173  { 4, 4 }
174 };
175 
176 static const int8_t svq3_pred_1[6][6][5] = {
177  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
178  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
179  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
180  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
181  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
182  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
183  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
184  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
185  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
186  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
187  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
188  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
189 };
190 
191 static const struct {
192  uint8_t run;
193  uint8_t level;
194 } svq3_dct_tables[2][16] = {
195  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
196  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
197  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
198  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
199 };
200 
201 static const uint32_t svq3_dequant_coeff[32] = {
202  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
203  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
204  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
205  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
206 };
207 
208 static int svq3_decode_end(AVCodecContext *avctx);
209 
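/*
 * Dequantize and inverse-transform the 16 luma DC coefficients of an
 * intra 16x16 macroblock. Both transform passes use the integer factors
 * 13, 17 and 7; the per-qp multiplier comes from svq3_dequant_coeff[] and
 * the products are scaled back down by 2^20 with rounding (+ 0x80000).
 * Results are scattered into output[] at the DC positions of the 16
 * 4x4 blocks, hence the stride/x_offset indexing.
 */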
210 static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
211 {
212  const int qmul = svq3_dequant_coeff[qp];
213 #define stride 16
214  int i;
215  int temp[16];
216  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
217 
218  for (i = 0; i < 4; i++) {
219  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
220  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
221  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
222  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
223 
224  temp[4 * i + 0] = z0 + z3;
225  temp[4 * i + 1] = z1 + z2;
226  temp[4 * i + 2] = z1 - z2;
227  temp[4 * i + 3] = z0 - z3;
228  }
229 
230  for (i = 0; i < 4; i++) {
231  const int offset = x_offset[i];
232  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
233  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
234  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
235  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
236 
237  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
238  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
239  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
240  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
241  }
242 }
243 #undef stride
244 
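/*
 * Dequantize one 4x4 block with the same 13/17/7 integer kernel as above
 * and add the result to dst. The dc argument selects the DC handling:
 * 0 means the DC level sits in block[0] like any other coefficient, 1 is
 * the intra luma path (DC already processed by svq3_luma_dc_dequant_idct_c,
 * only a fixed rescale is applied here), 2 is the chroma DC path (the raw
 * DC level is dequantized with qmul here). block[] is cleared on return.
 */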
245 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
246  int stride, int qp, int dc)
247 {
248  const int qmul = svq3_dequant_coeff[qp];
249  int i;
250 
251  if (dc) {
252  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
253  : qmul * (block[0] >> 3) / 2);
254  block[0] = 0;
255  }
256 
257  for (i = 0; i < 4; i++) {
258  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
259  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
260  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
261  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
262 
263  block[0 + 4 * i] = z0 + z3;
264  block[1 + 4 * i] = z1 + z2;
265  block[2 + 4 * i] = z1 - z2;
266  block[3 + 4 * i] = z0 - z3;
267  }
268 
269  for (i = 0; i < 4; i++) {
270  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
271  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
272  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
273  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
274  const int rr = (dc + 0x80000);
275 
276  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
277  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
278  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
279  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
280  }
281 
282  memset(block, 0, 16 * sizeof(int16_t));
283 }
284 
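/*
 * Decode run/level pairs for one block of coefficients. Levels use
 * interleaved Exp-Golomb codes whose low bit carries the sign; runs beyond
 * the small lookup tables are packed into the code value itself. type picks
 * the scan pattern (scan_patterns[]) and the run/level table, with type 3
 * being the chroma-DC case. Returns 0 on success, -1 on error.
 */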
285 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
286  int index, const int type)
287 {
288  static const uint8_t *const scan_patterns[4] = {
289  luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
290  };
291 
292  int run, level, sign, limit;
293  unsigned vlc;
294  const int intra = 3 * type >> 2;
295  const uint8_t *const scan = scan_patterns[type];
296 
297  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
298  for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
299  if ((int32_t)vlc < 0)
300  return -1;
301 
302  sign = (vlc & 1) ? 0 : -1;
303  vlc = vlc + 1 >> 1;
304 
305  if (type == 3) {
306  if (vlc < 3) {
307  run = 0;
308  level = vlc;
309  } else if (vlc < 4) {
310  run = 1;
311  level = 1;
312  } else {
313  run = vlc & 0x3;
314  level = (vlc + 9 >> 2) - run;
315  }
316  } else {
317  if (vlc < 16U) {
318  run = svq3_dct_tables[intra][vlc].run;
319  level = svq3_dct_tables[intra][vlc].level;
320  } else if (intra) {
321  run = vlc & 0x7;
322  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
323  } else {
324  run = vlc & 0xF;
325  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
326  }
327  }
328 
329 
330  if ((index += run) >= limit)
331  return -1;
332 
333  block[scan[index]] = (level ^ sign) - sign;
334  }
335 
336  if (type != 2) {
337  break;
338  }
339  }
340 
341  return 0;
342 }
343 
344 static av_always_inline int
345 svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
346  int i, int list, int part_width)
347 {
348  const int topright_ref = s->ref_cache[list][i - 8 + part_width];
349 
350  if (topright_ref != PART_NOT_AVAILABLE) {
351  *C = s->mv_cache[list][i - 8 + part_width];
352  return topright_ref;
353  } else {
354  *C = s->mv_cache[list][i - 8 - 1];
355  return s->ref_cache[list][i - 8 - 1];
356  }
357 }
358 
359 /**
360  * Get the predicted MV.
361  * @param n the block index
362  * @param part_width the width of the partition (4, 8,16) -> (1, 2, 4)
363  * @param mx the x component of the predicted motion vector
364  * @param my the y component of the predicted motion vector
365  */
366 static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
367  int part_width, int list,
368  int ref, int *const mx, int *const my)
369 {
370  const int index8 = scan8[n];
371  const int top_ref = s->ref_cache[list][index8 - 8];
372  const int left_ref = s->ref_cache[list][index8 - 1];
373  const int16_t *const A = s->mv_cache[list][index8 - 1];
374  const int16_t *const B = s->mv_cache[list][index8 - 8];
375  const int16_t *C;
376  int diagonal_ref, match_count;
377 
378 /* mv_cache
379  * B . . A T T T T
380  * U . . L . . , .
381  * U . . L . . . .
382  * U . . L . . , .
383  * . . . L . . . .
384  */
385 
386  diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
387  match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
388  if (match_count > 1) { //most common
389  *mx = mid_pred(A[0], B[0], C[0]);
390  *my = mid_pred(A[1], B[1], C[1]);
391  } else if (match_count == 1) {
392  if (left_ref == ref) {
393  *mx = A[0];
394  *my = A[1];
395  } else if (top_ref == ref) {
396  *mx = B[0];
397  *my = B[1];
398  } else {
399  *mx = C[0];
400  *my = C[1];
401  }
402  } else {
403  if (top_ref == PART_NOT_AVAILABLE &&
404  diagonal_ref == PART_NOT_AVAILABLE &&
405  left_ref != PART_NOT_AVAILABLE) {
406  *mx = A[0];
407  *my = A[1];
408  } else {
409  *mx = mid_pred(A[0], B[0], C[0]);
410  *my = mid_pred(A[1], B[1], C[1]);
411  }
412  }
413 }
414 
415 static inline void svq3_mc_dir_part(SVQ3Context *s,
416  int x, int y, int width, int height,
417  int mx, int my, int dxy,
418  int thirdpel, int dir, int avg)
419 {
420  const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
421  uint8_t *src, *dest;
422  int i, emu = 0;
423  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
424  int linesize = s->cur_pic->f->linesize[0];
425  int uvlinesize = s->cur_pic->f->linesize[1];
426 
427  mx += x;
428  my += y;
429 
430  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
431  my < 0 || my >= s->v_edge_pos - height - 1) {
432  emu = 1;
433  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
434  my = av_clip(my, -16, s->v_edge_pos - height + 15);
435  }
436 
437  /* form component predictions */
438  dest = s->cur_pic->f->data[0] + x + y * linesize;
439  src = pic->f->data[0] + mx + my * linesize;
440 
441  if (emu) {
442  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
443  linesize, linesize,
444  width + 1, height + 1,
445  mx, my, s->h_edge_pos, s->v_edge_pos);
446  src = s->edge_emu_buffer;
447  }
448  if (thirdpel)
449  (avg ? s->tdsp.avg_tpel_pixels_tab
450  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
451  width, height);
452  else
453  (avg ? s->hdsp.avg_pixels_tab
454  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
455  height);
456 
457  if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
458  mx = mx + (mx < (int) x) >> 1;
459  my = my + (my < (int) y) >> 1;
460  width = width >> 1;
461  height = height >> 1;
462  blocksize++;
463 
464  for (i = 1; i < 3; i++) {
465  dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
466  src = pic->f->data[i] + mx + my * uvlinesize;
467 
468  if (emu) {
469  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
470  uvlinesize, uvlinesize,
471  width + 1, height + 1,
472  mx, my, (s->h_edge_pos >> 1),
473  s->v_edge_pos >> 1);
474  src = s->edge_emu_buffer;
475  }
476  if (thirdpel)
477  (avg ? s->tdsp.avg_tpel_pixels_tab
478  : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
479  uvlinesize,
480  width, height);
481  else
482  (avg ? s->hdsp.avg_pixels_tab
483  : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
484  uvlinesize,
485  height);
486  }
487  }
488 }
489 
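/*
 * Decode the motion vector(s) of one macroblock and run motion compensation
 * for every partition (16x16 down to 4x4, derived from size). Vectors are
 * stored in 1/6-pel units: the third-pel path doubles the final vector, the
 * half-pel path multiplies by 3 and the full-pel path by 6 before write-back,
 * so all code paths store the same scale. The 0x3000/0x6000 biases before the
 * divisions keep the operands non-negative so that the truncating division
 * rounds consistently for negative vectors.
 */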
490 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
491  int dir, int avg)
492 {
493  int i, j, k, mx, my, dx, dy, x, y;
494  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
495  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
496  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
497  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
498  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
499 
500  for (i = 0; i < 16; i += part_height)
501  for (j = 0; j < 16; j += part_width) {
502  const int b_xy = (4 * s->mb_x + (j >> 2)) +
503  (4 * s->mb_y + (i >> 2)) * s->b_stride;
504  int dxy;
505  x = 16 * s->mb_x + j;
506  y = 16 * s->mb_y + i;
507  k = (j >> 2 & 1) + (i >> 1 & 2) +
508  (j >> 1 & 4) + (i & 8);
509 
510  if (mode != PREDICT_MODE) {
511  svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
512  } else {
513  mx = s->next_pic->motion_val[0][b_xy][0] << 1;
514  my = s->next_pic->motion_val[0][b_xy][1] << 1;
515 
516  if (dir == 0) {
517  mx = mx * s->frame_num_offset /
518  s->prev_frame_num_offset + 1 >> 1;
519  my = my * s->frame_num_offset /
520  s->prev_frame_num_offset + 1 >> 1;
521  } else {
522  mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
523  s->prev_frame_num_offset + 1 >> 1;
524  my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
525  s->prev_frame_num_offset + 1 >> 1;
526  }
527  }
528 
529  /* clip motion vector prediction to frame border */
530  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
531  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
532 
533  /* get (optional) motion vector differential */
534  if (mode == PREDICT_MODE) {
535  dx = dy = 0;
536  } else {
537  dy = get_interleaved_se_golomb(&s->gb_slice);
538  dx = get_interleaved_se_golomb(&s->gb_slice);
539 
540  if (dx == INVALID_VLC || dy == INVALID_VLC) {
541  av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
542  return -1;
543  }
544  }
545 
546  /* compute motion vector */
547  if (mode == THIRDPEL_MODE) {
548  int fx, fy;
549  mx = (mx + 1 >> 1) + dx;
550  my = (my + 1 >> 1) + dy;
551  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
552  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
553  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
554 
555  svq3_mc_dir_part(s, x, y, part_width, part_height,
556  fx, fy, dxy, 1, dir, avg);
557  mx += mx;
558  my += my;
559  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
560  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
561  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
562  dxy = (mx & 1) + 2 * (my & 1);
563 
564  svq3_mc_dir_part(s, x, y, part_width, part_height,
565  mx >> 1, my >> 1, dxy, 0, dir, avg);
566  mx *= 3;
567  my *= 3;
568  } else {
569  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
570  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
571 
572  svq3_mc_dir_part(s, x, y, part_width, part_height,
573  mx, my, 0, 0, dir, avg);
574  mx *= 6;
575  my *= 6;
576  }
577 
578  /* update mv_cache */
579  if (mode != PREDICT_MODE) {
580  int32_t mv = pack16to32(mx, my);
581 
582  if (part_height == 8 && i < 8) {
583  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
584 
585  if (part_width == 8 && j < 8)
586  AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
587  }
588  if (part_width == 8 && j < 8)
589  AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
590  if (part_width == 4 || part_height == 4)
591  AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
592  }
593 
594  /* write back motion vectors */
595  fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
596  part_width >> 2, part_height >> 2, s->b_stride,
597  pack16to32(mx, my), 4);
598  }
599 
600  return 0;
601 }
602 
603 static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
604  int mb_type, const int *block_offset,
605  int linesize, uint8_t *dest_y)
606 {
607  int i;
608  if (!IS_INTRA4x4(mb_type)) {
609  for (i = 0; i < 16; i++)
610  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
611  uint8_t *const ptr = dest_y + block_offset[i];
612  svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
613  s->qscale, IS_INTRA(mb_type) ? 1 : 0);
614  }
615  }
616 }
617 
618 static av_always_inline int dctcoef_get(int16_t *mb, int index)
619 {
620  return AV_RN16A(mb + index);
621 }
622 
623 static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
624  int mb_type,
625  const int *block_offset,
626  int linesize,
627  uint8_t *dest_y)
628 {
629  int i;
630  int qscale = s->qscale;
631 
632  if (IS_INTRA4x4(mb_type)) {
633  for (i = 0; i < 16; i++) {
634  uint8_t *const ptr = dest_y + block_offset[i];
635  const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
636 
637  uint8_t *topright;
638  int nnz, tr;
639  if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
640  const int topright_avail = (s->topright_samples_available << i) & 0x8000;
641  av_assert2(s->mb_y || linesize <= block_offset[i]);
642  if (!topright_avail) {
643  tr = ptr[3 - linesize] * 0x01010101u;
644  topright = (uint8_t *)&tr;
645  } else
646  topright = ptr + 4 - linesize;
647  } else
648  topright = NULL;
649 
650  s->hpc.pred4x4[dir](ptr, topright, linesize);
651  nnz = s->non_zero_count_cache[scan8[i]];
652  if (nnz) {
653  svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
654  }
655  }
656  } else {
657  s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
658  svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
659  }
660 }
661 
662 static void hl_decode_mb(SVQ3Context *s)
663 {
664  const int mb_x = s->mb_x;
665  const int mb_y = s->mb_y;
666  const int mb_xy = s->mb_xy;
667  const int mb_type = s->cur_pic->mb_type[mb_xy];
668  uint8_t *dest_y, *dest_cb, *dest_cr;
669  int linesize, uvlinesize;
670  int i, j;
671  const int *block_offset = &s->block_offset[0];
672  const int block_h = 16 >> 1;
673 
674  linesize = s->cur_pic->f->linesize[0];
675  uvlinesize = s->cur_pic->f->linesize[1];
676 
677  dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
678  dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
679  dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
680 
681  s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
682  s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
683 
684  if (IS_INTRA(mb_type)) {
685  s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
686  s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
687 
688  hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
689  }
690 
691  hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
692 
693  if (s->cbp & 0x30) {
694  uint8_t *dest[2] = { dest_cb, dest_cr };
695  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
696  s->dequant4_coeff[4][0]);
697  s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
698  s->dequant4_coeff[4][0]);
699  for (j = 1; j < 3; j++) {
700  for (i = j * 16; i < j * 16 + 4; i++)
701  if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
702  uint8_t *const ptr = dest[j - 1] + block_offset[i];
703  svq3_add_idct_c(ptr, s->mb + i * 16,
704  uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
705  }
706  }
707  }
708 }
709 
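/*
 * Decode one macroblock. mb_type as read from the slice (and remapped in
 * svq3_decode_frame) means: 0 = skip/direct, 1..7 = inter with extra mode
 * bits selecting full-, half- or third-pel precision, 8 and 33 = intra 4x4
 * (33 being the DC_128 variant), anything else up to 33 = intra 16x16.
 * On return s->cbp and cur_pic->mb_type[mb_xy] are updated for hl_decode_mb().
 */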
710 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
711 {
712  int i, j, k, m, dir, mode;
713  int cbp = 0;
714  uint32_t vlc;
715  int8_t *top, *left;
716  const int mb_xy = s->mb_xy;
717  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
718 
719  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
720  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
721  s->topright_samples_available = 0xFFFF;
722 
723  if (mb_type == 0) { /* SKIP */
724  if (s->pict_type == AV_PICTURE_TYPE_P ||
725  s->next_pic->mb_type[mb_xy] == -1) {
726  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
727  0, 0, 0, 0, 0, 0);
728 
729  if (s->pict_type == AV_PICTURE_TYPE_B)
730  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
731  0, 0, 0, 0, 1, 1);
732 
733  mb_type = MB_TYPE_SKIP;
734  } else {
735  mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
736  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
737  return -1;
738  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
739  return -1;
740 
741  mb_type = MB_TYPE_16x16;
742  }
743  } else if (mb_type < 8) { /* INTER */
744  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
745  mode = THIRDPEL_MODE;
746  else if (s->halfpel_flag &&
747  s->thirdpel_flag == !get_bits1(&s->gb_slice))
748  mode = HALFPEL_MODE;
749  else
750  mode = FULLPEL_MODE;
751 
752  /* fill caches */
753  /* note ref_cache should contain here:
754  * ????????
755  * ???11111
756  * N??11111
757  * N??11111
758  * N??11111
759  */
760 
761  for (m = 0; m < 2; m++) {
762  if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
763  for (i = 0; i < 4; i++)
764  AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
765  s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
766  } else {
767  for (i = 0; i < 4; i++)
768  AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
769  }
770  if (s->mb_y > 0) {
771  memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
772  s->cur_pic->motion_val[m][b_xy - s->b_stride],
773  4 * 2 * sizeof(int16_t));
774  memset(&s->ref_cache[m][scan8[0] - 1 * 8],
775  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
776 
777  if (s->mb_x < s->mb_width - 1) {
778  AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
779  s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
780  s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
781  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
782  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
783  } else
784  s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
785  if (s->mb_x > 0) {
786  AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
787  s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
788  s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
789  (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
790  } else
791  s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
792  } else
793  memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
794  PART_NOT_AVAILABLE, 8);
795 
796  if (s->pict_type != AV_PICTURE_TYPE_B)
797  break;
798  }
799 
800  /* decode motion vector(s) and form prediction(s) */
801  if (s->pict_type == AV_PICTURE_TYPE_P) {
802  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
803  return -1;
804  } else { /* AV_PICTURE_TYPE_B */
805  if (mb_type != 2) {
806  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
807  return -1;
808  } else {
809  for (i = 0; i < 4; i++)
810  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
811  0, 4 * 2 * sizeof(int16_t));
812  }
813  if (mb_type != 1) {
814  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
815  return -1;
816  } else {
817  for (i = 0; i < 4; i++)
818  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
819  0, 4 * 2 * sizeof(int16_t));
820  }
821  }
822 
823  mb_type = MB_TYPE_16x16;
824  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
825  int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
826  int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
827 
828  memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
829 
830  if (mb_type == 8) {
831  if (s->mb_x > 0) {
832  for (i = 0; i < 4; i++)
833  s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
834  if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
835  s->left_samples_available = 0x5F5F;
836  }
837  if (s->mb_y > 0) {
838  s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
839  s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
840  s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
841  s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
842 
843  if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
844  s->top_samples_available = 0x33FF;
845  }
846 
847  /* decode prediction codes for luma blocks */
848  for (i = 0; i < 16; i += 2) {
849  vlc = get_interleaved_ue_golomb(&s->gb_slice);
850 
851  if (vlc >= 25U) {
852  av_log(s->avctx, AV_LOG_ERROR,
853  "luma prediction:%"PRIu32"\n", vlc);
854  return -1;
855  }
856 
857  left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
858  top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
859 
860  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
861  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
862 
863  if (left[1] == -1 || left[2] == -1) {
864  av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
865  return -1;
866  }
867  }
868  } else { /* mb_type == 33, DC_128_PRED block type */
869  for (i = 0; i < 4; i++)
870  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
871  }
872 
873  AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
874  i4x4[4] = i4x4_cache[7 + 8 * 3];
875  i4x4[5] = i4x4_cache[7 + 8 * 2];
876  i4x4[6] = i4x4_cache[7 + 8 * 1];
877 
878  if (mb_type == 8) {
879  ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
880  s->avctx, s->top_samples_available,
881  s->left_samples_available);
882 
883  s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
884  s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
885  } else {
886  for (i = 0; i < 4; i++)
887  memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
888 
889  s->top_samples_available = 0x33FF;
890  s->left_samples_available = 0x5F5F;
891  }
892 
893  mb_type = MB_TYPE_INTRA4x4;
894  } else { /* INTRA16x16 */
895  dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
896  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
897 
899  s->left_samples_available, dir, 0)) < 0) {
900  av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
901  return s->intra16x16_pred_mode;
902  }
903 
904  cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
905  mb_type = MB_TYPE_INTRA16x16;
906  }
907 
908  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
909  for (i = 0; i < 4; i++)
910  memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
911  0, 4 * 2 * sizeof(int16_t));
912  if (s->pict_type == AV_PICTURE_TYPE_B) {
913  for (i = 0; i < 4; i++)
914  memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
915  0, 4 * 2 * sizeof(int16_t));
916  }
917  }
918  if (!IS_INTRA4x4(mb_type)) {
919  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
920  }
921  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
922  memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
923  }
924 
925  if (!IS_INTRA16x16(mb_type) &&
926  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
927  if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
928  av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
929  return -1;
930  }
931 
932  cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
933  : ff_h264_golomb_to_inter_cbp[vlc];
934  }
935  if (IS_INTRA16x16(mb_type) ||
936  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
937  s->qscale += get_interleaved_se_golomb(&s->gb_slice);
938 
939  if (s->qscale > 31u) {
940  av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
941  return -1;
942  }
943  }
944  if (IS_INTRA16x16(mb_type)) {
945  AV_ZERO128(s->mb_luma_dc[0] + 0);
946  AV_ZERO128(s->mb_luma_dc[0] + 8);
947  if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
949  "error while decoding intra luma dc\n");
950  return -1;
951  }
952  }
953 
954  if (cbp) {
955  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
956  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
957 
958  for (i = 0; i < 4; i++)
959  if ((cbp & (1 << i))) {
960  for (j = 0; j < 4; j++) {
961  k = index ? (1 * (j & 1) + 2 * (i & 1) +
962  2 * (j & 2) + 4 * (i & 2))
963  : (4 * i + j);
964  s->non_zero_count_cache[scan8[k]] = 1;
965 
966  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
967  av_log(s->avctx, AV_LOG_ERROR,
968  "error while decoding block\n");
969  return -1;
970  }
971  }
972  }
973 
974  if ((cbp & 0x30)) {
975  for (i = 1; i < 3; ++i)
976  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
977  av_log(s->avctx, AV_LOG_ERROR,
978  "error while decoding chroma dc block\n");
979  return -1;
980  }
981 
982  if ((cbp & 0x20)) {
983  for (i = 1; i < 3; i++) {
984  for (j = 0; j < 4; j++) {
985  k = 16 * i + j;
986  s->non_zero_count_cache[scan8[k]] = 1;
987 
988  if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
989  av_log(s->avctx, AV_LOG_ERROR,
990  "error while decoding chroma ac block\n");
991  return -1;
992  }
993  }
994  }
995  }
996  }
997  }
998 
999  s->cbp = cbp;
1000  s->cur_pic->mb_type[mb_xy] = mb_type;
1001 
1002  if (IS_INTRA(mb_type))
1003  s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1004  s->left_samples_available, DC_PRED8x8, 1);
1005 
1006  return 0;
1007 }
1008 
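/*
 * Parse an SVQ3 slice header. The first byte carries the slice header type
 * (1 or 2) in its low bits and the size-field length in bits 5-6; the
 * following 1-3 bytes give the slice length. The slice payload is copied
 * into slice_buf and, when the stream is watermarked, bytes 1-4 of that
 * copy are descrambled by XORing them with watermark_key before parsing
 * continues from gb_slice.
 */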
1009 static int svq3_decode_slice_header(AVCodecContext *avctx)
1010 {
1011  SVQ3Context *s = avctx->priv_data;
1012  const int mb_xy = s->mb_xy;
1013  int i, header;
1014  unsigned slice_id;
1015 
1016  header = get_bits(&s->gb, 8);
1017 
1018  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1019  /* TODO: what? */
1020  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1021  return -1;
1022  } else {
1023  int slice_bits, slice_bytes, slice_length;
1024  int length = header >> 5 & 3;
1025 
1026  slice_length = show_bits(&s->gb, 8 * length);
1027  slice_bits = slice_length * 8;
1028  slice_bytes = slice_length + length - 1;
1029 
1030  if (slice_bytes > get_bits_left(&s->gb)) {
1031  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1032  return -1;
1033  }
1034 
1035  skip_bits(&s->gb, 8);
1036 
1037  av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1038  if (!s->slice_buf)
1039  return AVERROR(ENOMEM);
1040 
1041  memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1042 
1043  init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1044 
1045  if (s->watermark_key) {
1046  uint32_t header = AV_RL32(&s->gb_slice.buffer[1]);
1047  AV_WL32(&s->gb_slice.buffer[1], header ^ s->watermark_key);
1048  }
1049  if (length > 0) {
1050  memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1051  }
1052  skip_bits_long(&s->gb, slice_bytes * 8);
1053  }
1054 
1055  if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1056  av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1057  return -1;
1058  }
1059 
1060  s->pict_type = ff_h264_golomb_to_pict_type[slice_id];
1061 
1062  if ((header & 0x9F) == 2) {
1063  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
1064  get_bits(&s->gb_slice, i);
1065  } else {
1066  skip_bits1(&s->gb_slice);
1067  }
1068 
1069  s->slice_num = get_bits(&s->gb_slice, 8);
1070  s->qscale = get_bits(&s->gb_slice, 5);
1071  s->adaptive_quant = get_bits1(&s->gb_slice);
1072 
1073  /* unknown fields */
1074  skip_bits1(&s->gb_slice);
1075 
1076  if (s->has_watermark)
1077  skip_bits1(&s->gb_slice);
1078 
1079  skip_bits1(&s->gb_slice);
1080  skip_bits(&s->gb_slice, 2);
1081 
1082  if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1083  return AVERROR_INVALIDDATA;
1084 
1085  /* reset intra predictors and invalidate motion vector references */
1086  if (s->mb_x > 0) {
1087  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1088  -1, 4 * sizeof(int8_t));
1089  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1090  -1, 8 * sizeof(int8_t) * s->mb_x);
1091  }
1092  if (s->mb_y > 0) {
1093  memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1094  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1095 
1096  if (s->mb_x > 0)
1097  s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1098  }
1099 
1100  return 0;
1101 }
1102 
1103 static void init_dequant4_coeff_table(SVQ3Context *s)
1104 {
1105  int q, x;
1106  const int max_qp = 51;
1107 
1108  for (q = 0; q < max_qp + 1; q++) {
1109  int shift = ff_h264_quant_div6[q] + 2;
1110  int idx = ff_h264_quant_rem6[q];
1111  for (x = 0; x < 16; x++)
1112  s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1113  ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1114  }
1115 }
1116 
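/*
 * Decoder init: allocate the three picture slots, set up the DSP and
 * prediction contexts, then scan extradata for the "SEQH" marker. The SEQH
 * payload carries a 3-bit frame size code (with explicit 12-bit width and
 * height for code 7), the halfpel/thirdpel capability flags, the low_delay
 * flag and, for protected streams, a zlib-compressed watermark logo whose
 * checksum becomes the slice descrambling key.
 */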
1117 static av_cold int svq3_decode_init(AVCodecContext *avctx)
1118 {
1119  SVQ3Context *s = avctx->priv_data;
1120  int m, x, y;
1121  unsigned char *extradata;
1122  unsigned char *extradata_end;
1123  unsigned int size;
1124  int marker_found = 0;
1125  int ret;
1126 
1127  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1128  s->last_pic = av_mallocz(sizeof(*s->last_pic));
1129  s->next_pic = av_mallocz(sizeof(*s->next_pic));
1130  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1131  ret = AVERROR(ENOMEM);
1132  goto fail;
1133  }
1134 
1135  s->cur_pic->f = av_frame_alloc();
1136  s->last_pic->f = av_frame_alloc();
1137  s->next_pic->f = av_frame_alloc();
1138  if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1139  return AVERROR(ENOMEM);
1140 
1141  ff_h264dsp_init(&s->h264dsp, 8, 1);
1142  ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1143  ff_videodsp_init(&s->vdsp, 8);
1144 
1145 
1146  avctx->bits_per_raw_sample = 8;
1147 
1148  ff_hpeldsp_init(&s->hdsp, avctx->flags);
1149  ff_tpeldsp_init(&s->tdsp);
1150 
1151  avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1152  avctx->color_range = AVCOL_RANGE_JPEG;
1153 
1154  s->avctx = avctx;
1155  s->halfpel_flag = 1;
1156  s->thirdpel_flag = 1;
1157  s->has_watermark = 0;
1158 
1159  /* prowl for the "SEQH" marker in the extradata */
1160  extradata = (unsigned char *)avctx->extradata;
1161  extradata_end = avctx->extradata + avctx->extradata_size;
1162  if (extradata) {
1163  for (m = 0; m + 8 < avctx->extradata_size; m++) {
1164  if (!memcmp(extradata, "SEQH", 4)) {
1165  marker_found = 1;
1166  break;
1167  }
1168  extradata++;
1169  }
1170  }
1171 
1172  /* if a match was found, parse the extra data */
1173  if (marker_found) {
1174  GetBitContext gb;
1175  int frame_size_code;
1176  int unk0, unk1, unk2, unk3, unk4;
1177 
1178  size = AV_RB32(&extradata[4]);
1179  if (size > extradata_end - extradata - 8) {
1180  ret = AVERROR_INVALIDDATA;
1181  goto fail;
1182  }
1183  init_get_bits(&gb, extradata + 8, size * 8);
1184 
1185  /* 'frame size code' and optional 'width, height' */
1186  frame_size_code = get_bits(&gb, 3);
1187  switch (frame_size_code) {
1188  case 0:
1189  avctx->width = 160;
1190  avctx->height = 120;
1191  break;
1192  case 1:
1193  avctx->width = 128;
1194  avctx->height = 96;
1195  break;
1196  case 2:
1197  avctx->width = 176;
1198  avctx->height = 144;
1199  break;
1200  case 3:
1201  avctx->width = 352;
1202  avctx->height = 288;
1203  break;
1204  case 4:
1205  avctx->width = 704;
1206  avctx->height = 576;
1207  break;
1208  case 5:
1209  avctx->width = 240;
1210  avctx->height = 180;
1211  break;
1212  case 6:
1213  avctx->width = 320;
1214  avctx->height = 240;
1215  break;
1216  case 7:
1217  avctx->width = get_bits(&gb, 12);
1218  avctx->height = get_bits(&gb, 12);
1219  break;
1220  }
1221 
1222  s->halfpel_flag = get_bits1(&gb);
1223  s->thirdpel_flag = get_bits1(&gb);
1224 
1225  /* unknown fields */
1226  unk0 = get_bits1(&gb);
1227  unk1 = get_bits1(&gb);
1228  unk2 = get_bits1(&gb);
1229  unk3 = get_bits1(&gb);
1230 
1231  s->low_delay = get_bits1(&gb);
1232 
1233  /* unknown field */
1234  unk4 = get_bits1(&gb);
1235 
1236  av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1237  unk0, unk1, unk2, unk3, unk4);
1238 
1239  if (skip_1stop_8data_bits(&gb) < 0) {
1240  ret = AVERROR_INVALIDDATA;
1241  goto fail;
1242  }
1243 
1244  s->has_watermark = get_bits1(&gb);
1245  avctx->has_b_frames = !s->low_delay;
1246  if (s->has_watermark) {
1247 #if CONFIG_ZLIB
1248  unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1249  unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1250  int u1 = get_interleaved_ue_golomb(&gb);
1251  int u2 = get_bits(&gb, 8);
1252  int u3 = get_bits(&gb, 2);
1253  int u4 = get_interleaved_ue_golomb(&gb);
1254  unsigned long buf_len = watermark_width *
1255  watermark_height * 4;
1256  int offset = get_bits_count(&gb) + 7 >> 3;
1257  uint8_t *buf;
1258 
1259  if (watermark_height <= 0 ||
1260  (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1261  ret = -1;
1262  goto fail;
1263  }
1264 
1265  buf = av_malloc(buf_len);
1266  if (!buf) {
1267  ret = AVERROR(ENOMEM);
1268  goto fail;
1269  }
1270  av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1271  watermark_width, watermark_height);
1272  av_log(avctx, AV_LOG_DEBUG,
1273  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1274  u1, u2, u3, u4, offset);
1275  if (uncompress(buf, &buf_len, extradata + 8 + offset,
1276  size - offset) != Z_OK) {
1277  av_log(avctx, AV_LOG_ERROR,
1278  "could not uncompress watermark logo\n");
1279  av_free(buf);
1280  ret = -1;
1281  goto fail;
1282  }
1283  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1284  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1285  av_log(avctx, AV_LOG_DEBUG,
1286  "watermark key %#"PRIx32"\n", s->watermark_key);
1287  av_free(buf);
1288 #else
1289  av_log(avctx, AV_LOG_ERROR,
1290  "this svq3 file contains watermark which need zlib support compiled in\n");
1291  ret = -1;
1292  goto fail;
1293 #endif
1294  }
1295  }
1296 
1297  s->mb_width = (avctx->width + 15) / 16;
1298  s->mb_height = (avctx->height + 15) / 16;
1299  s->mb_stride = s->mb_width + 1;
1300  s->mb_num = s->mb_width * s->mb_height;
1301  s->b_stride = 4 * s->mb_width;
1302  s->h_edge_pos = s->mb_width * 16;
1303  s->v_edge_pos = s->mb_height * 16;
1304 
1305  s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1306  if (!s->intra4x4_pred_mode)
1307  return AVERROR(ENOMEM);
1308 
1309  s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1310  sizeof(*s->mb2br_xy));
1311  if (!s->mb2br_xy)
1312  return AVERROR(ENOMEM);
1313 
1314  for (y = 0; y < s->mb_height; y++)
1315  for (x = 0; x < s->mb_width; x++) {
1316  const int mb_xy = x + y * s->mb_stride;
1317 
1318  s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1319  }
1320 
1321  init_dequant4_coeff_table(s);
1322 
1323  return 0;
1324 fail:
1325  svq3_decode_end(avctx);
1326  return ret;
1327 }
1328 
1329 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1330 {
1331  int i;
1332  for (i = 0; i < 2; i++) {
1333  av_buffer_unref(&pic->motion_val_buf[i]);
1334  av_buffer_unref(&pic->ref_index_buf[i]);
1335  }
1336  av_buffer_unref(&pic->mb_type_buf);
1337 
1338  av_frame_unref(pic->f);
1339 }
1340 
1341 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1342 {
1343  SVQ3Context *s = avctx->priv_data;
1344  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1345  const int mb_array_size = s->mb_stride * s->mb_height;
1346  const int b4_stride = s->mb_width * 4 + 1;
1347  const int b4_array_size = b4_stride * s->mb_height * 4;
1348  int ret;
1349 
1350  if (!pic->motion_val_buf[0]) {
1351  int i;
1352 
1353  pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1354  if (!pic->mb_type_buf)
1355  return AVERROR(ENOMEM);
1356  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1357 
1358  for (i = 0; i < 2; i++) {
1359  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1360  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1361  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1362  ret = AVERROR(ENOMEM);
1363  goto fail;
1364  }
1365 
1366  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1367  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1368  }
1369  }
1370  pic->reference = !(s->pict_type == AV_PICTURE_TYPE_B);
1371 
1372  ret = ff_get_buffer(avctx, pic->f,
1373  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1374  if (ret < 0)
1375  goto fail;
1376 
1377  if (!s->edge_emu_buffer) {
1378  s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1379  if (!s->edge_emu_buffer)
1380  return AVERROR(ENOMEM);
1381  }
1382 
1383  return 0;
1384 fail:
1385  free_picture(avctx, pic);
1386  return ret;
1387 }
1388 
1389 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1390  int *got_frame, AVPacket *avpkt)
1391 {
1392  SVQ3Context *s = avctx->priv_data;
1393  int buf_size = avpkt->size;
1394  int left;
1395  uint8_t *buf;
1396  int ret, m, i;
1397 
1398  /* special case for last picture */
1399  if (buf_size == 0) {
1400  if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1401  ret = av_frame_ref(data, s->next_pic->f);
1402  if (ret < 0)
1403  return ret;
1404  s->last_frame_output = 1;
1405  *got_frame = 1;
1406  }
1407  return 0;
1408  }
1409 
1410  s->mb_x = s->mb_y = s->mb_xy = 0;
1411 
1412  if (s->watermark_key) {
1413  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1414  if (!s->buf)
1415  return AVERROR(ENOMEM);
1416  memcpy(s->buf, avpkt->data, buf_size);
1417  buf = s->buf;
1418  } else {
1419  buf = avpkt->data;
1420  }
1421 
1422  ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1423  if (ret < 0)
1424  return ret;
1425 
1426  if (svq3_decode_slice_header(avctx))
1427  return -1;
1428 
1429  if (s->pict_type != AV_PICTURE_TYPE_B)
1430  FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1431 
1432  av_frame_unref(s->cur_pic->f);
1433 
1434  /* for skipping the frame */
1435  s->cur_pic->f->pict_type = s->pict_type;
1436  s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1437 
1438  ret = get_buffer(avctx, s->cur_pic);
1439  if (ret < 0)
1440  return ret;
1441 
1442  for (i = 0; i < 16; i++) {
1443  s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1444  s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1445  }
1446  for (i = 0; i < 16; i++) {
1447  s->block_offset[16 + i] =
1448  s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1449  s->block_offset[48 + 16 + i] =
1450  s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1451  }
1452 
1453  if (s->pict_type != AV_PICTURE_TYPE_I) {
1454  if (!s->last_pic->f->data[0]) {
1455  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1456  av_frame_unref(s->last_pic->f);
1457  ret = get_buffer(avctx, s->last_pic);
1458  if (ret < 0)
1459  return ret;
1460  memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1461  memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1462  s->last_pic->f->linesize[1]);
1463  memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1464  s->last_pic->f->linesize[2]);
1465  }
1466 
1467  if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1468  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1469  av_frame_unref(s->next_pic->f);
1470  ret = get_buffer(avctx, s->next_pic);
1471  if (ret < 0)
1472  return ret;
1473  memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1474  memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1475  s->next_pic->f->linesize[1]);
1476  memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1477  s->next_pic->f->linesize[2]);
1478  }
1479  }
1480 
1481  if (avctx->debug & FF_DEBUG_PICT_INFO)
1483  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1485  s->halfpel_flag, s->thirdpel_flag,
1486  s->adaptive_quant, s->qscale, s->slice_num);
1487 
1488  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1489  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1490  avctx->skip_frame >= AVDISCARD_ALL)
1491  return 0;
1492 
1493  if (s->next_p_frame_damaged) {
1494  if (s->pict_type == AV_PICTURE_TYPE_B)
1495  return 0;
1496  else
1497  s->next_p_frame_damaged = 0;
1498  }
1499 
1500  if (s->pict_type == AV_PICTURE_TYPE_B) {
1501  s->frame_num_offset = s->slice_num - s->prev_frame_num;
1502 
1503  if (s->frame_num_offset < 0)
1504  s->frame_num_offset += 256;
1505  if (s->frame_num_offset == 0 ||
1506  s->frame_num_offset >= s->prev_frame_num_offset) {
1507  av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1508  return -1;
1509  }
1510  } else {
1511  s->prev_frame_num = s->frame_num;
1512  s->frame_num = s->slice_num;
1513  s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1514 
1515  if (s->prev_frame_num_offset < 0)
1516  s->prev_frame_num_offset += 256;
1517  }
1518 
1519  for (m = 0; m < 2; m++) {
1520  int i;
1521  for (i = 0; i < 4; i++) {
1522  int j;
1523  for (j = -1; j < 4; j++)
1524  s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1525  if (i < 3)
1526  s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1527  }
1528  }
1529 
1530  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1531  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1532  unsigned mb_type;
1533  s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1534 
1535  if ((get_bits_left(&s->gb_slice)) <= 7) {
1536  if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1537  show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1538 
1539  if (svq3_decode_slice_header(avctx))
1540  return -1;
1541  }
1542  /* TODO: support s->mb_skip_run */
1543  }
1544 
1545  mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1546 
1547  if (s->pict_type == AV_PICTURE_TYPE_I)
1548  mb_type += 8;
1549  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1550  mb_type += 4;
1551  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1552  av_log(s->avctx, AV_LOG_ERROR,
1553  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1554  return -1;
1555  }
1556 
1557  if (mb_type != 0 || s->cbp)
1558  hl_decode_mb(s);
1559 
1560  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1561  s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1562  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1563  }
1564 
1565  ff_draw_horiz_band(avctx, s->cur_pic->f,
1566  s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1567  16 * s->mb_y, 16, PICT_FRAME, 0,
1568  s->low_delay);
1569  }
1570 
1571  left = buf_size*8 - get_bits_count(&s->gb_slice);
1572 
1573  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1574  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1575  //av_hex_dump(stderr, buf+buf_size-8, 8);
1576  }
1577 
1578  if (left < 0) {
1579  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1580  return -1;
1581  }
1582 
1583  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1584  ret = av_frame_ref(data, s->cur_pic->f);
1585  else if (s->last_pic->f->data[0])
1586  ret = av_frame_ref(data, s->last_pic->f);
1587  if (ret < 0)
1588  return ret;
1589 
1590  /* Do not output the last pic after seeking. */
1591  if (s->last_pic->f->data[0] || s->low_delay)
1592  *got_frame = 1;
1593 
1594  if (s->pict_type != AV_PICTURE_TYPE_B) {
1595  FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1596  } else {
1597  av_frame_unref(s->cur_pic->f);
1598  }
1599 
1600  return buf_size;
1601 }
1602 
1603 static av_cold int svq3_decode_end(AVCodecContext *avctx)
1604 {
1605  SVQ3Context *s = avctx->priv_data;
1606 
1607  free_picture(avctx, s->cur_pic);
1608  free_picture(avctx, s->next_pic);
1609  free_picture(avctx, s->last_pic);
1610  av_frame_free(&s->cur_pic->f);
1611  av_frame_free(&s->next_pic->f);
1612  av_frame_free(&s->last_pic->f);
1613  av_freep(&s->cur_pic);
1614  av_freep(&s->next_pic);
1615  av_freep(&s->last_pic);
1616  av_freep(&s->slice_buf);
1617  av_freep(&s->intra4x4_pred_mode);
1618  av_freep(&s->edge_emu_buffer);
1619  av_freep(&s->mb2br_xy);
1620 
1621 
1622  av_freep(&s->buf);
1623  s->buf_size = 0;
1624 
1625  return 0;
1626 }
1627 
1629  .name = "svq3",
1630  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1631  .type = AVMEDIA_TYPE_VIDEO,
1632  .id = AV_CODEC_ID_SVQ3,
1633  .priv_data_size = sizeof(SVQ3Context),
1634  .init = svq3_decode_init,
1635  .close = svq3_decode_end,
1636  .decode = svq3_decode_frame,
1637  .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1638  AV_CODEC_CAP_DR1 |
1639  AV_CODEC_CAP_DELAY,
1640  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1641  AV_PIX_FMT_NONE},
1642 };
#define MB_TYPE_INTRA16x16
Definition: avcodec.h:1241
uint8_t pred_mode
Definition: h264data.h:35
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
#define NULL
Definition: coverity.c:32
#define MB_TYPE_SKIP
Definition: avcodec.h:1251
discard all frames except keyframes
Definition: avcodec.h:783
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
Definition: videodsp.h:76
const char * s
Definition: avisynth_c.h:631
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
int cbp
Definition: svq3.c:98
static int shift(int a, int b)
Definition: sonic.c:82
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
#define C
HpelDSPContext hdsp
Definition: svq3.c:74
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:247
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:285
else temp
Definition: vf_mcdeint.c:259
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:204
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int prev_frame_num
Definition: svq3.c:102
static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: svq3.c:366
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2385
AVBufferRef * mb_type_buf
Definition: h264.h:273
int size
Definition: avcodec.h:1581
#define MB_TYPE_INTRA4x4
Definition: avcodec.h:1240
int mb_xy
Definition: svq3.c:108
const uint8_t * buffer
Definition: get_bits.h:56
int av_log2(unsigned v)
Definition: intmath.c:26
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:53
uint8_t * slice_buf
Definition: svq3.c:83
#define INVALID_VLC
Definition: golomb.h:38
int16_t(*[2] motion_val)[2]
Definition: h264.h:271
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1877
av_cold void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, int chroma_format_idc)
Set the intra prediction function pointers.
Definition: h264pred.c:411
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:120
int v_edge_pos
Definition: svq3.c:94
const uint8_t ff_h264_quant_rem6[QP_MAX_NUM+1]
Definition: h264data.c:174
discard all
Definition: avcodec.h:784
AVFrame * f
Definition: h264.h:264
uint8_t run
Definition: svq3.c:192
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3049
#define FULLPEL_MODE
Definition: svq3.c:136
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
AVCodec.
Definition: avcodec.h:3542
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
int16_t mb[16 *48 *2]
Definition: svq3.c:129
Macro definitions for various function/variable attributes.
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:490
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3264
static int16_t block[64]
Definition: dct.c:113
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:981
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int has_watermark
Definition: svq3.c:87
int thirdpel_flag
Definition: svq3.c:86
int mb_num
Definition: svq3.c:110
const uint8_t ff_h264_dequant4_coeff_init[6][3]
Definition: h264data.c:152
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:157
uint8_t
static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:603
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:140
#define mb
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:63
#define DC_PRED8x8
Definition: h264pred.h:68
mode
Definition: f_perms.c:27
int block_offset[2 *(16 *3)]
Definition: svq3.c:133
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2889
H264Picture * last_pic
Definition: svq3.c:80
static av_always_inline int svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C, int i, int list, int part_width)
Definition: svq3.c:345
Definition: vf_geq.c:46
static av_always_inline int dctcoef_get(int16_t *mb, int index)
Definition: svq3.c:618
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:374
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1764
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:87
const uint8_t ff_h264_chroma_dc_scan[4]
Definition: h264data.c:54
Context for storing H.264 prediction functions.
Definition: h264pred.h:92
void(* pred8x8[4+3+4])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:97
#define height
uint8_t * data
Definition: avcodec.h:1580
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:199
const IMbInfo ff_h264_i_mb_type_info[26]
Definition: h264data.c:66
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
const uint8_t ff_h264_golomb_to_inter_cbp[48]
Definition: h264data.c:48
int ff_h264_check_intra4x4_pred_mode(int8_t *pred_mode_cache, void *logctx, int top_samples_available, int left_samples_available)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:103
thirdpel DSP functions
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:801
ptrdiff_t size
Definition: opengl_enc.c:101
static const uint8_t header[24]
Definition: sdr2.c:67
void(* pred4x4[9+3+3])(uint8_t *src, const uint8_t *topright, ptrdiff_t stride)
Definition: h264pred.h:93
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:858
#define A(x)
Definition: vp56_arith.h:28
#define av_log(a,...)
int prev_frame_num_offset
Definition: svq3.c:101
int low_delay
Definition: svq3.c:105
unsigned m
Definition: audioconvert.c:187
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:710
H.264 / AVC / MPEG-4 part10 codec.
#define U(x)
Definition: vp56_arith.h:37
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:568
static void free_picture(AVCodecContext *avctx, H264Picture *pic)
Definition: svq3.c:1329
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1971
#define HALFPEL_MODE
Definition: svq3.c:137
AVCodecContext * avctx
Definition: svq3.c:70
int8_t * intra4x4_pred_mode
Definition: svq3.c:119
#define AVERROR(e)
Definition: error.h:43
uint8_t * edge_emu_buffer
Definition: svq3.c:125
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
const uint8_t ff_zigzag_scan[16+1]
Definition: mathtables.c:109
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1744
int frame_num
Definition: svq3.c:99
int mb_x
Definition: svq3.c:107
static int get_interleaved_se_golomb(GetBitContext *gb)
Definition: golomb.h:222
GLsizei GLsizei * length
Definition: opengl_enc.c:115
unsigned int left_samples_available
Definition: svq3.c:123
const char * name
Name of the codec implementation.
Definition: avcodec.h:3549
#define IS_SKIP(a)
Definition: mpegutils.h:83
int chroma_pred_mode
Definition: svq3.c:115
const uint8_t ff_h264_golomb_to_pict_type[5]
Definition: h264data.c:37
#define PREDICT_MODE
Definition: svq3.c:139
static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
Definition: h264.h:817
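pack16to32() packs both 16-bit components of a motion vector into one 32-bit word so that fill_rectangle() can write whole vectors with single 32-bit stores. A sketch of the idea under a little-endian assumption (pack_mv is an illustrative name, not the inline from h264.h):
#include <stdint.h>
#include <stdio.h>
/* Low half = first component, high half = second component. */
static uint32_t pack_mv(int16_t mx, int16_t my)
{
    return (uint16_t)mx | ((uint32_t)(uint16_t)my << 16);
}
int main(void)
{
    uint32_t v  = pack_mv(-3, 7);
    int16_t  mx = (int16_t)(v & 0xFFFF);
    int16_t  my = (int16_t)(v >> 16);
    printf("%d %d\n", mx, my);   /* prints "-3 7": both components round-trip */
    return 0;
}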
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
#define fail()
Definition: checkasm.h:81
unsigned int topright_samples_available
Definition: svq3.c:122
Sorenson Vector Quantizer #1 (SVQ1) video codec.
static const uint8_t scan8[16 * 3 + 3]
Definition: h264.h:801
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
useful rectangle filling function
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:950
AVBufferRef * motion_val_buf[2]
Definition: h264.h:270
Context for storing H.264 DSP functions.
Definition: h264dsp.h:41
uint32_t dequant4_coeff[QP_MAX_NUM+1][16]
Definition: svq3.c:132
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
#define FFMIN(a, b)
Definition: common.h:96
av_cold void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:67
int reference
Definition: h264.h:295
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:74
#define width
int width
picture width / height.
Definition: avcodec.h:1836
uint32_t * mb_type
Definition: h264.h:274
int32_t
GetBitContext gb_slice
Definition: svq3.c:82
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:282
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:1117
static const struct @103 svq3_dct_tables[2][16]
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
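The halfpel and thirdpel pixel routines both quote the same rounding rule, (a + b + 1) >> 1: ties are rounded up instead of truncated. A tiny stand-alone illustration (rnd_avg is an illustrative name):
#include <stdint.h>
#include <stdio.h>
/* Rounded average of two 8-bit samples. */
static uint8_t rnd_avg(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);
}
int main(void)
{
    printf("%d\n", rnd_avg(10, 11));   /* 11: the +1 rounds the .5 up   */
    printf("%d\n", rnd_avg(10, 12));   /* 11: exact average, unaffected */
    return 0;
}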
int b_stride
Definition: svq3.c:111
H264PredContext hpc
Definition: svq3.c:73
int n
Definition: avisynth_c.h:547
H264Picture * cur_pic
Definition: svq3.c:78
#define src
Definition: vp9dsp.c:530
int last_frame_output
Definition: svq3.c:95
#define PART_NOT_AVAILABLE
Definition: h264.h:507
int next_p_frame_damaged
Definition: svq3.c:92
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:457
#define IS_INTRA16x16(a)
Definition: mpegutils.h:78
static const int8_t mv[256][2]
Definition: 4xm.c:77
H264DSPContext h264dsp
Definition: svq3.c:72
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1628
GetBitContext gb
Definition: svq3.c:81
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
int debug
debug
Definition: avcodec.h:2888
int intra16x16_pred_mode
Definition: svq3.c:116
main external API structure.
Definition: avcodec.h:1649
const uint8_t ff_h264_chroma_qp[7][QP_MAX_NUM+1]
Definition: h264data.c:203
uint8_t * data
The data buffer.
Definition: buffer.h:89
#define QP_MAX_NUM
Definition: h264.h:112
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:928
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:553
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1765
void(* pred16x16[4+3+2])(uint8_t *src, ptrdiff_t stride)
Definition: h264pred.h:98
int qscale
Definition: svq3.c:97
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:299
const uint8_t ff_h264_golomb_to_intra4x4_cbp[48]
Definition: h264data.c:42
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:324
int mb_height
Definition: svq3.c:109
enum AVPictureType pict_type
Definition: svq3.c:104
const uint8_t ff_h264_quant_div6[QP_MAX_NUM+1]
Definition: h264data.c:182
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:292
int index
Definition: gxfenc.c:89
int8_t * ref_index[2]
Definition: h264.h:280
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:415
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:406
static void init_dequant4_coeff_table(SVQ3Context *s)
Definition: svq3.c:1103
#define MB_TYPE_16x16
Definition: avcodec.h:1243
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:103
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1603
#define mid_pred
Definition: mathops.h:96
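mid_pred() yields the median of its three arguments, the operation used for H.264-style median motion-vector prediction from the neighbouring blocks. A stand-alone sketch of the same median-of-three (median3 is an illustrative name, not the macro from mathops.h):
#include <stdio.h>
static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* now a = min, b = max of the first two */
    if (b > c) b = c;                         /* b = min(b, c)                         */
    if (a > b) b = a;                         /* b = max(a, b): the median             */
    return b;
}
int main(void)
{
    printf("%d\n", median3(3, 10, 7));   /* prints 7 */
    return 0;
}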
int8_t ref_cache[2][5 * 8]
Definition: svq3.c:128
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:164
int mb_y
Definition: svq3.c:107
AVPictureType
Definition: avutil.h:264
#define IS_INTER(a)
Definition: mpegutils.h:81
int slice_num
Definition: svq3.c:96
#define u(width,...)
uint8_t * buf
Definition: svq3.c:89
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
VideoDSPContext vdsp
Definition: svq3.c:76
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:484
uint32_t * mb2br_xy
Definition: svq3.c:113
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:193
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:722
#define avg(a, b, c, d)
discard all non reference
Definition: avcodec.h:780
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s, int mb_type, const int *block_offset, int linesize, uint8_t *dest_y)
Definition: svq3.c:623
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:499
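A minimal usage sketch (stand-alone program; scratch is an illustrative buffer name): the buffer is reallocated only when the requested size exceeds what is already allocated, and, unlike av_fast_realloc(), the previous contents are not preserved.
#include <stdint.h>
#include <libavutil/mem.h>
int main(void)
{
    uint8_t *scratch = NULL;        /* illustrative scratch buffer         */
    unsigned int scratch_size = 0;  /* tracks the currently allocated size */
    av_fast_malloc(&scratch, &scratch_size, 1024);  /* allocates >= 1024 bytes     */
    if (!scratch)
        return 1;
    av_fast_malloc(&scratch, &scratch_size, 512);   /* no-op: already large enough */
    av_fast_malloc(&scratch, &scratch_size, 4096);  /* frees and reallocates;
                                                       old contents are lost       */
    if (!scratch)
        return 1;
    av_free(scratch);
    return 0;
}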
uint8_t non_zero_count_cache[15 * 8]
Definition: svq3.c:131
uint8_t cbp
Definition: h264data.h:36
common internal api header.
H264Picture * next_pic
Definition: svq3.c:79
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
int mb_stride
Definition: svq3.c:110
int ff_h264_check_intra_pred_mode(void *logctx, int top_samples_available, int left_samples_available, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264_parse.c:151
int16_t mb_luma_dc[3][16 * 2]
Definition: svq3.c:130
int h_edge_pos
Definition: svq3.c:93
static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
Definition: svq3.c:1341
Bi-dir predicted.
Definition: avutil.h:268
static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:210
#define stride
int frame_num_offset
Definition: svq3.c:100
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:731
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:201
void * priv_data
Definition: avcodec.h:1691
#define THIRDPEL_MODE
Definition: svq3.c:138
#define PICT_FRAME
Definition: mpegutils.h:39
unsigned int top_samples_available
Definition: svq3.c:121
#define IS_INTRA4x4(a)
Definition: mpegutils.h:77
#define av_free(p)
static void hl_decode_mb(SVQ3Context *s)
Definition: svq3.c:662
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:1009
int slice_size
Definition: svq3.c:84
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
TpelDSPContext tdsp
Definition: svq3.c:75
static const uint8_t svq3_scan[16]
Definition: svq3.c:150
#define AV_RN16A(p)
Definition: intreadwrite.h:522
int8_t intra4x4_pred_mode_cache[5 * 8]
Definition: svq3.c:118
int mb_width
Definition: svq3.c:109
AVBufferRef * ref_index_buf[2]
Definition: h264.h:279
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:229
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:176
static void svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:245
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2441
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:88
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:573
#define av_always_inline
Definition: attributes.h:39
#define FFSWAP(type, a, b)
Definition: common.h:99
static unsigned get_interleaved_ue_golomb(GetBitContext *gb)
Definition: golomb.h:115
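The slice headers decoded here use this interleaved Exp-Golomb variant: data bits alternate with continuation bits, and a continuation bit of 1 terminates the code, so a lone 1 bit codes the value 0. A stand-alone sketch of that decoding rule with a simplified bit reader instead of GetBitContext (BitReader, read_bit and interleaved_ue are illustrative names):
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
typedef struct { const uint8_t *buf; size_t pos; } BitReader;   /* pos counts bits */
static unsigned read_bit(BitReader *br)
{
    unsigned bit = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return bit;
}
/* Interleaved unsigned Exp-Golomb: each continuation bit of 0 is followed by
 * one data bit; a continuation bit of 1 ends the code. */
static unsigned interleaved_ue(BitReader *br)
{
    unsigned v = 1;
    while (read_bit(br) == 0)
        v = (v << 1) | read_bit(br);
    return v - 1;
}
int main(void)
{
    static const uint8_t data[] = { 0x98 };   /* bits: 1 | 0 0 1 | 1 | 000 (unused) */
    BitReader br = { data, 0 };
    unsigned a = interleaved_ue(&br);
    unsigned b = interleaved_ue(&br);
    unsigned c = interleaved_ue(&br);
    printf("%u %u %u\n", a, b, c);            /* prints "0 1 0" */
    return 0;
}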
int buf_size
Definition: svq3.c:90
exp golomb vlc stuff
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1557
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1389
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1341
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:956
Predicted.
Definition: avutil.h:267
int halfpel_flag
Definition: svq3.c:85
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
int adaptive_quant
Definition: svq3.c:91
int16_t mv_cache[2][5 * 8][2]
Definition: svq3.c:127