/*
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * How to use this decoder:
 * SVQ3 data is transported within Apple Quicktime files. Quicktime files
 * have stsd atoms to describe media trak properties. A stsd atom for a
 * video trak contains 1 or more ImageDescription atoms. These atoms begin
 * with the 4-byte length of the atom followed by the codec fourcc. Some
 * decoders need information in this atom to operate correctly. Such
 * is the case with SVQ3. In order to get the best use out of this decoder,
 * the calling app must make the SVQ3 ImageDescription atom available
 * via the AVCodecContext's extradata[_size] field:
 *
 * AVCodecContext.extradata = pointer to ImageDescription, first characters
 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
 * AVCodecContext.extradata_size = size of ImageDescription atom memory
 * buffer (which will be the same as the ImageDescription atom size field
 * from the QT file, minus 4 bytes since the length is missing)
 *
 * You will know you have these parameters passed correctly when the decoder
 * correctly decodes this file:
 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
 */

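/*
 * Illustrative sketch only (not part of the decoder): one way a calling
 * application might pass the ImageDescription atom described above. The
 * names image_desc and image_desc_size are hypothetical placeholders for
 * the atom payload starting at the 'SVQ3' fourcc, i.e. with the 4-byte
 * atom length already stripped.
 *
 *     AVCodec *codec        = avcodec_find_decoder(AV_CODEC_ID_SVQ3);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *
 *     avctx->extradata = av_mallocz(image_desc_size +
 *                                   FF_INPUT_BUFFER_PADDING_SIZE);
 *     memcpy(avctx->extradata, image_desc, image_desc_size);
 *     avctx->extradata_size = image_desc_size;
 *
 *     avcodec_open2(avctx, codec, NULL);
 */
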
#include "libavutil/attributes.h"
#include "internal.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h264.h"

#include "h264data.h" // FIXME FIXME FIXME

#include "h264_mvpred.h"
#include "golomb.h"
#include "hpeldsp.h"
#include "rectangle.h"
#include "vdpau_internal.h"

#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "svq1.h"
#include "svq3.h"

/**
 * @file
 * svq3 decoder.
 */

typedef struct {
    H264Context h;
    HpelDSPContext hdsp;
    Picture *cur_pic;
    Picture *next_pic;
    Picture *last_pic;
    int halfpel_flag;
    int thirdpel_flag;
    int unknown_flag;
    int next_slice_index;
    uint32_t watermark_key;
    uint8_t *buf;
    int buf_size;
    int adaptive_quant;
    int next_p_frame_damaged;
    int h_edge_pos;
    int v_edge_pos;
    int last_frame_output;
} SVQ3Context;

#define FULLPEL_MODE  1
#define HALFPEL_MODE  2
#define THIRDPEL_MODE 3
#define PREDICT_MODE  4

/* dual scan (from some older h264 draft)
 *  o-->o-->o   o
 *         |  /|
 *  o   o  o / o
 *  | / |  |/  |
 *  o   o  o   o
 *    /
 *  o-->o-->o-->o
 */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};

static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};

static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};

static const struct {
    uint8_t run;
    uint8_t level;
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};

static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};

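/*
 * Side note (observation from the table above, not from a spec): the
 * dequant coefficients grow by roughly 2^(1/6) per quantizer step, i.e.
 * they approximately double every 6 steps, much like the H.264 quantizer
 * spacing; the largest entries deviate slightly from that pattern.
 */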
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
{
    const int qmul = svq3_dequant_coeff[qp];
#define stride 16
    int i;
    int temp[16];
    static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };

    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
        const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
        const int z2 =  7 * input[4 * i + 1] - 17 * input[4 * i + 3];
        const int z3 = 17 * input[4 * i + 1] +  7 * input[4 * i + 3];

        temp[4 * i + 0] = z0 + z3;
        temp[4 * i + 1] = z1 + z2;
        temp[4 * i + 2] = z1 - z2;
        temp[4 * i + 3] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int offset = x_offset[i];
        const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
        const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
        const int z2 =  7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
        const int z3 = 17 * temp[4 * 1 + i] +  7 * temp[4 * 3 + i];

        output[stride *  0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
        output[stride *  2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
        output[stride *  8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
        output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
    }
}
#undef stride

void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
                        int stride, int qp, int dc)
{
    const int qmul = svq3_dequant_coeff[qp];
    int i;

    if (dc) {
        dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
                                : qmul * (block[0] >> 3) / 2);
        block[0] = 0;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
        const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
        const int z2 =  7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
        const int z3 = 17 * block[1 + 4 * i] +  7 * block[3 + 4 * i];

        block[0 + 4 * i] = z0 + z3;
        block[1 + 4 * i] = z1 + z2;
        block[2 + 4 * i] = z1 - z2;
        block[3 + 4 * i] = z0 - z3;
    }

    for (i = 0; i < 4; i++) {
        const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
        const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
        const int z2 =  7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
        const int z3 = 17 * block[i + 4 * 1] +  7 * block[i + 4 * 3];
        const int rr = (dc + 0x80000);

        dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
        dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
        dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
        dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
    }

    memset(block, 0, 16 * sizeof(int16_t));
}

static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
    { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };

    int run, level, sign, limit;
    unsigned vlc;
    const int intra           = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;

            sign = (vlc & 1) ? 0 : -1;
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            if ((index += run) >= limit)
                return -1;

            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}

static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    H264Context *h     = &s->h;
    const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2

    mx += x;
    my += y;

    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx  = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my  = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = h->cur_pic.f.data[0] + x + y * h->linesize;
    src  = pic->f.data[0] + mx + my * h->linesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
                                 width + 1, height + 1,
                                 mx, my, s->h_edge_pos, s->v_edge_pos);
        src = h->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? h->dsp.avg_tpel_pixels_tab
             : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
                                                width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
                                                       height);

    if (!(h->flags & CODEC_FLAG_GRAY)) {
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        width  = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
            src  = pic->f.data[i] + mx + my * h->uvlinesize;

            if (emu) {
                h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
                                         width + 1, height + 1,
                                         mx, my, (s->h_edge_pos >> 1),
                                         s->v_edge_pos >> 1);
                src = h->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? h->dsp.avg_tpel_pixels_tab
                     : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                        h->uvlinesize,
                                                        width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               h->uvlinesize,
                                                               height);
        }
    }
}

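/*
 * Note on units (reading of the code below, not from a spec): the motion
 * vectors predicted and written back by svq3_mc_dir() appear to be kept in
 * sixth-pel units. Each prediction mode first converts the sixth-pel
 * prediction to its own grid (third-, half- or full-pel), adds the decoded
 * differential, calls svq3_mc_dir_part() on that grid, and then scales the
 * result back to sixth-pel (x2, x3 or x6) before storing it in motion_val.
 */
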
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    H264Context *h         = &s->h;
    const int part_width   = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height  = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width  = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    const int h_edge_pos   = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos   = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * h->mb_x + (j >> 2)) +
                             (4 * h->mb_y + (i >> 2)) * h->b_stride;
            int dxy;
            x = 16 * h->mb_x + j;
            y = 16 * h->mb_y + i;
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                mx = s->next_pic->motion_val[0][b_xy][0] << 1;
                my = s->next_pic->motion_val[0][b_xy][1] << 1;

                if (dir == 0) {
                    mx = mx * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&h->gb);
                dx = svq3_get_se_golomb(&h->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
                fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
                my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
                my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}

static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    H264Context *h = &s->h;
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;

    h->top_samples_available      = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (h->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (h->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&h->gb))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
                              h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (h->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
                       h->cur_pic.motion_val[m][b_xy - h->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1 * 8],
                       (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (h->mb_x < h->mb_width - 1) {
                    AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (h->mb_x > 0) {
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            if (h->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (h->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            if (h->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    h->left_samples_available = 0x5F5F;
            }
            if (h->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
                h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
                h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
                h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];

                if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    h->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = svq3_get_ue_golomb(&h->gb);

                if (vlc >= 25U) {
                    av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        write_back_intra_pred_mode(h);

        if (mb_type == 8) {
            ff_h264_check_intra4x4_pred_mode(h);

            h->top_samples_available  = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {                      /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
            av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
            return -1;
        }

        cbp     = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (h->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U) {
            av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
                                : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) ||
        (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        h->qscale += svq3_get_se_golomb(&h->gb);

        if (h->qscale > 31u) {
            av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0] + 0);
        AV_ZERO128(h->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
            av_log(h->avctx, AV_LOG_ERROR,
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    h->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
                        av_log(h->avctx, AV_LOG_ERROR,
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
                    av_log(h->avctx, AV_LOG_ERROR,
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k                                 = 16 * i + j;
                        h->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
                            av_log(h->avctx, AV_LOG_ERROR,
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    h->cbp                     = cbp;
    h->cur_pic.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);

    return 0;
}

static int svq3_decode_slice_header(AVCodecContext *avctx)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    const int mb_xy = h->mb_xy;
    int i, header;
    unsigned slice_id;

    header = get_bits(&h->gb, 8);

    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = header >> 5 & 3;

        s->next_slice_index = get_bits_count(&h->gb) +
                              8 * show_bits(&h->gb, 8 * length) +
                              8 * length;

        if (s->next_slice_index > h->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
        skip_bits(&h->gb, 8);

        if (s->watermark_key) {
            uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
            AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
                    header ^ s->watermark_key);
        }
        if (length > 0) {
            memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
        }
        skip_bits_long(&h->gb, 0);
    }

    if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
        av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
        h->mb_skip_run = get_bits(&h->gb, i) -
                         (h->mb_y * h->mb_width + h->mb_x);
    } else {
        skip_bits1(&h->gb);
        h->mb_skip_run = 0;
    }

    h->slice_num      = get_bits(&h->gb, 8);
    h->qscale         = get_bits(&h->gb, 5);
    s->adaptive_quant = get_bits1(&h->gb);

    /* unknown fields */
    skip_bits1(&h->gb);

    if (s->unknown_flag)
        skip_bits1(&h->gb);

    skip_bits1(&h->gb);
    skip_bits(&h->gb, 2);

    while (get_bits1(&h->gb))
        skip_bits(&h->gb, 8);

    /* reset intra predictors and invalidate motion vector references */
    if (h->mb_x > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
               -1, 8 * sizeof(int8_t) * h->mb_x);
    }
    if (h->mb_y > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
               -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));

        if (h->mb_x > 0)
            h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
    }

    return 0;
}

static av_cold int svq3_decode_init(AVCodecContext *avctx)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    int m;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;

    s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
    s->last_pic = av_mallocz(sizeof(*s->last_pic));
    s->next_pic = av_mallocz(sizeof(*s->next_pic));
    if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        av_freep(&s->cur_pic);
        av_freep(&s->last_pic);
        av_freep(&s->next_pic);
        return AVERROR(ENOMEM);
    }

    if (ff_h264_decode_init(avctx) < 0)
        return -1;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    h->flags                 = avctx->flags;
    h->is_complex            = 1;
    h->sps.chroma_format_idc = 1;
    h->picture_structure     = PICT_FRAME;
    avctx->pix_fmt           = avctx->codec->pix_fmts[0];

    h->chroma_qp[0] = h->chroma_qp[1] = 4;
    h->chroma_x_shift = h->chroma_y_shift = 1;

    s->halfpel_flag  = 1;
    s->thirdpel_flag = 1;
    s->unknown_flag  = 0;

    /* prowl for the "SEQH" marker in the extradata */
    extradata     = (unsigned char *)avctx->extradata;
    extradata_end = avctx->extradata + avctx->extradata_size;
    if (extradata) {
        for (m = 0; m + 8 < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4)) {
                marker_found = 1;
                break;
            }
            extradata++;
        }
    }

    /* if a match was found, parse the extra data */
    if (marker_found) {
        GetBitContext gb;
        int frame_size_code;

        size = AV_RB32(&extradata[4]);
        if (size > extradata_end - extradata - 8)
            return AVERROR_INVALIDDATA;
        init_get_bits(&gb, extradata + 8, size * 8);

        /* 'frame size code' and optional 'width, height' */
        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {
        case 0:
            avctx->width  = 160;
            avctx->height = 120;
            break;
        case 1:
            avctx->width  = 128;
            avctx->height = 96;
            break;
        case 2:
            avctx->width  = 176;
            avctx->height = 144;
            break;
        case 3:
            avctx->width  = 352;
            avctx->height = 288;
            break;
        case 4:
            avctx->width  = 704;
            avctx->height = 576;
            break;
        case 5:
            avctx->width  = 240;
            avctx->height = 180;
            break;
        case 6:
            avctx->width  = 320;
            avctx->height = 240;
            break;
        case 7:
            avctx->width  = get_bits(&gb, 12);
            avctx->height = get_bits(&gb, 12);
            break;
        }

        s->halfpel_flag  = get_bits1(&gb);
        s->thirdpel_flag = get_bits1(&gb);

        /* unknown fields */
        skip_bits1(&gb);
        skip_bits1(&gb);
        skip_bits1(&gb);
        skip_bits1(&gb);

        h->low_delay = get_bits1(&gb);

        /* unknown field */
        skip_bits1(&gb);

        while (get_bits1(&gb))
            skip_bits(&gb, 8);

        s->unknown_flag       = get_bits1(&gb);
        avctx->has_b_frames   = !h->low_delay;
        if (s->unknown_flag) {
#if CONFIG_ZLIB
            unsigned watermark_width  = svq3_get_ue_golomb(&gb);
            unsigned watermark_height = svq3_get_ue_golomb(&gb);
            int u1                    = svq3_get_ue_golomb(&gb);
            int u2                    = get_bits(&gb, 8);
            int u3                    = get_bits(&gb, 2);
            int u4                    = svq3_get_ue_golomb(&gb);
            unsigned long buf_len     = watermark_width *
                                        watermark_height * 4;
            int offset                = get_bits_count(&gb) + 7 >> 3;
            uint8_t *buf;

            if (watermark_height <= 0 ||
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height)
                return -1;

            buf = av_malloc(buf_len);
            av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
                   watermark_width, watermark_height);
            av_log(avctx, AV_LOG_DEBUG,
                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                   u1, u2, u3, u4, offset);
            if (uncompress(buf, &buf_len, extradata + 8 + offset,
                           size - offset) != Z_OK) {
                av_log(avctx, AV_LOG_ERROR,
                       "could not uncompress watermark logo\n");
                av_free(buf);
                return -1;
            }
            s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
            s->watermark_key = s->watermark_key << 16 | s->watermark_key;
            av_log(avctx, AV_LOG_DEBUG,
                   "watermark key %#x\n", s->watermark_key);
            av_free(buf);
#else
            av_log(avctx, AV_LOG_ERROR,
                   "this svq3 file contains a watermark which needs zlib support compiled in\n");
            return -1;
#endif
        }
    }

    h->width      = avctx->width;
    h->height     = avctx->height;
    h->mb_width   = (h->width + 15) / 16;
    h->mb_height  = (h->height + 15) / 16;
    h->mb_stride  = h->mb_width + 1;
    h->mb_num     = h->mb_width * h->mb_height;
    h->b_stride   = 4 * h->mb_width;
    s->h_edge_pos = h->mb_width * 16;
    s->v_edge_pos = h->mb_height * 16;

    if (ff_h264_alloc_tables(h) < 0) {
        av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}

static void free_picture(AVCodecContext *avctx, Picture *pic)
{
    int i;
    for (i = 0; i < 2; i++) {
        av_buffer_unref(&pic->motion_val_buf[i]);
        av_buffer_unref(&pic->ref_index_buf[i]);
    }
    av_buffer_unref(&pic->mb_type_buf);

    av_frame_unref(&pic->f);
}

1042 {
1043  SVQ3Context *s = avctx->priv_data;
1044  H264Context *h = &s->h;
1045  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1046  const int mb_array_size = h->mb_stride * h->mb_height;
1047  const int b4_stride = h->mb_width * 4 + 1;
1048  const int b4_array_size = b4_stride * h->mb_height * 4;
1049  int ret;
1050 
1051  if (!pic->motion_val_buf[0]) {
1052  int i;
1053 
1054  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1055  if (!pic->mb_type_buf)
1056  return AVERROR(ENOMEM);
1057  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1058 
1059  for (i = 0; i < 2; i++) {
1060  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1061  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1062  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1063  ret = AVERROR(ENOMEM);
1064  goto fail;
1065  }
1066 
1067  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1068  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1069  }
1070  }
1071  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1072 
1073  ret = ff_get_buffer(avctx, &pic->f,
1074  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1075  if (ret < 0)
1076  goto fail;
1077 
1078  if (!h->edge_emu_buffer) {
1079  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1080  if (!h->edge_emu_buffer)
1081  return AVERROR(ENOMEM);
1082  }
1083 
1084  h->linesize = pic->f.linesize[0];
1085  h->uvlinesize = pic->f.linesize[1];
1086 
1087  return 0;
1088 fail:
1089  free_picture(avctx, pic);
1090  return ret;
1091 }
1092 
static int svq3_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    int buf_size   = avpkt->size;
    int left;
    uint8_t *buf;
    int ret, m, i;

    /* special case for last picture */
    if (buf_size == 0) {
        if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
            ret = av_frame_ref(data, &s->next_pic->f);
            if (ret < 0)
                return ret;
            s->last_frame_output = 1;
            *got_frame           = 1;
        }
        return 0;
    }

    h->mb_x = h->mb_y = h->mb_xy = 0;

    if (s->watermark_key) {
        av_fast_malloc(&s->buf, &s->buf_size,
                       buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!s->buf)
            return AVERROR(ENOMEM);
        memcpy(s->buf, avpkt->data, buf_size);
        buf = s->buf;
    } else {
        buf = avpkt->data;
    }

    init_get_bits(&h->gb, buf, 8 * buf_size);

    if (svq3_decode_slice_header(avctx))
        return -1;

    h->pict_type = h->slice_type;

    if (h->pict_type != AV_PICTURE_TYPE_B)
        FFSWAP(Picture*, s->next_pic, s->last_pic);

    av_frame_unref(&s->cur_pic->f);

    /* for skipping the frame */
    s->cur_pic->f.pict_type = h->pict_type;
    s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);

    ret = get_buffer(avctx, s->cur_pic);
    if (ret < 0)
        return ret;

    h->cur_pic_ptr = s->cur_pic;
    av_frame_unref(&h->cur_pic.f);
    h->cur_pic     = *s->cur_pic;
    ret            = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
    if (ret < 0)
        return ret;

    for (i = 0; i < 16; i++) {
        h->block_offset[i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
    }
    for (i = 0; i < 16; i++) {
        h->block_offset[16 + i]      =
        h->block_offset[32 + i]      = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
        h->block_offset[48 + 16 + i] =
        h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
    }

    if (h->pict_type != AV_PICTURE_TYPE_I) {
        if (!s->last_pic->f.data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            ret = get_buffer(avctx, s->last_pic);
            if (ret < 0)
                return ret;
            memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
            memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
                   s->last_pic->f.linesize[1]);
            memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
                   s->last_pic->f.linesize[2]);
        }

        if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            ret = get_buffer(avctx, s->next_pic);
            if (ret < 0)
                return ret;
            memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
            memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
                   s->next_pic->f.linesize[1]);
            memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
                   s->next_pic->f.linesize[2]);
        }
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(h->avctx, AV_LOG_DEBUG,
               "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
               av_get_picture_type_char(h->pict_type),
               s->halfpel_flag, s->thirdpel_flag,
               s->adaptive_quant, h->qscale, h->slice_num);

    if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
        avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return 0;

    if (s->next_p_frame_damaged) {
        if (h->pict_type == AV_PICTURE_TYPE_B)
            return 0;
        else
            s->next_p_frame_damaged = 0;
    }

    if (h->pict_type == AV_PICTURE_TYPE_B) {
        h->frame_num_offset = h->slice_num - h->prev_frame_num;

        if (h->frame_num_offset < 0)
            h->frame_num_offset += 256;
        if (h->frame_num_offset == 0 ||
            h->frame_num_offset >= h->prev_frame_num_offset) {
            av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
            return -1;
        }
    } else {
        h->prev_frame_num        = h->frame_num;
        h->frame_num             = h->slice_num;
        h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;

        if (h->prev_frame_num_offset < 0)
            h->prev_frame_num_offset += 256;
    }

    for (m = 0; m < 2; m++) {
        int i;
        for (i = 0; i < 4; i++) {
            int j;
            for (j = -1; j < 4; j++)
                h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
            if (i < 3)
                h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
        }
    }

    for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
        for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
            unsigned mb_type;
            h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;

            if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
                ((get_bits_count(&h->gb) & 7) == 0 ||
                 show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
                skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
                h->gb.size_in_bits = 8 * buf_size;

                if (svq3_decode_slice_header(avctx))
                    return -1;

                /* TODO: support s->mb_skip_run */
            }

            mb_type = svq3_get_ue_golomb(&h->gb);

            if (h->pict_type == AV_PICTURE_TYPE_I)
                mb_type += 8;
            else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
                mb_type += 4;
            if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
                av_log(h->avctx, AV_LOG_ERROR,
                       "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
                return -1;
            }

            if (mb_type != 0 || h->cbp)
                ff_h264_hl_decode_mb(h);

            if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
                h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
                    (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
        }

        ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
                           16 * h->mb_y, 16, h->picture_structure, 0, 0,
                           h->low_delay, h->mb_height * 16, h->mb_width * 16);
    }

    left = buf_size * 8 - get_bits_count(&h->gb);

    if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
        av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
        //av_hex_dump(stderr, buf+buf_size-8, 8);
    }

    if (left < 0) {
        av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
        return -1;
    }

    if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
        ret = av_frame_ref(data, &s->cur_pic->f);
    else if (s->last_pic->f.data[0])
        ret = av_frame_ref(data, &s->last_pic->f);
    if (ret < 0)
        return ret;

    /* Do not output the last pic after seeking. */
    if (s->last_pic->f.data[0] || h->low_delay)
        *got_frame = 1;

    if (h->pict_type != AV_PICTURE_TYPE_B) {
        FFSWAP(Picture*, s->cur_pic, s->next_pic);
    } else {
        av_frame_unref(&s->cur_pic->f);
    }

    return buf_size;
}

static av_cold int svq3_decode_end(AVCodecContext *avctx)
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;

    free_picture(avctx, s->cur_pic);
    free_picture(avctx, s->next_pic);
    free_picture(avctx, s->last_pic);
    av_freep(&s->cur_pic);
    av_freep(&s->next_pic);
    av_freep(&s->last_pic);

    av_frame_unref(&h->cur_pic.f);

    ff_h264_free_context(h);

    av_freep(&s->buf);
    s->buf_size = 0;

    return 0;
}

AVCodec ff_svq3_decoder = {
    .name           = "svq3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ3,
    .priv_data_size = sizeof(SVQ3Context),
    .init           = svq3_decode_init,
    .close          = svq3_decode_end,
    .decode         = svq3_decode_frame,
    .capabilities   = CODEC_CAP_DRAW_HORIZ_BAND |
                      CODEC_CAP_DR1             |
                      CODEC_CAP_DELAYED_FRAMES,
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
                                                     AV_PIX_FMT_NONE},
};