svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple QuickTime files. QuickTime files
24  * have stsd atoms to describe media trak properties. An stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
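A minimal caller-side sketch of the extradata hand-off described above (illustrative only, not part of svq3.c); `image_desc` and `image_desc_size` are hypothetical values already extracted from the stsd atom with the leading 4-byte length stripped, and error handling is abbreviated:

    AVCodec *codec        = avcodec_find_decoder(AV_CODEC_ID_SVQ3);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    /* image_desc starts with 'S','V','Q','3'; size excludes the 4-byte length */
    avctx->extradata = av_mallocz(image_desc_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    memcpy(avctx->extradata, image_desc, image_desc_size);
    avctx->extradata_size = image_desc_size;
    if (avcodec_open2(avctx, codec, NULL) < 0)
        return -1;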
42 #include "internal.h"
43 #include "avcodec.h"
44 #include "mpegvideo.h"
45 #include "h264.h"
46 
47 #include "h264data.h" // FIXME FIXME FIXME
48 
49 #include "h264_mvpred.h"
50 #include "golomb.h"
51 #include "rectangle.h"
52 #include "vdpau_internal.h"
53 
54 #if CONFIG_ZLIB
55 #include <zlib.h>
56 #endif
57 
58 #include "svq1.h"
59 #include "svq3.h"
60 
61 /**
62  * @file
63  * svq3 decoder.
64  */
65 
66 typedef struct {
67  H264Context h;
68  Picture *cur_pic;
69  Picture *next_pic;
70  Picture *last_pic;
71  int halfpel_flag;
72  int thirdpel_flag;
73  int unknown_flag;
74  int next_slice_index;
75  uint32_t watermark_key;
76  uint8_t *buf;
77  int buf_size;
78  int adaptive_quant;
79  int next_p_frame_damaged;
80  int h_edge_pos;
81  int v_edge_pos;
82  int last_frame_output;
83 } SVQ3Context;
84 
85 #define FULLPEL_MODE 1
86 #define HALFPEL_MODE 2
87 #define THIRDPEL_MODE 3
88 #define PREDICT_MODE 4
89 
90 /* dual scan (from some older h264 draft)
91  *  o-->o-->o   o
92  *          |  /|
93  *  o   o   o / o
94  *  | / |   |/  |
95  *  o   o   o   o
96  *    /
97  *  o-->o-->o-->o
98  */
99 static const uint8_t svq3_scan[16] = {
100  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
101  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
102  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
103  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
104 };
105 
106 static const uint8_t svq3_pred_0[25][2] = {
107  { 0, 0 },
108  { 1, 0 }, { 0, 1 },
109  { 0, 2 }, { 1, 1 }, { 2, 0 },
110  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
111  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
112  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
113  { 2, 4 }, { 3, 3 }, { 4, 2 },
114  { 4, 3 }, { 3, 4 },
115  { 4, 4 }
116 };
117 
118 static const int8_t svq3_pred_1[6][6][5] = {
119  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
120  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
121  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
122  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
123  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
124  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
125  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
126  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
127  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
128  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
129  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
130  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
131 };
132 
133 static const struct {
134  uint8_t run;
135  uint8_t level;
136 } svq3_dct_tables[2][16] = {
137  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
138  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
139  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
140  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
141 };
142 
143 static const uint32_t svq3_dequant_coeff[32] = {
144  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
145  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
146  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
147  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
148 };
149 
150 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
151 {
152  const int qmul = svq3_dequant_coeff[qp];
153 #define stride 16
154  int i;
155  int temp[16];
156  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
157 
158  for (i = 0; i < 4; i++) {
159  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
160  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
161  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
162  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
163 
164  temp[4 * i + 0] = z0 + z3;
165  temp[4 * i + 1] = z1 + z2;
166  temp[4 * i + 2] = z1 - z2;
167  temp[4 * i + 3] = z0 - z3;
168  }
169 
170  for (i = 0; i < 4; i++) {
171  const int offset = x_offset[i];
172  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
173  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
174  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
175  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
176 
177  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
178  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
179  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
180  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
181  }
182 }
183 #undef stride
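The `(x * qmul + 0x80000) >> 20` pattern above is fixed-point scaling with 20 fractional bits and round-to-nearest, since 0x80000 is half of 2^20. For example, with qp = 0 (qmul = 3881) a transformed value of 1024 gives (1024 * 3881 + 0x80000) >> 20 = 4498432 >> 20 = 4. The svq3_dequant_coeff table itself grows roughly geometrically, close to doubling every 6 qp steps (3881, 7761, 15523, ...), analogous to the exponential quantizer scale used by H.264.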
184 
185 static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
186  int stride, int qp, int dc)
187 {
188  const int qmul = svq3_dequant_coeff[qp];
189  int i;
190 
191  if (dc) {
192  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
193  : qmul * (block[0] >> 3) / 2);
194  block[0] = 0;
195  }
196 
197  for (i = 0; i < 4; i++) {
198  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
199  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
200  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
201  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
202 
203  block[0 + 4 * i] = z0 + z3;
204  block[1 + 4 * i] = z1 + z2;
205  block[2 + 4 * i] = z1 - z2;
206  block[3 + 4 * i] = z0 - z3;
207  }
208 
209  for (i = 0; i < 4; i++) {
210  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
211  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
212  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
213  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
214  const int rr = (dc + 0x80000);
215 
216  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
217  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
218  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
219  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
220  }
221 
222  memset(block, 0, 16 * sizeof(int16_t));
223 }
224 
225 static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
226  int index, const int type)
227 {
228  static const uint8_t *const scan_patterns[4] =
229  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
230 
231  int run, level, sign, limit;
232  unsigned vlc;
233  const int intra = 3 * type >> 2;
234  const uint8_t *const scan = scan_patterns[type];
235 
236  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
237  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
238  if ((int32_t)vlc < 0)
239  return -1;
240 
241  sign = (vlc & 1) ? 0 : -1;
242  vlc = vlc + 1 >> 1;
243 
244  if (type == 3) {
245  if (vlc < 3) {
246  run = 0;
247  level = vlc;
248  } else if (vlc < 4) {
249  run = 1;
250  level = 1;
251  } else {
252  run = vlc & 0x3;
253  level = (vlc + 9 >> 2) - run;
254  }
255  } else {
256  if (vlc < 16U) {
257  run = svq3_dct_tables[intra][vlc].run;
258  level = svq3_dct_tables[intra][vlc].level;
259  } else if (intra) {
260  run = vlc & 0x7;
261  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
262  } else {
263  run = vlc & 0xF;
264  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
265  }
266  }
267 
268 
269  if ((index += run) >= limit)
270  return -1;
271 
272  block[scan[index]] = (level ^ sign) - sign;
273  }
274 
275  if (type != 2) {
276  break;
277  }
278  }
279 
280  return 0;
281 }
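The sign handling above is a branch-free conditional negate: `sign` is 0 for odd (positive) Golomb codes and -1 (all bits set) for even (negative) ones, so `(level ^ sign) - sign` yields `level` or `-level` via the two's-complement identity `~x + 1 == -x`. An equivalent, more explicit helper, purely for illustration (not part of svq3.c):

    static inline int svq3_cond_negate(int level, int sign)
    {
        /* sign is either 0 or -1, as computed from the vlc parity above */
        return sign ? -level : level;
    }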
282 
283 static inline void svq3_mc_dir_part(SVQ3Context *s,
284  int x, int y, int width, int height,
285  int mx, int my, int dxy,
286  int thirdpel, int dir, int avg)
287 {
288  H264Context *h = &s->h;
289  const Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
290  uint8_t *src, *dest;
291  int i, emu = 0;
292  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
293 
294  mx += x;
295  my += y;
296 
297  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
298  my < 0 || my >= s->v_edge_pos - height - 1) {
299  emu = 1;
300  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
301  my = av_clip(my, -16, s->v_edge_pos - height + 15);
302  }
303 
304  /* form component predictions */
305  dest = h->cur_pic.f.data[0] + x + y * h->linesize;
306  src = pic->f.data[0] + mx + my * h->linesize;
307 
308  if (emu) {
309  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->linesize,
310  width + 1, height + 1,
311  mx, my, s->h_edge_pos, s->v_edge_pos);
312  src = h->edge_emu_buffer;
313  }
314  if (thirdpel)
315  (avg ? h->dsp.avg_tpel_pixels_tab
316  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
317  width, height);
318  else
319  (avg ? h->dsp.avg_pixels_tab
320  : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
321  height);
322 
323  if (!(h->flags & CODEC_FLAG_GRAY)) {
324  mx = mx + (mx < (int) x) >> 1;
325  my = my + (my < (int) y) >> 1;
326  width = width >> 1;
327  height = height >> 1;
328  blocksize++;
329 
330  for (i = 1; i < 3; i++) {
331  dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
332  src = pic->f.data[i] + mx + my * h->uvlinesize;
333 
334  if (emu) {
335  h->vdsp.emulated_edge_mc(h->edge_emu_buffer, src, h->uvlinesize,
336  width + 1, height + 1,
337  mx, my, (s->h_edge_pos >> 1),
338  s->v_edge_pos >> 1);
339  src = h->edge_emu_buffer;
340  }
341  if (thirdpel)
342  (avg ? h->dsp.avg_tpel_pixels_tab
343  : h->dsp.put_tpel_pixels_tab)[dxy](dest, src,
344  h->uvlinesize,
345  width, height);
346  else
347  (avg ? h->dsp.avg_pixels_tab
348  : h->dsp.put_pixels_tab)[blocksize][dxy](dest, src,
349  h->uvlinesize,
350  height);
351  }
352  }
353 }
354 
355 static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
356  int dir, int avg)
357 {
358  int i, j, k, mx, my, dx, dy, x, y;
359  H264Context *h = &s->h;
360  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
361  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
362  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
363  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
364  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
365 
366  for (i = 0; i < 16; i += part_height)
367  for (j = 0; j < 16; j += part_width) {
368  const int b_xy = (4 * h->mb_x + (j >> 2)) +
369  (4 * h->mb_y + (i >> 2)) * h->b_stride;
370  int dxy;
371  x = 16 * h->mb_x + j;
372  y = 16 * h->mb_y + i;
373  k = (j >> 2 & 1) + (i >> 1 & 2) +
374  (j >> 1 & 4) + (i & 8);
375 
376  if (mode != PREDICT_MODE) {
377  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
378  } else {
379  mx = s->next_pic->f.motion_val[0][b_xy][0] << 1;
380  my = s->next_pic->f.motion_val[0][b_xy][1] << 1;
381 
382  if (dir == 0) {
383  mx = mx * h->frame_num_offset /
384  h->prev_frame_num_offset + 1 >> 1;
385  my = my * h->frame_num_offset /
386  h->prev_frame_num_offset + 1 >> 1;
387  } else {
388  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
389  h->prev_frame_num_offset + 1 >> 1;
390  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
391  h->prev_frame_num_offset + 1 >> 1;
392  }
393  }
394 
395  /* clip motion vector prediction to frame border */
396  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
397  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
398 
399  /* get (optional) motion vector differential */
400  if (mode == PREDICT_MODE) {
401  dx = dy = 0;
402  } else {
403  dy = svq3_get_se_golomb(&h->gb);
404  dx = svq3_get_se_golomb(&h->gb);
405 
406  if (dx == INVALID_VLC || dy == INVALID_VLC) {
407  av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
408  return -1;
409  }
410  }
411 
412  /* compute motion vector */
413  if (mode == THIRDPEL_MODE) {
414  int fx, fy;
415  mx = (mx + 1 >> 1) + dx;
416  my = (my + 1 >> 1) + dy;
417  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
418  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
419  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
420 
421  svq3_mc_dir_part(s, x, y, part_width, part_height,
422  fx, fy, dxy, 1, dir, avg);
423  mx += mx;
424  my += my;
425  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
426  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
427  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
428  dxy = (mx & 1) + 2 * (my & 1);
429 
430  svq3_mc_dir_part(s, x, y, part_width, part_height,
431  mx >> 1, my >> 1, dxy, 0, dir, avg);
432  mx *= 3;
433  my *= 3;
434  } else {
435  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
436  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
437 
438  svq3_mc_dir_part(s, x, y, part_width, part_height,
439  mx, my, 0, 0, dir, avg);
440  mx *= 6;
441  my *= 6;
442  }
443 
444  /* update mv_cache */
445  if (mode != PREDICT_MODE) {
446  int32_t mv = pack16to32(mx, my);
447 
448  if (part_height == 8 && i < 8) {
449  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
450 
451  if (part_width == 8 && j < 8)
452  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
453  }
454  if (part_width == 8 && j < 8)
455  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
456  if (part_width == 4 || part_height == 4)
457  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
458  }
459 
460  /* write back motion vectors */
461  fill_rectangle(h->cur_pic.f.motion_val[dir][b_xy],
462  part_width >> 2, part_height >> 2, h->b_stride,
463  pack16to32(mx, my), 4);
464  }
465 
466  return 0;
467 }
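Inside svq3_mc_dir() motion vectors are carried in units of 1/6 pel, which is why h_edge_pos and v_edge_pos are scaled by 6 above. The `+ 0x3000` and `+ 0x6000` offsets bias the value non-negative before the cast to unsigned, so the division rounds consistently regardless of sign; e.g. in full-pel mode with mx = -5: (unsigned)(-5 + 3 + 0x6000) / 6 - 0x1000 = 24574 / 6 - 4096 = 4095 - 4096 = -1, i.e. -5/6 pel rounded to the nearest full pel. The trailing `mx += mx`, `mx *= 3` and `mx *= 6` convert the third-, half- and full-pel results back to sixth-pel before they are stored in mv_cache and motion_val.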
468 
469 static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
470 {
471  H264Context *h = &s->h;
472  int i, j, k, m, dir, mode;
473  int cbp = 0;
474  uint32_t vlc;
475  int8_t *top, *left;
476  const int mb_xy = h->mb_xy;
477  const int b_xy = 4 * h->mb_x + 4 * h->mb_y * h->b_stride;
478 
479  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
480  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
481  h->topright_samples_available = 0xFFFF;
482 
483  if (mb_type == 0) { /* SKIP */
484  if (h->pict_type == AV_PICTURE_TYPE_P ||
485  s->next_pic->f.mb_type[mb_xy] == -1) {
486  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
487  0, 0, 0, 0, 0, 0);
488 
489  if (h->pict_type == AV_PICTURE_TYPE_B)
490  svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
491  0, 0, 0, 0, 1, 1);
492 
493  mb_type = MB_TYPE_SKIP;
494  } else {
495  mb_type = FFMIN(s->next_pic->f.mb_type[mb_xy], 6);
496  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
497  return -1;
498  if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
499  return -1;
500 
501  mb_type = MB_TYPE_16x16;
502  }
503  } else if (mb_type < 8) { /* INTER */
504  if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
505  mode = THIRDPEL_MODE;
506  else if (s->halfpel_flag &&
507  s->thirdpel_flag == !get_bits1(&h->gb))
508  mode = HALFPEL_MODE;
509  else
510  mode = FULLPEL_MODE;
511 
512  /* fill caches */
513  /* note ref_cache should contain here:
514  * ????????
515  * ???11111
516  * N??11111
517  * N??11111
518  * N??11111
519  */
520 
521  for (m = 0; m < 2; m++) {
522  if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
523  for (i = 0; i < 4; i++)
524  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
525  h->cur_pic.f.motion_val[m][b_xy - 1 + i * h->b_stride]);
526  } else {
527  for (i = 0; i < 4; i++)
528  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
529  }
530  if (h->mb_y > 0) {
531  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
532  h->cur_pic.f.motion_val[m][b_xy - h->b_stride],
533  4 * 2 * sizeof(int16_t));
534  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
535  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
536 
537  if (h->mb_x < h->mb_width - 1) {
538  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
539  h->cur_pic.f.motion_val[m][b_xy - h->b_stride + 4]);
540  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
541  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
542  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
543  } else
544  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
545  if (h->mb_x > 0) {
546  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
547  h->cur_pic.f.motion_val[m][b_xy - h->b_stride - 1]);
548  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
549  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
550  } else
551  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
552  } else
553  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
554  PART_NOT_AVAILABLE, 8);
555 
556  if (h->pict_type != AV_PICTURE_TYPE_B)
557  break;
558  }
559 
560  /* decode motion vector(s) and form prediction(s) */
561  if (h->pict_type == AV_PICTURE_TYPE_P) {
562  if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
563  return -1;
564  } else { /* AV_PICTURE_TYPE_B */
565  if (mb_type != 2) {
566  if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
567  return -1;
568  } else {
569  for (i = 0; i < 4; i++)
570  memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
571  0, 4 * 2 * sizeof(int16_t));
572  }
573  if (mb_type != 1) {
574  if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
575  return -1;
576  } else {
577  for (i = 0; i < 4; i++)
578  memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
579  0, 4 * 2 * sizeof(int16_t));
580  }
581  }
582 
583  mb_type = MB_TYPE_16x16;
584  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
585  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
586 
587  if (mb_type == 8) {
588  if (h->mb_x > 0) {
589  for (i = 0; i < 4; i++)
590  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
591  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
592  h->left_samples_available = 0x5F5F;
593  }
594  if (h->mb_y > 0) {
595  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
596  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
597  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
598  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];
599 
600  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
601  h->top_samples_available = 0x33FF;
602  }
603 
604  /* decode prediction codes for luma blocks */
605  for (i = 0; i < 16; i += 2) {
606  vlc = svq3_get_ue_golomb(&h->gb);
607 
608  if (vlc >= 25U) {
609  av_log(h->avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
610  return -1;
611  }
612 
613  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
614  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
615 
616  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
617  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
618 
619  if (left[1] == -1 || left[2] == -1) {
620  av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
621  return -1;
622  }
623  }
624  } else { /* mb_type == 33, DC_128_PRED block type */
625  for (i = 0; i < 4; i++)
626  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
627  }
628 
629  write_back_intra_pred_mode(h);
630 
631  if (mb_type == 8) {
632  ff_h264_check_intra4x4_pred_mode(h);
633 
634  h->top_samples_available = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
635  h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
636  } else {
637  for (i = 0; i < 4; i++)
638  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
639 
640  h->top_samples_available = 0x33FF;
641  h->left_samples_available = 0x5F5F;
642  }
643 
644  mb_type = MB_TYPE_INTRA4x4;
645  } else { /* INTRA16x16 */
646  dir = i_mb_type_info[mb_type - 8].pred_mode;
647  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
648 
649  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
650  av_log(h->avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
651  return -1;
652  }
653 
654  cbp = i_mb_type_info[mb_type - 8].cbp;
655  mb_type = MB_TYPE_INTRA16x16;
656  }
657 
658  if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
659  for (i = 0; i < 4; i++)
660  memset(h->cur_pic.f.motion_val[0][b_xy + i * h->b_stride],
661  0, 4 * 2 * sizeof(int16_t));
662  if (h->pict_type == AV_PICTURE_TYPE_B) {
663  for (i = 0; i < 4; i++)
664  memset(h->cur_pic.f.motion_val[1][b_xy + i * h->b_stride],
665  0, 4 * 2 * sizeof(int16_t));
666  }
667  }
668  if (!IS_INTRA4x4(mb_type)) {
669  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
670  }
671  if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
672  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
673  }
674 
675  if (!IS_INTRA16x16(mb_type) &&
676  (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
677  if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
678  av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
679  return -1;
680  }
681 
682  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
683  : golomb_to_inter_cbp[vlc];
684  }
685  if (IS_INTRA16x16(mb_type) ||
686  (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
687  h->qscale += svq3_get_se_golomb(&h->gb);
688 
689  if (h->qscale > 31u) {
690  av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
691  return -1;
692  }
693  }
694  if (IS_INTRA16x16(mb_type)) {
695  AV_ZERO128(h->mb_luma_dc[0] + 0);
696  AV_ZERO128(h->mb_luma_dc[0] + 8);
697  if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
699  "error while decoding intra luma dc\n");
700  return -1;
701  }
702  }
703 
704  if (cbp) {
705  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
706  const int type = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
707 
708  for (i = 0; i < 4; i++)
709  if ((cbp & (1 << i))) {
710  for (j = 0; j < 4; j++) {
711  k = index ? (1 * (j & 1) + 2 * (i & 1) +
712  2 * (j & 2) + 4 * (i & 2))
713  : (4 * i + j);
714  h->non_zero_count_cache[scan8[k]] = 1;
715 
716  if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
718  "error while decoding block\n");
719  return -1;
720  }
721  }
722  }
723 
724  if ((cbp & 0x30)) {
725  for (i = 1; i < 3; ++i)
726  if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
728  "error while decoding chroma dc block\n");
729  return -1;
730  }
731 
732  if ((cbp & 0x20)) {
733  for (i = 1; i < 3; i++) {
734  for (j = 0; j < 4; j++) {
735  k = 16 * i + j;
736  h->non_zero_count_cache[scan8[k]] = 1;
737 
738  if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
740  "error while decoding chroma ac block\n");
741  return -1;
742  }
743  }
744  }
745  }
746  }
747  }
748 
749  h->cbp = cbp;
750  h->cur_pic.f.mb_type[mb_xy] = mb_type;
751 
752  if (IS_INTRA(mb_type))
753  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
754 
755  return 0;
756 }
757 
758 static int svq3_decode_slice_header(AVCodecContext *avctx)
759 {
760  SVQ3Context *s = avctx->priv_data;
761  H264Context *h = &s->h;
762  const int mb_xy = h->mb_xy;
763  int i, header;
764  unsigned slice_id;
765 
766  header = get_bits(&h->gb, 8);
767 
768  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
769  /* TODO: what? */
770  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
771  return -1;
772  } else {
773  int length = header >> 5 & 3;
774 
775  s->next_slice_index = get_bits_count(&h->gb) +
776  8 * show_bits(&h->gb, 8 * length) +
777  8 * length;
778 
779  if (s->next_slice_index > h->gb.size_in_bits) {
780  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
781  return -1;
782  }
783 
784  h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
785  skip_bits(&h->gb, 8);
786 
787  if (s->watermark_key) {
788  uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
789  AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
790  header ^ s->watermark_key);
791  }
792  if (length > 0) {
793  memcpy((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
794  &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
795  }
796  skip_bits_long(&h->gb, 0);
797  }
798 
799  if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
800  av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
801  return -1;
802  }
803 
804  h->slice_type = golomb_to_pict_type[slice_id];
805 
806  if ((header & 0x9F) == 2) {
807  i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
808  h->mb_skip_run = get_bits(&h->gb, i) -
809  (h->mb_y * h->mb_width + h->mb_x);
810  } else {
811  skip_bits1(&h->gb);
812  h->mb_skip_run = 0;
813  }
814 
815  h->slice_num = get_bits(&h->gb, 8);
816  h->qscale = get_bits(&h->gb, 5);
817  s->adaptive_quant = get_bits1(&h->gb);
818 
819  /* unknown fields */
820  skip_bits1(&h->gb);
821 
822  if (s->unknown_flag)
823  skip_bits1(&h->gb);
824 
825  skip_bits1(&h->gb);
826  skip_bits(&h->gb, 2);
827 
828  while (get_bits1(&h->gb))
829  skip_bits(&h->gb, 8);
830 
831  /* reset intra predictors and invalidate motion vector references */
832  if (h->mb_x > 0) {
833  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
834  -1, 4 * sizeof(int8_t));
835  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
836  -1, 8 * sizeof(int8_t) * h->mb_x);
837  }
838  if (h->mb_y > 0) {
839  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
840  -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));
841 
842  if (h->mb_x > 0)
843  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
844  }
845 
846  return 0;
847 }
848 
849 static av_cold int svq3_decode_init(AVCodecContext *avctx)
850 {
851  SVQ3Context *s = avctx->priv_data;
852  H264Context *h = &s->h;
853  int m;
854  unsigned char *extradata;
855  unsigned char *extradata_end;
856  unsigned int size;
857  int marker_found = 0;
858 
859  s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
860  s->last_pic = av_mallocz(sizeof(*s->last_pic));
861  s->next_pic = av_mallocz(sizeof(*s->next_pic));
862  if (!s->next_pic || !s->last_pic || !s->cur_pic) {
863  av_freep(&s->cur_pic);
864  av_freep(&s->last_pic);
865  av_freep(&s->next_pic);
866  return AVERROR(ENOMEM);
867  }
868 
869  if (ff_h264_decode_init(avctx) < 0)
870  return -1;
871 
872  h->flags = avctx->flags;
873  h->is_complex = 1;
874  h->sps.chroma_format_idc = 1;
876  avctx->pix_fmt = avctx->codec->pix_fmts[0];
877 
878  h->chroma_qp[0] = h->chroma_qp[1] = 4;
879  h->chroma_x_shift = h->chroma_y_shift = 1;
880 
881  s->halfpel_flag = 1;
882  s->thirdpel_flag = 1;
883  s->unknown_flag = 0;
884 
885  /* prowl for the "SEQH" marker in the extradata */
886  extradata = (unsigned char *)avctx->extradata;
887  extradata_end = avctx->extradata + avctx->extradata_size;
888  if (extradata) {
889  for (m = 0; m + 8 < avctx->extradata_size; m++) {
890  if (!memcmp(extradata, "SEQH", 4)) {
891  marker_found = 1;
892  break;
893  }
894  extradata++;
895  }
896  }
897 
898  /* if a match was found, parse the extra data */
899  if (marker_found) {
900  GetBitContext gb;
901  int frame_size_code;
902 
903  size = AV_RB32(&extradata[4]);
904  if (size > extradata_end - extradata - 8)
905  return AVERROR_INVALIDDATA;
906  init_get_bits(&gb, extradata + 8, size * 8);
907 
908  /* 'frame size code' and optional 'width, height' */
909  frame_size_code = get_bits(&gb, 3);
910  switch (frame_size_code) {
911  case 0:
912  avctx->width = 160;
913  avctx->height = 120;
914  break;
915  case 1:
916  avctx->width = 128;
917  avctx->height = 96;
918  break;
919  case 2:
920  avctx->width = 176;
921  avctx->height = 144;
922  break;
923  case 3:
924  avctx->width = 352;
925  avctx->height = 288;
926  break;
927  case 4:
928  avctx->width = 704;
929  avctx->height = 576;
930  break;
931  case 5:
932  avctx->width = 240;
933  avctx->height = 180;
934  break;
935  case 6:
936  avctx->width = 320;
937  avctx->height = 240;
938  break;
939  case 7:
940  avctx->width = get_bits(&gb, 12);
941  avctx->height = get_bits(&gb, 12);
942  break;
943  }
944 
945  s->halfpel_flag = get_bits1(&gb);
946  s->thirdpel_flag = get_bits1(&gb);
947 
948  /* unknown fields */
949  skip_bits1(&gb);
950  skip_bits1(&gb);
951  skip_bits1(&gb);
952  skip_bits1(&gb);
953 
954  h->low_delay = get_bits1(&gb);
955 
956  /* unknown field */
957  skip_bits1(&gb);
958 
959  while (get_bits1(&gb))
960  skip_bits(&gb, 8);
961 
962  s->unknown_flag = get_bits1(&gb);
963  avctx->has_b_frames = !h->low_delay;
964  if (s->unknown_flag) {
965 #if CONFIG_ZLIB
966  unsigned watermark_width = svq3_get_ue_golomb(&gb);
967  unsigned watermark_height = svq3_get_ue_golomb(&gb);
968  int u1 = svq3_get_ue_golomb(&gb);
969  int u2 = get_bits(&gb, 8);
970  int u3 = get_bits(&gb, 2);
971  int u4 = svq3_get_ue_golomb(&gb);
972  unsigned long buf_len = watermark_width *
973  watermark_height * 4;
974  int offset = get_bits_count(&gb) + 7 >> 3;
975  uint8_t *buf;
976 
977  if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
978  return -1;
979 
980  buf = av_malloc(buf_len);
981  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
982  watermark_width, watermark_height);
983  av_log(avctx, AV_LOG_DEBUG,
984  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
985  u1, u2, u3, u4, offset);
986  if (uncompress(buf, &buf_len, extradata + 8 + offset,
987  size - offset) != Z_OK) {
988  av_log(avctx, AV_LOG_ERROR,
989  "could not uncompress watermark logo\n");
990  av_free(buf);
991  return -1;
992  }
993  s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
994  s->watermark_key = s->watermark_key << 16 | s->watermark_key;
995  av_log(avctx, AV_LOG_DEBUG,
996  "watermark key %#x\n", s->watermark_key);
997  av_free(buf);
998 #else
999  av_log(avctx, AV_LOG_ERROR,
1000  "this svq3 file contains watermark which need zlib support compiled in\n");
1001  return -1;
1002 #endif
1003  }
1004  }
1005 
1006  h->width = avctx->width;
1007  h->height = avctx->height;
1008  h->mb_width = (h->width + 15) / 16;
1009  h->mb_height = (h->height + 15) / 16;
1010  h->mb_stride = h->mb_width + 1;
1011  h->mb_num = h->mb_width * h->mb_height;
1012  h->b_stride = 4 * h->mb_width;
1013  s->h_edge_pos = h->mb_width * 16;
1014  s->v_edge_pos = h->mb_height * 16;
1015 
1016  if (ff_h264_alloc_tables(h) < 0) {
1017  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1018  return AVERROR(ENOMEM);
1019  }
1020 
1021  return 0;
1022 }
1023 
1024 static int get_buffer(AVCodecContext *avctx, Picture *pic)
1025 {
1026  SVQ3Context *s = avctx->priv_data;
1027  H264Context *h = &s->h;
1028  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1029  const int mb_array_size = h->mb_stride * h->mb_height;
1030  const int b4_stride = h->mb_width * 4 + 1;
1031  const int b4_array_size = b4_stride * h->mb_height * 4;
1032  int ret;
1033 
1034  if (!pic->motion_val_base[0]) {
1035  int i;
1036 
1037  pic->mb_type_base = av_mallocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1038  if (!pic->mb_type_base)
1039  return AVERROR(ENOMEM);
1040  pic->f.mb_type = pic->mb_type_base + 2 * h->mb_stride + 1;
1041 
1042  for (i = 0; i < 2; i++) {
1043  pic->motion_val_base[i] = av_mallocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1044  pic->f.ref_index[i] = av_mallocz(4 * mb_array_size);
1045  if (!pic->motion_val_base[i] || !pic->f.ref_index[i])
1046  return AVERROR(ENOMEM);
1047 
1048  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
1049  }
1050  }
1051  pic->f.motion_subsample_log2 = 2;
1052  pic->f.reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1053 
1054  ret = ff_get_buffer(avctx, &pic->f);
1055  if (!h->edge_emu_buffer) {
1056  h->edge_emu_buffer = av_mallocz(pic->f.linesize[0] * 17);
1057  if (!h->edge_emu_buffer)
1058  return AVERROR(ENOMEM);
1059  }
1060 
1061  h->linesize = pic->f.linesize[0];
1062  h->uvlinesize = pic->f.linesize[1];
1063 
1064  return ret;
1065 }
1066 
1067 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1068  int *got_frame, AVPacket *avpkt)
1069 {
1070  SVQ3Context *s = avctx->priv_data;
1071  H264Context *h = &s->h;
1072  int buf_size = avpkt->size;
1073  int left;
1074  uint8_t *buf;
1075  int ret, m, i;
1076 
1077  /* special case for last picture */
1078  if (buf_size == 0) {
1079  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1080  *(AVFrame *) data = s->next_pic->f;
1081  s->last_frame_output = 1;
1082  *got_frame = 1;
1083  }
1084  return 0;
1085  }
1086 
1087  h->mb_x = h->mb_y = h->mb_xy = 0;
1088 
1089  if (s->watermark_key) {
1090  av_fast_malloc(&s->buf, &s->buf_size,
1091  buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1092  if (!s->buf)
1093  return AVERROR(ENOMEM);
1094  memcpy(s->buf, avpkt->data, buf_size);
1095  buf = s->buf;
1096  } else {
1097  buf = avpkt->data;
1098  }
1099 
1100  init_get_bits(&h->gb, buf, 8 * buf_size);
1101 
1102  if (svq3_decode_slice_header(avctx))
1103  return -1;
1104 
1105  h->pict_type = h->slice_type;
1106 
1107  if (h->pict_type != AV_PICTURE_TYPE_B)
1108  FFSWAP(Picture*, s->next_pic, s->last_pic);
1109 
1110  if (s->cur_pic->f.data[0])
1111  avctx->release_buffer(avctx, &s->cur_pic->f);
1112 
1113  /* for skipping the frame */
1114  s->cur_pic->f.pict_type = h->pict_type;
1115  s->cur_pic->f.key_frame = (h->pict_type == AV_PICTURE_TYPE_I);
1116 
1117  ret = get_buffer(avctx, s->cur_pic);
1118  if (ret < 0)
1119  return ret;
1120 
1121  h->cur_pic_ptr = s->cur_pic;
1122  h->cur_pic = *s->cur_pic;
1123 
1124  for (i = 0; i < 16; i++) {
1125  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1126  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1127  }
1128  for (i = 0; i < 16; i++) {
1129  h->block_offset[16 + i] =
1130  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1131  h->block_offset[48 + 16 + i] =
1132  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1133  }
1134 
1135  if (h->pict_type != AV_PICTURE_TYPE_I) {
1136  if (!s->last_pic->f.data[0]) {
1137  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1138  ret = get_buffer(avctx, s->last_pic);
1139  if (ret < 0)
1140  return ret;
1141  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1142  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1143  s->last_pic->f.linesize[1]);
1144  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1145  s->last_pic->f.linesize[2]);
1146  }
1147 
1148  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1149  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1150  ret = get_buffer(avctx, s->next_pic);
1151  if (ret < 0)
1152  return ret;
1153  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1154  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1155  s->next_pic->f.linesize[1]);
1156  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1157  s->next_pic->f.linesize[2]);
1158  }
1159  }
1160 
1161  if (avctx->debug & FF_DEBUG_PICT_INFO)
1163  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1165  s->halfpel_flag, s->thirdpel_flag,
1166  s->adaptive_quant, h->qscale, h->slice_num);
1167 
1168  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1169  avctx->skip_frame >= AVDISCARD_NONKEY && h->pict_type != AV_PICTURE_TYPE_I ||
1170  avctx->skip_frame >= AVDISCARD_ALL)
1171  return 0;
1172 
1173  if (s->next_p_frame_damaged) {
1174  if (h->pict_type == AV_PICTURE_TYPE_B)
1175  return 0;
1176  else
1177  s->next_p_frame_damaged = 0;
1178  }
1179 
1180  if (h->pict_type == AV_PICTURE_TYPE_B) {
1181  h->frame_num_offset = h->slice_num - h->prev_frame_num;
1182 
1183  if (h->frame_num_offset < 0)
1184  h->frame_num_offset += 256;
1185  if (h->frame_num_offset == 0 ||
1186  h->frame_num_offset >= h->prev_frame_num_offset) {
1187  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1188  return -1;
1189  }
1190  } else {
1191  h->prev_frame_num = h->frame_num;
1192  h->frame_num = h->slice_num;
1193  h->prev_frame_num_offset = h->frame_num - h->prev_frame_num;
1194 
1195  if (h->prev_frame_num_offset < 0)
1196  h->prev_frame_num_offset += 256;
1197  }
1198 
1199  for (m = 0; m < 2; m++) {
1200  int i;
1201  for (i = 0; i < 4; i++) {
1202  int j;
1203  for (j = -1; j < 4; j++)
1204  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1205  if (i < 3)
1206  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1207  }
1208  }
1209 
1210  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1211  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1212  unsigned mb_type;
1213  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1214 
1215  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1216  ((get_bits_count(&h->gb) & 7) == 0 ||
1217  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1218  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1219  h->gb.size_in_bits = 8 * buf_size;
1220 
1221  if (svq3_decode_slice_header(avctx))
1222  return -1;
1223 
1224  /* TODO: support s->mb_skip_run */
1225  }
1226 
1227  mb_type = svq3_get_ue_golomb(&h->gb);
1228 
1229  if (h->pict_type == AV_PICTURE_TYPE_I)
1230  mb_type += 8;
1231  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1232  mb_type += 4;
1233  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1235  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1236  return -1;
1237  }
1238 
1239  if (mb_type != 0 || h->cbp)
1240  ff_h264_hl_decode_mb(h);
1241 
1242  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1243  h->cur_pic.f.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1244  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1245  }
1246 
1247  ff_draw_horiz_band(avctx, NULL, s->cur_pic, s->last_pic->f.data[0] ? s->last_pic : NULL,
1248  16 * h->mb_y, 16, h->picture_structure, 0, 0,
1249  h->low_delay, h->mb_height * 16, h->mb_width * 16);
1250  }
1251 
1252  left = buf_size*8 - get_bits_count(&h->gb);
1253 
1254  if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1255  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1256  //av_hex_dump(stderr, buf+buf_size-8, 8);
1257  }
1258 
1259  if (left < 0) {
1260  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1261  return -1;
1262  }
1263 
1264  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1265  *(AVFrame *)data = s->cur_pic->f;
1266  else
1267  *(AVFrame *)data = s->last_pic->f;
1268 
1269  /* Do not output the last pic after seeking. */
1270  if (s->last_pic->f.data[0] || h->low_delay)
1271  *got_frame = 1;
1272 
1273  if (h->pict_type != AV_PICTURE_TYPE_B) {
1274  FFSWAP(Picture*, s->cur_pic, s->next_pic);
1275  }
1276 
1277  return buf_size;
1278 }
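Because the decoder reorders B-frames (avctx->has_b_frames is set from !low_delay during init), the final picture only comes out through the buf_size == 0 path at the top of this function. A minimal caller-side flush sketch using the decode API of this FFmpeg generation; `avctx` and `frame` are assumed to be set up elsewhere, and this is illustrative only:

    AVPacket pkt;
    int got_frame = 0;
    av_init_packet(&pkt);
    pkt.data = NULL;    /* empty packet => buf_size == 0 in svq3_decode_frame() */
    pkt.size = 0;
    avcodec_decode_video2(avctx, frame, &got_frame, &pkt);
    if (got_frame) {
        /* frame now holds the delayed last picture (s->next_pic) */
    }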
1279 
1280 static void free_picture(AVCodecContext *avctx, Picture *pic)
1281 {
1282  int i;
1283  for (i = 0; i < 2; i++) {
1284  av_freep(&pic->motion_val_base[i]);
1285  av_freep(&pic->f.ref_index[i]);
1286  }
1287  av_freep(&pic->mb_type_base);
1288 
1289  if (pic->f.data[0])
1290  avctx->release_buffer(avctx, &pic->f);
1291  av_freep(&pic);
1292 }
1293 
1294 static int svq3_decode_end(AVCodecContext *avctx)
1295 {
1296  SVQ3Context *s = avctx->priv_data;
1297  H264Context *h = &s->h;
1298 
1299  free_picture(avctx, s->cur_pic);
1300  free_picture(avctx, s->next_pic);
1301  free_picture(avctx, s->last_pic);
1302 
1303  ff_h264_free_context(h);
1304 
1305  av_freep(&s->buf);
1306  s->buf_size = 0;
1308 
1309  return 0;
1310 }
1311 
1312 AVCodec ff_svq3_decoder = {
1313  .name = "svq3",
1314  .type = AVMEDIA_TYPE_VIDEO,
1315  .id = AV_CODEC_ID_SVQ3,
1316  .priv_data_size = sizeof(SVQ3Context),
1317  .init = svq3_decode_init,
1318  .close = svq3_decode_end,
1319  .decode = svq3_decode_frame,
1320  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1321  CODEC_CAP_DR1 |
1322  CODEC_CAP_DELAY,
1323  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1324  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1325  AV_PIX_FMT_NONE},
1326 };