svq3.c
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
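/*
 * Illustrative sketch only (not part of the original file): one way a calling
 * app might fill in the fields described above, assuming hypothetical
 * variables `imagedesc` (pointer to the ImageDescription payload, starting
 * with the 'SVQ3' fourcc rather than the 4-byte atom length) and
 * `imagedesc_size` (its size in bytes):
 *
 *     avctx->extradata = av_mallocz(imagedesc_size + FF_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, imagedesc, imagedesc_size);
 *     avctx->extradata_size = imagedesc_size;
 */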
42 #include "internal.h"
43 #include "dsputil.h"
44 #include "avcodec.h"
45 #include "mpegvideo.h"
46 #include "h264.h"
47 
48 #include "h264data.h" // FIXME FIXME FIXME
49 
50 #include "h264_mvpred.h"
51 #include "golomb.h"
52 #include "rectangle.h"
53 #include "vdpau_internal.h"
54 
55 #if CONFIG_ZLIB
56 #include <zlib.h>
57 #endif
58 
59 #include "svq1.h"
60 
61 /**
62  * @file
63  * svq3 decoder.
64  */
65 
66 typedef struct {
67  H264Context h;
68  int halfpel_flag;
69  int thirdpel_flag;
70  int unknown_flag;
71  int next_slice_index;
72  uint32_t watermark_key;
73  uint8_t *buf;
74  int buf_size;
75 } SVQ3Context;
76 
77 #define FULLPEL_MODE 1
78 #define HALFPEL_MODE 2
79 #define THIRDPEL_MODE 3
80 #define PREDICT_MODE 4
81 
82 /* dual scan (from some older h264 draft)
83  *  o-->o-->o   o
84  *          |  /|
85  *  o   o   o / o
86  *  | / |   |/  |
87  *  o   o   o   o
88  *    /
89  *  o-->o-->o-->o
90  */
91 static const uint8_t svq3_scan[16] = {
92  0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
93  2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
94  0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
95  0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
96 };
97 
98 static const uint8_t svq3_pred_0[25][2] = {
99  { 0, 0 },
100  { 1, 0 }, { 0, 1 },
101  { 0, 2 }, { 1, 1 }, { 2, 0 },
102  { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
103  { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
104  { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
105  { 2, 4 }, { 3, 3 }, { 4, 2 },
106  { 4, 3 }, { 3, 4 },
107  { 4, 4 }
108 };
109 
110 static const int8_t svq3_pred_1[6][6][5] = {
111  { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
112  { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
113  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
114  { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
115  { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
116  { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
117  { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
118  { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
119  { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
120  { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
121  { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
122  { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
123 };
124 
125 static const struct {
126  uint8_t run;
127  uint8_t level;
128 } svq3_dct_tables[2][16] = {
129  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
130  { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
131  { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
132  { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
133 };
134 
135 static const uint32_t svq3_dequant_coeff[32] = {
136  3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
137  9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
138  24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
139  61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
140 };
141 
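/*
 * Dequantize and inverse-transform the 4x4 block of luma DC coefficients.
 * SVQ3 uses a 4x4 integer transform built from 13/17/7 butterflies, and the
 * results are scattered (via x_offset[] and the row indices 0/2/8/10) into
 * the DC position of each 4x4 sub-block of the macroblock coefficient buffer.
 */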
142 void ff_svq3_luma_dc_dequant_idct_c(DCTELEM *output, DCTELEM *input, int qp)
143 {
144  const int qmul = svq3_dequant_coeff[qp];
145 #define stride 16
146  int i;
147  int temp[16];
148  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
149 
150  for (i = 0; i < 4; i++) {
151  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
152  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
153  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
154  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
155 
156  temp[4 * i + 0] = z0 + z3;
157  temp[4 * i + 1] = z1 + z2;
158  temp[4 * i + 2] = z1 - z2;
159  temp[4 * i + 3] = z0 - z3;
160  }
161 
162  for (i = 0; i < 4; i++) {
163  const int offset = x_offset[i];
164  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
165  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
166  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
167  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
168 
169  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
170  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
171  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
172  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
173  }
174 }
175 #undef stride
176 
177 void ff_svq3_add_idct_c(uint8_t *dst, DCTELEM *block,
178  int stride, int qp, int dc)
179 {
180  const int qmul = svq3_dequant_coeff[qp];
181  int i;
182 
183  if (dc) {
184  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
185  : qmul * (block[0] >> 3) / 2);
186  block[0] = 0;
187  }
188 
189  for (i = 0; i < 4; i++) {
190  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
191  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
192  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
193  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
194 
195  block[0 + 4 * i] = z0 + z3;
196  block[1 + 4 * i] = z1 + z2;
197  block[2 + 4 * i] = z1 - z2;
198  block[3 + 4 * i] = z0 - z3;
199  }
200 
201  for (i = 0; i < 4; i++) {
202  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
203  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
204  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
205  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
206  const int rr = (dc + 0x80000);
207 
208  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
209  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
210  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
211  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
212  }
213 }
214 
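/*
 * Decode one run/level-coded block of residual coefficients. Values are read
 * as unsigned Exp-Golomb codes with the sign carried in the LSB, mapped
 * through svq3_dct_tables[] (or computed directly from the code value for
 * chroma DC, type 3), and stored following the scan pattern selected by type.
 */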
215 static inline int svq3_decode_block(GetBitContext *gb, DCTELEM *block,
216  int index, const int type)
217 {
218  static const uint8_t *const scan_patterns[4] =
219  { luma_dc_zigzag_scan, zigzag_scan, svq3_scan, chroma_dc_scan };
220 
221  int run, level, sign, limit;
222  unsigned vlc;
223  const int intra = 3 * type >> 2;
224  const uint8_t *const scan = scan_patterns[type];
225 
226  for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
227  for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
228  if ((int32_t)vlc < 0)
229  return -1;
230 
231  sign = (vlc & 1) ? 0 : -1;
232  vlc = vlc + 1 >> 1;
233 
234  if (type == 3) {
235  if (vlc < 3) {
236  run = 0;
237  level = vlc;
238  } else if (vlc < 4) {
239  run = 1;
240  level = 1;
241  } else {
242  run = vlc & 0x3;
243  level = (vlc + 9 >> 2) - run;
244  }
245  } else {
246  if (vlc < 16U) {
247  run = svq3_dct_tables[intra][vlc].run;
248  level = svq3_dct_tables[intra][vlc].level;
249  } else if (intra) {
250  run = vlc & 0x7;
251  level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
252  } else {
253  run = vlc & 0xF;
254  level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
255  }
256  }
257 
258 
259  if ((index += run) >= limit)
260  return -1;
261 
262  block[scan[index]] = (level ^ sign) - sign;
263  }
264 
265  if (type != 2) {
266  break;
267  }
268  }
269 
270  return 0;
271 }
272 
273 static inline void svq3_mc_dir_part(MpegEncContext *s,
274  int x, int y, int width, int height,
275  int mx, int my, int dxy,
276  int thirdpel, int dir, int avg)
277 {
278  const Picture *pic = (dir == 0) ? &s->last_picture : &s->next_picture;
279  uint8_t *src, *dest;
280  int i, emu = 0;
281  int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
282 
283  mx += x;
284  my += y;
285 
286  if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
287  my < 0 || my >= s->v_edge_pos - height - 1) {
288  if ((s->flags & CODEC_FLAG_EMU_EDGE))
289  emu = 1;
290 
291  mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
292  my = av_clip(my, -16, s->v_edge_pos - height + 15);
293  }
294 
295  /* form component predictions */
296  dest = s->current_picture.f.data[0] + x + y * s->linesize;
297  src = pic->f.data[0] + mx + my * s->linesize;
298 
299  if (emu) {
300  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize,
301  width + 1, height + 1,
302  mx, my, s->h_edge_pos, s->v_edge_pos);
303  src = s->edge_emu_buffer;
304  }
305  if (thirdpel)
306  (avg ? s->dsp.avg_tpel_pixels_tab
307  : s->dsp.put_tpel_pixels_tab)[dxy](dest, src, s->linesize,
308  width, height);
309  else
310  (avg ? s->dsp.avg_pixels_tab
311  : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src, s->linesize,
312  height);
313 
314  if (!(s->flags & CODEC_FLAG_GRAY)) {
315  mx = mx + (mx < (int) x) >> 1;
316  my = my + (my < (int) y) >> 1;
317  width = width >> 1;
318  height = height >> 1;
319  blocksize++;
320 
321  for (i = 1; i < 3; i++) {
322  dest = s->current_picture.f.data[i] + (x >> 1) + (y >> 1) * s->uvlinesize;
323  src = pic->f.data[i] + mx + my * s->uvlinesize;
324 
325  if (emu) {
326  s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->uvlinesize,
327  width + 1, height + 1,
328  mx, my, (s->h_edge_pos >> 1),
329  s->v_edge_pos >> 1);
330  src = s->edge_emu_buffer;
331  }
332  if (thirdpel)
333  (avg ? s->dsp.avg_tpel_pixels_tab
334  : s->dsp.put_tpel_pixels_tab)[dxy](dest, src,
335  s->uvlinesize,
336  width, height);
337  else
338  (avg ? s->dsp.avg_pixels_tab
339  : s->dsp.put_pixels_tab)[blocksize][dxy](dest, src,
340  s->uvlinesize,
341  height);
342  }
343  }
344 }
345 
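/*
 * For every partition of the macroblock: decode (or, in PREDICT_MODE, derive
 * from the co-located motion vectors of the next picture) a motion vector,
 * clip it to the frame border, run luma/chroma motion compensation at full-,
 * half- or third-pel precision via svq3_mc_dir_part(), and write the result
 * back into mv_cache and motion_val.
 */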
346 static inline int svq3_mc_dir(H264Context *h, int size, int mode,
347  int dir, int avg)
348 {
349  int i, j, k, mx, my, dx, dy, x, y;
350  MpegEncContext *const s = (MpegEncContext *)h;
351  const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
352  const int part_height = 16 >> ((unsigned)(size + 1) / 3);
353  const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
354  const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
355  const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
356 
357  for (i = 0; i < 16; i += part_height)
358  for (j = 0; j < 16; j += part_width) {
359  const int b_xy = (4 * s->mb_x + (j >> 2)) +
360  (4 * s->mb_y + (i >> 2)) * h->b_stride;
361  int dxy;
362  x = 16 * s->mb_x + j;
363  y = 16 * s->mb_y + i;
364  k = (j >> 2 & 1) + (i >> 1 & 2) +
365  (j >> 1 & 4) + (i & 8);
366 
367  if (mode != PREDICT_MODE) {
368  pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
369  } else {
370  mx = s->next_picture.f.motion_val[0][b_xy][0] << 1;
371  my = s->next_picture.f.motion_val[0][b_xy][1] << 1;
372 
373  if (dir == 0) {
374  mx = mx * h->frame_num_offset /
375  h->prev_frame_num_offset + 1 >> 1;
376  my = my * h->frame_num_offset /
377  h->prev_frame_num_offset + 1 >> 1;
378  } else {
379  mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
380  h->prev_frame_num_offset + 1 >> 1;
381  my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
382  h->prev_frame_num_offset + 1 >> 1;
383  }
384  }
385 
386  /* clip motion vector prediction to frame border */
387  mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
388  my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
389 
390  /* get (optional) motion vector differential */
391  if (mode == PREDICT_MODE) {
392  dx = dy = 0;
393  } else {
394  dy = svq3_get_se_golomb(&s->gb);
395  dx = svq3_get_se_golomb(&s->gb);
396 
397  if (dx == INVALID_VLC || dy == INVALID_VLC) {
398  av_log(h->s.avctx, AV_LOG_ERROR, "invalid MV vlc\n");
399  return -1;
400  }
401  }
402 
403  /* compute motion vector */
404  if (mode == THIRDPEL_MODE) {
405  int fx, fy;
406  mx = (mx + 1 >> 1) + dx;
407  my = (my + 1 >> 1) + dy;
408  fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
409  fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
410  dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
411 
412  svq3_mc_dir_part(s, x, y, part_width, part_height,
413  fx, fy, dxy, 1, dir, avg);
414  mx += mx;
415  my += my;
416  } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
417  mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
418  my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
419  dxy = (mx & 1) + 2 * (my & 1);
420 
421  svq3_mc_dir_part(s, x, y, part_width, part_height,
422  mx >> 1, my >> 1, dxy, 0, dir, avg);
423  mx *= 3;
424  my *= 3;
425  } else {
426  mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
427  my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
428 
429  svq3_mc_dir_part(s, x, y, part_width, part_height,
430  mx, my, 0, 0, dir, avg);
431  mx *= 6;
432  my *= 6;
433  }
434 
435  /* update mv_cache */
436  if (mode != PREDICT_MODE) {
437  int32_t mv = pack16to32(mx, my);
438 
439  if (part_height == 8 && i < 8) {
440  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);
441 
442  if (part_width == 8 && j < 8)
443  AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
444  }
445  if (part_width == 8 && j < 8)
446  AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
447  if (part_width == 4 || part_height == 4)
448  AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
449  }
450 
451  /* write back motion vectors */
452  fill_rectangle(s->current_picture.f.motion_val[dir][b_xy],
453  part_width >> 2, part_height >> 2, h->b_stride,
454  pack16to32(mx, my), 4);
455  }
456 
457  return 0;
458 }
459 
460 static int svq3_decode_mb(SVQ3Context *svq3, unsigned int mb_type)
461 {
462  H264Context *h = &svq3->h;
463  int i, j, k, m, dir, mode;
464  int cbp = 0;
465  uint32_t vlc;
466  int8_t *top, *left;
467  MpegEncContext *const s = (MpegEncContext *)h;
468  const int mb_xy = h->mb_xy;
469  const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride;
470 
471  h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
472  h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
473  h->topright_samples_available = 0xFFFF;
474 
475  if (mb_type == 0) { /* SKIP */
476  if (s->pict_type == AV_PICTURE_TYPE_P ||
477  s->next_picture.f.mb_type[mb_xy] == -1) {
478  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
479  0, 0, 0, 0, 0, 0);
480 
481  if (s->pict_type == AV_PICTURE_TYPE_B)
482  svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
483  0, 0, 0, 0, 1, 1);
484 
485  mb_type = MB_TYPE_SKIP;
486  } else {
487  mb_type = FFMIN(s->next_picture.f.mb_type[mb_xy], 6);
488  if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 0, 0) < 0)
489  return -1;
490  if (svq3_mc_dir(h, mb_type, PREDICT_MODE, 1, 1) < 0)
491  return -1;
492 
493  mb_type = MB_TYPE_16x16;
494  }
495  } else if (mb_type < 8) { /* INTER */
496  if (svq3->thirdpel_flag && svq3->halfpel_flag == !get_bits1(&s->gb))
497  mode = THIRDPEL_MODE;
498  else if (svq3->halfpel_flag &&
499  svq3->thirdpel_flag == !get_bits1(&s->gb))
500  mode = HALFPEL_MODE;
501  else
502  mode = FULLPEL_MODE;
503 
504  /* fill caches */
505  /* note ref_cache should contain here:
506  * ????????
507  * ???11111
508  * N??11111
509  * N??11111
510  * N??11111
511  */
512 
513  for (m = 0; m < 2; m++) {
514  if (s->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
515  for (i = 0; i < 4; i++)
516  AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
517  s->current_picture.f.motion_val[m][b_xy - 1 + i * h->b_stride]);
518  } else {
519  for (i = 0; i < 4; i++)
520  AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
521  }
522  if (s->mb_y > 0) {
523  memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
524  s->current_picture.f.motion_val[m][b_xy - h->b_stride],
525  4 * 2 * sizeof(int16_t));
526  memset(&h->ref_cache[m][scan8[0] - 1 * 8],
527  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
528 
529  if (s->mb_x < s->mb_width - 1) {
530  AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
531  s->current_picture.f.motion_val[m][b_xy - h->b_stride + 4]);
532  h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
533  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
534  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
535  } else
536  h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
537  if (s->mb_x > 0) {
538  AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
539  s->current_picture.f.motion_val[m][b_xy - h->b_stride - 1]);
540  h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
541  (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
542  } else
543  h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
544  } else
545  memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
546  PART_NOT_AVAILABLE, 8);
547 
548  if (s->pict_type != AV_PICTURE_TYPE_B)
549  break;
550  }
551 
552  /* decode motion vector(s) and form prediction(s) */
553  if (s->pict_type == AV_PICTURE_TYPE_P) {
554  if (svq3_mc_dir(h, mb_type - 1, mode, 0, 0) < 0)
555  return -1;
556  } else { /* AV_PICTURE_TYPE_B */
557  if (mb_type != 2) {
558  if (svq3_mc_dir(h, 0, mode, 0, 0) < 0)
559  return -1;
560  } else {
561  for (i = 0; i < 4; i++)
562  memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride],
563  0, 4 * 2 * sizeof(int16_t));
564  }
565  if (mb_type != 1) {
566  if (svq3_mc_dir(h, 0, mode, 1, mb_type == 3) < 0)
567  return -1;
568  } else {
569  for (i = 0; i < 4; i++)
570  memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride],
571  0, 4 * 2 * sizeof(int16_t));
572  }
573  }
574 
575  mb_type = MB_TYPE_16x16;
576  } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
577  memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
578 
579  if (mb_type == 8) {
580  if (s->mb_x > 0) {
581  for (i = 0; i < 4; i++)
582  h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
583  if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
584  h->left_samples_available = 0x5F5F;
585  }
586  if (s->mb_y > 0) {
587  h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 0];
588  h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 1];
589  h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 2];
590  h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride] + 3];
591 
592  if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
593  h->top_samples_available = 0x33FF;
594  }
595 
596  /* decode prediction codes for luma blocks */
597  for (i = 0; i < 16; i += 2) {
598  vlc = svq3_get_ue_golomb(&s->gb);
599 
600  if (vlc >= 25U) {
601  av_log(h->s.avctx, AV_LOG_ERROR, "luma prediction:%d\n", vlc);
602  return -1;
603  }
604 
605  left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
606  top = &h->intra4x4_pred_mode_cache[scan8[i] - 8];
607 
608  left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
609  left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
610 
611  if (left[1] == -1 || left[2] == -1) {
612  av_log(h->s.avctx, AV_LOG_ERROR, "weird prediction\n");
613  return -1;
614  }
615  }
616  } else { /* mb_type == 33, DC_128_PRED block type */
617  for (i = 0; i < 4; i++)
618  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
619  }
620 
621  write_back_intra_pred_modes(h);
622 
623  if (mb_type == 8) {
624  ff_h264_check_intra4x4_pred_mode(h);
625 
626  h->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
627  h->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
628  } else {
629  for (i = 0; i < 4; i++)
630  memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
631 
632  h->top_samples_available = 0x33FF;
633  h->left_samples_available = 0x5F5F;
634  }
635 
636  mb_type = MB_TYPE_INTRA4x4;
637  } else { /* INTRA16x16 */
638  dir = i_mb_type_info[mb_type - 8].pred_mode;
639  dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
640 
641  if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) == -1) {
642  av_log(h->s.avctx, AV_LOG_ERROR, "check_intra_pred_mode = -1\n");
643  return -1;
644  }
645 
646  cbp = i_mb_type_info[mb_type - 8].cbp;
647  mb_type = MB_TYPE_INTRA16x16;
648  }
649 
650  if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
651  for (i = 0; i < 4; i++)
652  memset(s->current_picture.f.motion_val[0][b_xy + i * h->b_stride],
653  0, 4 * 2 * sizeof(int16_t));
654  if (s->pict_type == AV_PICTURE_TYPE_B) {
655  for (i = 0; i < 4; i++)
656  memset(s->current_picture.f.motion_val[1][b_xy + i * h->b_stride],
657  0, 4 * 2 * sizeof(int16_t));
658  }
659  }
660  if (!IS_INTRA4x4(mb_type)) {
661  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
662  }
663  if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
664  memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
665  s->dsp.clear_blocks(h->mb + 0);
666  s->dsp.clear_blocks(h->mb + 384);
667  }
668 
669  if (!IS_INTRA16x16(mb_type) &&
670  (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
671  if ((vlc = svq3_get_ue_golomb(&s->gb)) >= 48U){
672  av_log(h->s.avctx, AV_LOG_ERROR, "cbp_vlc=%d\n", vlc);
673  return -1;
674  }
675 
676  cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
677  : golomb_to_inter_cbp[vlc];
678  }
679  if (IS_INTRA16x16(mb_type) ||
680  (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
681  s->qscale += svq3_get_se_golomb(&s->gb);
682 
683  if (s->qscale > 31u) {
684  av_log(h->s.avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
685  return -1;
686  }
687  }
688  if (IS_INTRA16x16(mb_type)) {
689  AV_ZERO128(h->mb_luma_dc[0] + 0);
690  AV_ZERO128(h->mb_luma_dc[0] + 8);
691  if (svq3_decode_block(&s->gb, h->mb_luma_dc[0], 0, 1)) {
693  "error while decoding intra luma dc\n");
694  return -1;
695  }
696  }
697 
698  if (cbp) {
699  const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
700  const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
701 
702  for (i = 0; i < 4; i++)
703  if ((cbp & (1 << i))) {
704  for (j = 0; j < 4; j++) {
705  k = index ? (1 * (j & 1) + 2 * (i & 1) +
706  2 * (j & 2) + 4 * (i & 2))
707  : (4 * i + j);
708  h->non_zero_count_cache[scan8[k]] = 1;
709 
710  if (svq3_decode_block(&s->gb, &h->mb[16 * k], index, type)) {
712  "error while decoding block\n");
713  return -1;
714  }
715  }
716  }
717 
718  if ((cbp & 0x30)) {
719  for (i = 1; i < 3; ++i)
720  if (svq3_decode_block(&s->gb, &h->mb[16 * 16 * i], 0, 3)) {
722  "error while decoding chroma dc block\n");
723  return -1;
724  }
725 
726  if ((cbp & 0x20)) {
727  for (i = 1; i < 3; i++) {
728  for (j = 0; j < 4; j++) {
729  k = 16 * i + j;
730  h->non_zero_count_cache[scan8[k]] = 1;
731 
732  if (svq3_decode_block(&s->gb, &h->mb[16 * k], 1, 1)) {
734  "error while decoding chroma ac block\n");
735  return -1;
736  }
737  }
738  }
739  }
740  }
741  }
742 
743  h->cbp = cbp;
744  s->current_picture.f.mb_type[mb_xy] = mb_type;
745 
746  if (IS_INTRA(mb_type))
747  h->chroma_pred_mode = ff_h264_check_intra_pred_mode(h, DC_PRED8x8, 1);
748 
749  return 0;
750 }
751 
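/*
 * Parse an SVQ3 slice header: validate the 8-bit header byte, compute
 * next_slice_index from the explicit slice length field, undo the watermark
 * XOR scrambling if present, read slice type, qscale and the adaptive
 * quantization flag, then reset the intra prediction and motion vector state
 * along the slice boundary.
 */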
752 static int svq3_decode_slice_header(AVCodecContext *avctx)
753 {
754  SVQ3Context *svq3 = avctx->priv_data;
755  H264Context *h = &svq3->h;
756  MpegEncContext *s = &h->s;
757  const int mb_xy = h->mb_xy;
758  int i, header;
759  unsigned slice_id;
760 
761  header = get_bits(&s->gb, 8);
762 
763  if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
764  /* TODO: what? */
765  av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
766  return -1;
767  } else {
768  int length = header >> 5 & 3;
769 
770  svq3->next_slice_index = get_bits_count(&s->gb) +
771  8 * show_bits(&s->gb, 8 * length) +
772  8 * length;
773 
774  if (svq3->next_slice_index > s->gb.size_in_bits) {
775  av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
776  return -1;
777  }
778 
779  s->gb.size_in_bits = svq3->next_slice_index - 8 * (length - 1);
780  skip_bits(&s->gb, 8);
781 
782  if (svq3->watermark_key) {
783  uint32_t header = AV_RL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1]);
784  AV_WL32(&s->gb.buffer[(get_bits_count(&s->gb) >> 3) + 1],
785  header ^ svq3->watermark_key);
786  }
787  if (length > 0) {
788  memcpy((uint8_t *) &s->gb.buffer[get_bits_count(&s->gb) >> 3],
789  &s->gb.buffer[s->gb.size_in_bits >> 3], length - 1);
790  }
791  skip_bits_long(&s->gb, 0);
792  }
793 
794  if ((slice_id = svq3_get_ue_golomb(&s->gb)) >= 3) {
795  av_log(h->s.avctx, AV_LOG_ERROR, "illegal slice type %d \n", slice_id);
796  return -1;
797  }
798 
799  h->slice_type = golomb_to_pict_type[slice_id];
800 
801  if ((header & 0x9F) == 2) {
802  i = (s->mb_num < 64) ? 6 : (1 + av_log2(s->mb_num - 1));
803  s->mb_skip_run = get_bits(&s->gb, i) -
804  (s->mb_y * s->mb_width + s->mb_x);
805  } else {
806  skip_bits1(&s->gb);
807  s->mb_skip_run = 0;
808  }
809 
810  h->slice_num = get_bits(&s->gb, 8);
811  s->qscale = get_bits(&s->gb, 5);
812  s->adaptive_quant = get_bits1(&s->gb);
813 
814  /* unknown fields */
815  skip_bits1(&s->gb);
816 
817  if (svq3->unknown_flag)
818  skip_bits1(&s->gb);
819 
820  skip_bits1(&s->gb);
821  skip_bits(&s->gb, 2);
822 
823  while (get_bits1(&s->gb))
824  skip_bits(&s->gb, 8);
825 
826  /* reset intra predictors and invalidate motion vector references */
827  if (s->mb_x > 0) {
828  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
829  -1, 4 * sizeof(int8_t));
830  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_x],
831  -1, 8 * sizeof(int8_t) * s->mb_x);
832  }
833  if (s->mb_y > 0) {
834  memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - s->mb_stride],
835  -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
836 
837  if (s->mb_x > 0)
838  h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
839  }
840 
841  return 0;
842 }
843 
844 static av_cold int svq3_decode_init(AVCodecContext *avctx)
845 {
846  SVQ3Context *svq3 = avctx->priv_data;
847  H264Context *h = &svq3->h;
848  MpegEncContext *s = &h->s;
849  int m;
850  unsigned char *extradata;
851  unsigned char *extradata_end;
852  unsigned int size;
853  int marker_found = 0;
854 
855  if (ff_h264_decode_init(avctx) < 0)
856  return -1;
857 
858  s->flags = avctx->flags;
859  s->flags2 = avctx->flags2;
860  s->unrestricted_mv = 1;
861  h->is_complex = 1;
862  h->sps.chroma_format_idc = 1;
863  avctx->pix_fmt = avctx->codec->pix_fmts[0];
864 
865  if (!s->context_initialized) {
866  h->chroma_qp[0] = h->chroma_qp[1] = 4;
867 
868  svq3->halfpel_flag = 1;
869  svq3->thirdpel_flag = 1;
870  svq3->unknown_flag = 0;
871 
872 
873  /* prowl for the "SEQH" marker in the extradata */
874  extradata = (unsigned char *)avctx->extradata;
875  extradata_end = avctx->extradata + avctx->extradata_size;
876  if (extradata) {
877  for (m = 0; m + 8 < avctx->extradata_size; m++) {
878  if (!memcmp(extradata, "SEQH", 4)) {
879  marker_found = 1;
880  break;
881  }
882  extradata++;
883  }
884  }
885 
886  /* if a match was found, parse the extra data */
887  if (marker_found) {
888  GetBitContext gb;
889  int frame_size_code;
890 
891  size = AV_RB32(&extradata[4]);
892  if (size > extradata_end - extradata - 8)
893  return AVERROR_INVALIDDATA;
894  init_get_bits(&gb, extradata + 8, size * 8);
895 
896  /* 'frame size code' and optional 'width, height' */
897  frame_size_code = get_bits(&gb, 3);
898  switch (frame_size_code) {
899  case 0:
900  avctx->width = 160;
901  avctx->height = 120;
902  break;
903  case 1:
904  avctx->width = 128;
905  avctx->height = 96;
906  break;
907  case 2:
908  avctx->width = 176;
909  avctx->height = 144;
910  break;
911  case 3:
912  avctx->width = 352;
913  avctx->height = 288;
914  break;
915  case 4:
916  avctx->width = 704;
917  avctx->height = 576;
918  break;
919  case 5:
920  avctx->width = 240;
921  avctx->height = 180;
922  break;
923  case 6:
924  avctx->width = 320;
925  avctx->height = 240;
926  break;
927  case 7:
928  avctx->width = get_bits(&gb, 12);
929  avctx->height = get_bits(&gb, 12);
930  break;
931  }
932 
933  svq3->halfpel_flag = get_bits1(&gb);
934  svq3->thirdpel_flag = get_bits1(&gb);
935 
936  /* unknown fields */
937  skip_bits1(&gb);
938  skip_bits1(&gb);
939  skip_bits1(&gb);
940  skip_bits1(&gb);
941 
942  s->low_delay = get_bits1(&gb);
943 
944  /* unknown field */
945  skip_bits1(&gb);
946 
947  while (get_bits1(&gb))
948  skip_bits(&gb, 8);
949 
950  svq3->unknown_flag = get_bits1(&gb);
951  avctx->has_b_frames = !s->low_delay;
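 /* Streams with the unknown_flag set carry a zlib-compressed watermark
  * image; the checksum of the decompressed data becomes watermark_key,
  * which svq3_decode_slice_header() later XORs back into the slice data. */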
952  if (svq3->unknown_flag) {
953 #if CONFIG_ZLIB
954  unsigned watermark_width = svq3_get_ue_golomb(&gb);
955  unsigned watermark_height = svq3_get_ue_golomb(&gb);
956  int u1 = svq3_get_ue_golomb(&gb);
957  int u2 = get_bits(&gb, 8);
958  int u3 = get_bits(&gb, 2);
959  int u4 = svq3_get_ue_golomb(&gb);
960  unsigned long buf_len = watermark_width *
961  watermark_height * 4;
962  int offset = get_bits_count(&gb) + 7 >> 3;
963  uint8_t *buf;
964 
965  if (watermark_height <= 0 || (uint64_t)watermark_width*4 > UINT_MAX/watermark_height)
966  return -1;
967 
968  buf = av_malloc(buf_len);
969  av_log(avctx, AV_LOG_DEBUG, "watermark size: %dx%d\n",
970  watermark_width, watermark_height);
971  av_log(avctx, AV_LOG_DEBUG,
972  "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
973  u1, u2, u3, u4, offset);
974  if (uncompress(buf, &buf_len, extradata + 8 + offset,
975  size - offset) != Z_OK) {
976  av_log(avctx, AV_LOG_ERROR,
977  "could not uncompress watermark logo\n");
978  av_free(buf);
979  return -1;
980  }
981  svq3->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
982  svq3->watermark_key = svq3->watermark_key << 16 |
983  svq3->watermark_key;
984  av_log(avctx, AV_LOG_DEBUG,
985  "watermark key %#x\n", svq3->watermark_key);
986  av_free(buf);
987 #else
988  av_log(avctx, AV_LOG_ERROR,
989  "this svq3 file contains watermark which need zlib support compiled in\n");
990  return -1;
991 #endif
992  }
993  }
994 
995  s->width = avctx->width;
996  s->height = avctx->height;
997 
998  if (ff_MPV_common_init(s) < 0)
999  return -1;
1000 
1001  h->b_stride = 4 * s->mb_width;
1002 
1003  if (ff_h264_alloc_tables(h) < 0) {
1004  av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
1005  return AVERROR(ENOMEM);
1006  }
1007  }
1008 
1009  return 0;
1010 }
1011 
1012 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1013  int *got_frame, AVPacket *avpkt)
1014 {
1015  SVQ3Context *svq3 = avctx->priv_data;
1016  H264Context *h = &svq3->h;
1017  MpegEncContext *s = &h->s;
1018  int buf_size = avpkt->size;
1019  int m, left;
1020  uint8_t *buf;
1021 
1022  /* special case for last picture */
1023  if (buf_size == 0) {
1024  if (s->next_picture_ptr && !s->low_delay) {
1025  *(AVFrame *) data = s->next_picture.f;
1026  s->next_picture_ptr = NULL;
1027  *got_frame = 1;
1028  }
1029  return 0;
1030  }
1031 
1032  s->mb_x = s->mb_y = h->mb_xy = 0;
1033 
1034  if (svq3->watermark_key) {
1035  av_fast_malloc(&svq3->buf, &svq3->buf_size,
1036  buf_size+FF_INPUT_BUFFER_PADDING_SIZE);
1037  if (!svq3->buf)
1038  return AVERROR(ENOMEM);
1039  memcpy(svq3->buf, avpkt->data, buf_size);
1040  buf = svq3->buf;
1041  } else {
1042  buf = avpkt->data;
1043  }
1044 
1045  init_get_bits(&s->gb, buf, 8 * buf_size);
1046 
1047  if (svq3_decode_slice_header(avctx))
1048  return -1;
1049 
1050  s->pict_type = h->slice_type;
1051  s->picture_number = h->slice_num;
1052 
1053  if (avctx->debug & FF_DEBUG_PICT_INFO)
1054  av_log(h->s.avctx, AV_LOG_DEBUG,
1055  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1056  av_get_picture_type_char(s->pict_type),
1057  svq3->halfpel_flag, svq3->thirdpel_flag,
1058  s->adaptive_quant, s->qscale, h->slice_num);
1059 
1060  /* for skipping the frame */
1061  s->current_picture.f.pict_type = s->pict_type;
1062  s->current_picture.f.key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1063 
1064  /* Skip B-frames if we do not have reference frames. */
1065  if (s->last_picture_ptr == NULL && s->pict_type == AV_PICTURE_TYPE_B)
1066  return 0;
1067  if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1068  avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1069  avctx->skip_frame >= AVDISCARD_ALL)
1070  return 0;
1071 
1072  if (s->next_p_frame_damaged) {
1073  if (s->pict_type == AV_PICTURE_TYPE_B)
1074  return 0;
1075  else
1076  s->next_p_frame_damaged = 0;
1077  }
1078 
1079  if (ff_h264_frame_start(h) < 0)
1080  return -1;
1081 
1082  if (s->pict_type == AV_PICTURE_TYPE_B) {
1083  h->frame_num_offset = (h->slice_num - h->prev_frame_num);
1084 
1085  if (h->frame_num_offset < 0)
1086  h->frame_num_offset += 256;
1087  if (h->frame_num_offset == 0 ||
1088  h->frame_num_offset >= h->prev_frame_num_offset) {
1089  av_log(h->s.avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1090  return -1;
1091  }
1092  } else {
1093  h->prev_frame_num = h->frame_num;
1094  h->frame_num = h->slice_num;
1095  h->prev_frame_num_offset = (h->frame_num - h->prev_frame_num);
1096 
1097  if (h->prev_frame_num_offset < 0)
1098  h->prev_frame_num_offset += 256;
1099  }
1100 
1101  for (m = 0; m < 2; m++) {
1102  int i;
1103  for (i = 0; i < 4; i++) {
1104  int j;
1105  for (j = -1; j < 4; j++)
1106  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1107  if (i < 3)
1108  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1109  }
1110  }
1111 
1112  for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1113  for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1114  unsigned mb_type;
1115  h->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1116 
1117  if ((get_bits_count(&s->gb) + 7) >= s->gb.size_in_bits &&
1118  ((get_bits_count(&s->gb) & 7) == 0 ||
1119  show_bits(&s->gb, -get_bits_count(&s->gb) & 7) == 0)) {
1120  skip_bits(&s->gb, svq3->next_slice_index - get_bits_count(&s->gb));
1121  s->gb.size_in_bits = 8 * buf_size;
1122 
1123  if (svq3_decode_slice_header(avctx))
1124  return -1;
1125 
1126  /* TODO: support s->mb_skip_run */
1127  }
1128 
1129  mb_type = svq3_get_ue_golomb(&s->gb);
1130 
1131  if (s->pict_type == AV_PICTURE_TYPE_I)
1132  mb_type += 8;
1133  else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1134  mb_type += 4;
1135  if (mb_type > 33 || svq3_decode_mb(svq3, mb_type)) {
1136  av_log(h->s.avctx, AV_LOG_ERROR,
1137  "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1138  return -1;
1139  }
1140 
1141  if (mb_type != 0)
1142  ff_h264_hl_decode_mb(h);
1143 
1144  if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1145  s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1146  (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1147  }
1148 
1149  ff_draw_horiz_band(s, 16 * s->mb_y, 16);
1150  }
1151 
1152  left = buf_size*8 - get_bits_count(&s->gb);
1153 
1154  if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1155  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1156  //av_hex_dump(stderr, buf+buf_size-8, 8);
1157  }
1158 
1159  if (left < 0) {
1160  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1161  return -1;
1162  }
1163 
1164  ff_MPV_frame_end(s);
1165 
1166  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1167  *(AVFrame *)data = s->current_picture.f;
1168  else
1169  *(AVFrame *)data = s->last_picture.f;
1170 
1171  /* Do not output the last pic after seeking. */
1172  if (s->last_picture_ptr || s->low_delay)
1173  *got_frame = 1;
1174 
1175  return buf_size;
1176 }
1177 
1178 static int svq3_decode_end(AVCodecContext *avctx)
1179 {
1180  SVQ3Context *svq3 = avctx->priv_data;
1181  H264Context *h = &svq3->h;
1182  MpegEncContext *s = &h->s;
1183 
1184  ff_h264_free_context(h);
1185 
1186  ff_MPV_common_end(s);
1187 
1188  av_freep(&svq3->buf);
1189  svq3->buf_size = 0;
1190 
1191  return 0;
1192 }
1193 
1194 AVCodec ff_svq3_decoder = {
1195  .name = "svq3",
1196  .type = AVMEDIA_TYPE_VIDEO,
1197  .id = AV_CODEC_ID_SVQ3,
1198  .priv_data_size = sizeof(SVQ3Context),
1199  .init = svq3_decode_init,
1200  .close = svq3_decode_end,
1201  .decode = svq3_decode_frame,
1202  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1203  CODEC_CAP_DR1 |
1204  CODEC_CAP_DELAY,
1205  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1206  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1207  AV_PIX_FMT_NONE},
1208 };