vc1dec.c
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 decoder
27  */
28 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "h264chroma.h"
35 #include "vc1.h"
36 #include "vc1data.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
39 #include "unary.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 #include "libavutil/avassert.h"
43 
44 #undef NDEBUG
45 #include <assert.h>
46 
47 #define MB_INTRA_VLC_BITS 9
48 #define DC_VLC_BITS 9
49 
50 
51 // offset tables for interlaced picture MVDATA decoding
52 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
53 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
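/* For k >= 1, offset_table1[k] == 1 << (k - 1) and offset_table2[k] == (1 << k) - 1;
 * get_mvdata_interlaced() below picks one of the two as the base offset for a decoded
 * MV differential component, depending on whether DMVRANGE extends that component. */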
54 
55 /***********************************************************************/
56 /**
57  * @name VC-1 Bitplane decoding
58  * @see 8.7, p56
59  * @{
60  */
61 
62 
63 static void init_block_index(VC1Context *v)
64 {
65  MpegEncContext *s = &v->s;
66  ff_init_block_index(s);
67  if (v->field_mode && !(v->second_field ^ v->tff)) {
68  s->dest[0] += s->current_picture_ptr->f.linesize[0];
69  s->dest[1] += s->current_picture_ptr->f.linesize[1];
70  s->dest[2] += s->current_picture_ptr->f.linesize[2];
71  }
72 }
73 
74 /** @} */ //Bitplane group
75 
76 static void vc1_put_signed_blocks_clamped(VC1Context *v)
77 {
78  MpegEncContext *s = &v->s;
79  int topleft_mb_pos, top_mb_pos;
80  int stride_y, fieldtx = 0;
81  int v_dist;
82 
83  /* The put pixels loop is always one MB row behind the decoding loop,
84  * because we can only put pixels when overlap filtering is done, and
85  * for filtering of the bottom edge of a MB, we need the next MB row
86  * present as well.
87  * Within the row, the put pixels loop is also one MB col behind the
88  * decoding loop. The reason for this is again, because for filtering
89  * of the right MB edge, we need the next MB present. */
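 /* Concretely: while the decoding loop sits at MB (mb_x, mb_y), the blocks written
  * out below come from v->topleft_blk_idx (one row up, one column to the left), or
  * from v->top_blk_idx when handling the last column of the row. */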
90  if (!s->first_slice_line) {
91  if (s->mb_x) {
92  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
93  if (v->fcm == ILACE_FRAME)
94  fieldtx = v->fieldtx_plane[topleft_mb_pos];
95  stride_y = s->linesize << fieldtx;
96  v_dist = (16 - fieldtx) >> (fieldtx == 0);
97  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
98  s->dest[0] - 16 * s->linesize - 16,
99  stride_y);
100  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
101  s->dest[0] - 16 * s->linesize - 8,
102  stride_y);
103  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
104  s->dest[0] - v_dist * s->linesize - 16,
105  stride_y);
106  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
107  s->dest[0] - v_dist * s->linesize - 8,
108  stride_y);
109  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
110  s->dest[1] - 8 * s->uvlinesize - 8,
111  s->uvlinesize);
112  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
113  s->dest[2] - 8 * s->uvlinesize - 8,
114  s->uvlinesize);
115  }
116  if (s->mb_x == s->mb_width - 1) {
117  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
118  if (v->fcm == ILACE_FRAME)
119  fieldtx = v->fieldtx_plane[top_mb_pos];
120  stride_y = s->linesize << fieldtx;
121  v_dist = fieldtx ? 15 : 8;
122  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
123  s->dest[0] - 16 * s->linesize,
124  stride_y);
125  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
126  s->dest[0] - 16 * s->linesize + 8,
127  stride_y);
128  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
129  s->dest[0] - v_dist * s->linesize,
130  stride_y);
131  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
132  s->dest[0] - v_dist * s->linesize + 8,
133  stride_y);
134  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
135  s->dest[1] - 8 * s->uvlinesize,
136  s->uvlinesize);
137  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
138  s->dest[2] - 8 * s->uvlinesize,
139  s->uvlinesize);
140  }
141  }
142 
143 #define inc_blk_idx(idx) do { \
144  idx++; \
145  if (idx >= v->n_allocated_blks) \
146  idx = 0; \
147  } while (0)
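 /* inc_blk_idx() advances a block-buffer index cyclically through the
  * v->n_allocated_blks slabs in v->block[], so the cur/left/top/topleft
  * indices each rotate to the next buffer once a macroblock is done. */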
148 
149  inc_blk_idx(v->topleft_blk_idx);
150  inc_blk_idx(v->cur_blk_idx);
151  inc_blk_idx(v->top_blk_idx);
152  inc_blk_idx(v->left_blk_idx);
153 }
154 
155 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
156 {
157  MpegEncContext *s = &v->s;
158  int j;
159  if (!s->first_slice_line) {
160  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
161  if (s->mb_x)
162  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
163  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
164  for (j = 0; j < 2; j++) {
165  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
166  if (s->mb_x)
167  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
168  }
169  }
170  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
171 
172  if (s->mb_y == s->end_mb_y - 1) {
173  if (s->mb_x) {
174  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
175  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
176  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
177  }
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
179  }
180 }
181 
182 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
183 {
184  MpegEncContext *s = &v->s;
185  int j;
186 
187  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
188  * means it runs two rows/cols behind the decoding loop. */
189  if (!s->first_slice_line) {
190  if (s->mb_x) {
191  if (s->mb_y >= s->start_mb_y + 2) {
192  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
193 
194  if (s->mb_x >= 2)
195  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
196  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
197  for (j = 0; j < 2; j++) {
198  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
199  if (s->mb_x >= 2) {
200  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
201  }
202  }
203  }
204  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
205  }
206 
207  if (s->mb_x == s->mb_width - 1) {
208  if (s->mb_y >= s->start_mb_y + 2) {
209  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
210 
211  if (s->mb_x)
212  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
213  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
214  for (j = 0; j < 2; j++) {
215  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
216  if (s->mb_x >= 2) {
217  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
218  }
219  }
220  }
221  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
222  }
223 
224  if (s->mb_y == s->end_mb_y) {
225  if (s->mb_x) {
226  if (s->mb_x >= 2)
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
229  if (s->mb_x >= 2) {
230  for (j = 0; j < 2; j++) {
231  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
232  }
233  }
234  }
235 
236  if (s->mb_x == s->mb_width - 1) {
237  if (s->mb_x)
238  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
240  if (s->mb_x) {
241  for (j = 0; j < 2; j++) {
242  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
243  }
244  }
245  }
246  }
247  }
248 }
249 
250 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
251 {
252  MpegEncContext *s = &v->s;
253  int mb_pos;
254 
255  if (v->condover == CONDOVER_NONE)
256  return;
257 
258  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
259 
260  /* Within a MB, the horizontal overlap always runs before the vertical.
261  * To accomplish that, we run the H on left and internal borders of the
262  * currently decoded MB. Then, we wait for the next overlap iteration
263  * to do H overlap on the right edge of this MB, before moving over and
264  * running the V overlap. Therefore, the V overlap makes us trail by one
265  * MB col and the H overlap filter makes us trail by one MB row. This
266  * is reflected in the time at which we run the put_pixels loop. */
267  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
268  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
269  v->over_flags_plane[mb_pos - 1])) {
270  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
271  v->block[v->cur_blk_idx][0]);
272  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
273  v->block[v->cur_blk_idx][2]);
274  if (!(s->flags & CODEC_FLAG_GRAY)) {
275  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
276  v->block[v->cur_blk_idx][4]);
277  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
278  v->block[v->cur_blk_idx][5]);
279  }
280  }
281  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
282  v->block[v->cur_blk_idx][1]);
283  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
284  v->block[v->cur_blk_idx][3]);
285 
286  if (s->mb_x == s->mb_width - 1) {
287  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
288  v->over_flags_plane[mb_pos - s->mb_stride])) {
289  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
290  v->block[v->cur_blk_idx][0]);
291  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
292  v->block[v->cur_blk_idx][1]);
293  if (!(s->flags & CODEC_FLAG_GRAY)) {
294  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
295  v->block[v->cur_blk_idx][4]);
296  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
297  v->block[v->cur_blk_idx][5]);
298  }
299  }
300  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
301  v->block[v->cur_blk_idx][2]);
302  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
303  v->block[v->cur_blk_idx][3]);
304  }
305  }
306  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
307  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
308  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
309  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
310  v->block[v->left_blk_idx][0]);
311  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
312  v->block[v->left_blk_idx][1]);
313  if (!(s->flags & CODEC_FLAG_GRAY)) {
314  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
315  v->block[v->left_blk_idx][4]);
316  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
317  v->block[v->left_blk_idx][5]);
318  }
319  }
320  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
321  v->block[v->left_blk_idx][2]);
322  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
323  v->block[v->left_blk_idx][3]);
324  }
325 }
326 
327 /** Do motion compensation over 1 macroblock
328  * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
329  */
330 static void vc1_mc_1mv(VC1Context *v, int dir)
331 {
332  MpegEncContext *s = &v->s;
333  H264ChromaContext *h264chroma = &v->h264chroma;
334  uint8_t *srcY, *srcU, *srcV;
335  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
336  int v_edge_pos = s->v_edge_pos >> v->field_mode;
337  int i;
338  uint8_t (*luty)[256], (*lutuv)[256];
339  int use_ic;
340 
341  if ((!v->field_mode ||
342  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
343  !v->s.last_picture.f.data[0])
344  return;
345 
346  mx = s->mv[dir][0][0];
347  my = s->mv[dir][0][1];
348 
349  // store motion vectors for further use in B frames
350  if (s->pict_type == AV_PICTURE_TYPE_P) {
351  for (i = 0; i < 4; i++) {
352  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
353  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
354  }
355  }
356 
357  uvmx = (mx + ((mx & 3) == 3)) >> 1;
358  uvmy = (my + ((my & 3) == 3)) >> 1;
359  v->luma_mv[s->mb_x][0] = uvmx;
360  v->luma_mv[s->mb_x][1] = uvmy;
361 
362  if (v->field_mode &&
363  v->cur_field_type != v->ref_field_type[dir]) {
364  my = my - 2 + 4 * v->cur_field_type;
365  uvmy = uvmy - 2 + 4 * v->cur_field_type;
366  }
367 
368  // fastuvmc shall be ignored for interlaced frame picture
369  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
370  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
371  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
372  }
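 /* Example: a luma MV component of +7 (1.75 luma pixels in quarter-pel units)
  * gives a chroma component of (7 + 1) >> 1 = 4, i.e. 1.0 chroma pixels; with
  * FASTUVMC set, odd chroma components are then rounded toward zero so that
  * chroma prediction only uses half-pel positions. */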
373  if (!dir) {
374  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
375  srcY = s->current_picture.f.data[0];
376  srcU = s->current_picture.f.data[1];
377  srcV = s->current_picture.f.data[2];
378  luty = v->curr_luty;
379  lutuv = v->curr_lutuv;
380  use_ic = *v->curr_use_ic;
381  } else {
382  srcY = s->last_picture.f.data[0];
383  srcU = s->last_picture.f.data[1];
384  srcV = s->last_picture.f.data[2];
385  luty = v->last_luty;
386  lutuv = v->last_lutuv;
387  use_ic = v->last_use_ic;
388  }
389  } else {
390  srcY = s->next_picture.f.data[0];
391  srcU = s->next_picture.f.data[1];
392  srcV = s->next_picture.f.data[2];
393  luty = v->next_luty;
394  lutuv = v->next_lutuv;
395  use_ic = v->next_use_ic;
396  }
397 
398  if (!srcY || !srcU) {
399  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
400  return;
401  }
402 
403  src_x = s->mb_x * 16 + (mx >> 2);
404  src_y = s->mb_y * 16 + (my >> 2);
405  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
406  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
407 
408  if (v->profile != PROFILE_ADVANCED) {
409  src_x = av_clip( src_x, -16, s->mb_width * 16);
410  src_y = av_clip( src_y, -16, s->mb_height * 16);
411  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
412  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
413  } else {
414  src_x = av_clip( src_x, -17, s->avctx->coded_width);
415  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
416  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
417  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
418  }
419 
420  srcY += src_y * s->linesize + src_x;
421  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
422  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
423 
424  if (v->field_mode && v->ref_field_type[dir]) {
425  srcY += s->current_picture_ptr->f.linesize[0];
426  srcU += s->current_picture_ptr->f.linesize[1];
427  srcV += s->current_picture_ptr->f.linesize[2];
428  }
429 
430  /* for grayscale we should not try to read from unknown area */
431  if (s->flags & CODEC_FLAG_GRAY) {
432  srcU = s->edge_emu_buffer + 18 * s->linesize;
433  srcV = s->edge_emu_buffer + 18 * s->linesize;
434  }
435 
436  if (v->rangeredfrm || use_ic
437  || s->h_edge_pos < 22 || v_edge_pos < 22
438  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
439  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
440  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
441 
442  srcY -= s->mspel * (1 + s->linesize);
443  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
444  s->linesize, s->linesize,
445  17 + s->mspel * 2, 17 + s->mspel * 2,
446  src_x - s->mspel, src_y - s->mspel,
447  s->h_edge_pos, v_edge_pos);
448  srcY = s->edge_emu_buffer;
449  s->vdsp.emulated_edge_mc(uvbuf, srcU,
450  s->uvlinesize, s->uvlinesize,
451  8 + 1, 8 + 1,
452  uvsrc_x, uvsrc_y,
453  s->h_edge_pos >> 1, v_edge_pos >> 1);
454  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
455  s->uvlinesize, s->uvlinesize,
456  8 + 1, 8 + 1,
457  uvsrc_x, uvsrc_y,
458  s->h_edge_pos >> 1, v_edge_pos >> 1);
459  srcU = uvbuf;
460  srcV = uvbuf + 16;
461  /* if we deal with range reduction we need to scale source blocks */
462  if (v->rangeredfrm) {
463  int i, j;
464  uint8_t *src, *src2;
465 
466  src = srcY;
467  for (j = 0; j < 17 + s->mspel * 2; j++) {
468  for (i = 0; i < 17 + s->mspel * 2; i++)
469  src[i] = ((src[i] - 128) >> 1) + 128;
470  src += s->linesize;
471  }
472  src = srcU;
473  src2 = srcV;
474  for (j = 0; j < 9; j++) {
475  for (i = 0; i < 9; i++) {
476  src[i] = ((src[i] - 128) >> 1) + 128;
477  src2[i] = ((src2[i] - 128) >> 1) + 128;
478  }
479  src += s->uvlinesize;
480  src2 += s->uvlinesize;
481  }
482  }
483  /* if we deal with intensity compensation we need to scale source blocks */
484  if (use_ic) {
485  int i, j;
486  uint8_t *src, *src2;
487 
488  src = srcY;
489  for (j = 0; j < 17 + s->mspel * 2; j++) {
490  int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
491  for (i = 0; i < 17 + s->mspel * 2; i++)
492  src[i] = luty[f][src[i]];
493  src += s->linesize;
494  }
495  src = srcU;
496  src2 = srcV;
497  for (j = 0; j < 9; j++) {
498  int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
499  for (i = 0; i < 9; i++) {
500  src[i] = lutuv[f][src[i]];
501  src2[i] = lutuv[f][src2[i]];
502  }
503  src += s->uvlinesize;
504  src2 += s->uvlinesize;
505  }
506  }
507  srcY += s->mspel * (1 + s->linesize);
508  }
509 
510  if (s->mspel) {
511  dxy = ((my & 3) << 2) | (mx & 3);
512  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
513  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
514  srcY += s->linesize * 8;
515  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
516  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
517  } else { // hpel mc - always used for luma
518  dxy = (my & 2) | ((mx & 2) >> 1);
519  if (!v->rnd)
520  s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
521  else
522  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
523  }
524 
525  if (s->flags & CODEC_FLAG_GRAY) return;
526  /* Chroma MC always uses qpel bilinear */
527  uvmx = (uvmx & 3) << 1;
528  uvmy = (uvmy & 3) << 1;
529  if (!v->rnd) {
530  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
531  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
532  } else {
533  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
534  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
535  }
536 }
537 
538 static inline int median4(int a, int b, int c, int d)
539 {
540  if (a < b) {
541  if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
542  else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
543  } else {
544  if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
545  else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
546  }
547 }
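 /* median4() returns the mean of the two middle values, e.g.
  * median4(1, 5, 3, 7): a < b and c < d, so (FFMIN(5, 7) + FFMAX(1, 3)) / 2 = 4. */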
548 
549 /** Do motion compensation for 4-MV macroblock - luminance block
550  */
551 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
552 {
553  MpegEncContext *s = &v->s;
554  uint8_t *srcY;
555  int dxy, mx, my, src_x, src_y;
556  int off;
557  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
558  int v_edge_pos = s->v_edge_pos >> v->field_mode;
559  uint8_t (*luty)[256];
560  int use_ic;
561 
562  if ((!v->field_mode ||
563  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
564  !v->s.last_picture.f.data[0])
565  return;
566 
567  mx = s->mv[dir][n][0];
568  my = s->mv[dir][n][1];
569 
570  if (!dir) {
571  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
572  srcY = s->current_picture.f.data[0];
573  luty = v->curr_luty;
574  use_ic = *v->curr_use_ic;
575  } else {
576  srcY = s->last_picture.f.data[0];
577  luty = v->last_luty;
578  use_ic = v->last_use_ic;
579  }
580  } else {
581  srcY = s->next_picture.f.data[0];
582  luty = v->next_luty;
583  use_ic = v->next_use_ic;
584  }
585 
586  if (!srcY) {
587  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
588  return;
589  }
590 
591  if (v->field_mode) {
592  if (v->cur_field_type != v->ref_field_type[dir])
593  my = my - 2 + 4 * v->cur_field_type;
594  }
595 
596  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
597  int same_count = 0, opp_count = 0, k;
598  int chosen_mv[2][4][2], f;
599  int tx, ty;
600  for (k = 0; k < 4; k++) {
601  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
602  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
603  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
604  opp_count += f;
605  same_count += 1 - f;
606  }
607  f = opp_count > same_count;
608  switch (f ? opp_count : same_count) {
609  case 4:
610  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
611  chosen_mv[f][2][0], chosen_mv[f][3][0]);
612  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
613  chosen_mv[f][2][1], chosen_mv[f][3][1]);
614  break;
615  case 3:
616  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
617  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
618  break;
619  case 2:
620  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
621  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
622  break;
623  default:
624  av_assert0(0);
625  }
626  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
627  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
628  for (k = 0; k < 4; k++)
629  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
630  }
631 
632  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
633  int qx, qy;
634  int width = s->avctx->coded_width;
635  int height = s->avctx->coded_height >> 1;
636  if (s->pict_type == AV_PICTURE_TYPE_P) {
637  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
638  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
639  }
640  qx = (s->mb_x * 16) + (mx >> 2);
641  qy = (s->mb_y * 8) + (my >> 3);
642 
643  if (qx < -17)
644  mx -= 4 * (qx + 17);
645  else if (qx > width)
646  mx -= 4 * (qx - width);
647  if (qy < -18)
648  my -= 8 * (qy + 18);
649  else if (qy > height + 1)
650  my -= 8 * (qy - height - 1);
651  }
652 
653  if ((v->fcm == ILACE_FRAME) && fieldmv)
654  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
655  else
656  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
657 
658  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
659  if (!fieldmv)
660  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
661  else
662  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
663 
664  if (v->profile != PROFILE_ADVANCED) {
665  src_x = av_clip(src_x, -16, s->mb_width * 16);
666  src_y = av_clip(src_y, -16, s->mb_height * 16);
667  } else {
668  src_x = av_clip(src_x, -17, s->avctx->coded_width);
669  if (v->fcm == ILACE_FRAME) {
670  if (src_y & 1)
671  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
672  else
673  src_y = av_clip(src_y, -18, s->avctx->coded_height);
674  } else {
675  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
676  }
677  }
678 
679  srcY += src_y * s->linesize + src_x;
680  if (v->field_mode && v->ref_field_type[dir])
681  srcY += s->current_picture_ptr->f.linesize[0];
682 
683  if (fieldmv && !(src_y & 1))
684  v_edge_pos--;
685  if (fieldmv && (src_y & 1) && src_y < 4)
686  src_y--;
687  if (v->rangeredfrm || use_ic
688  || s->h_edge_pos < 13 || v_edge_pos < 23
689  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
690  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
691  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
692  /* check emulate edge stride and offset */
693  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
694  s->linesize, s->linesize,
695  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
696  src_x - s->mspel, src_y - (s->mspel << fieldmv),
697  s->h_edge_pos, v_edge_pos);
698  srcY = s->edge_emu_buffer;
699  /* if we deal with range reduction we need to scale source blocks */
700  if (v->rangeredfrm) {
701  int i, j;
702  uint8_t *src;
703 
704  src = srcY;
705  for (j = 0; j < 9 + s->mspel * 2; j++) {
706  for (i = 0; i < 9 + s->mspel * 2; i++)
707  src[i] = ((src[i] - 128) >> 1) + 128;
708  src += s->linesize << fieldmv;
709  }
710  }
711  /* if we deal with intensity compensation we need to scale source blocks */
712  if (use_ic) {
713  int i, j;
714  uint8_t *src;
715 
716  src = srcY;
717  for (j = 0; j < 9 + s->mspel * 2; j++) {
718  int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
719  for (i = 0; i < 9 + s->mspel * 2; i++)
720  src[i] = luty[f][src[i]];
721  src += s->linesize << fieldmv;
722  }
723  }
724  srcY += s->mspel * (1 + (s->linesize << fieldmv));
725  }
726 
727  if (s->mspel) {
728  dxy = ((my & 3) << 2) | (mx & 3);
729  if (avg)
730  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
731  else
732  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
733  } else { // hpel mc - always used for luma
734  dxy = (my & 2) | ((mx & 2) >> 1);
735  if (!v->rnd)
736  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
737  else
738  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
739  }
740 }
741 
742 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
743 {
744  int idx, i;
745  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
746 
747  idx = ((a[3] != flag) << 3)
748  | ((a[2] != flag) << 2)
749  | ((a[1] != flag) << 1)
750  | (a[0] != flag);
751  if (!idx) {
752  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
753  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
754  return 4;
755  } else if (count[idx] == 1) {
756  switch (idx) {
757  case 0x1:
758  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
759  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
760  return 3;
761  case 0x2:
762  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
763  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
764  return 3;
765  case 0x4:
766  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
767  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
768  return 3;
769  case 0x8:
770  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
771  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
772  return 3;
773  }
774  } else if (count[idx] == 2) {
775  int t1 = 0, t2 = 0;
776  for (i = 0; i < 3; i++)
777  if (!a[i]) {
778  t1 = i;
779  break;
780  }
781  for (i = t1 + 1; i < 4; i++)
782  if (!a[i]) {
783  t2 = i;
784  break;
785  }
786  *tx = (mvx[t1] + mvx[t2]) / 2;
787  *ty = (mvy[t1] + mvy[t2]) / 2;
788  return 2;
789  } else {
790  return 0;
791  }
792  return -1;
793 }
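 /* count[idx] is the population count of idx, i.e. how many of the four luma
  * blocks have a[i] != flag.  The return value is the number of blocks the
  * chroma MV was derived from: 4 (median4), 3 (mid_pred), 2 (average), or 0
  * when no chroma MV is produced. */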
794 
795 /** Do motion compensation for 4-MV macroblock - both chroma blocks
796  */
797 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
798 {
799  MpegEncContext *s = &v->s;
800  H264ChromaContext *h264chroma = &v->h264chroma;
801  uint8_t *srcU, *srcV;
802  int uvmx, uvmy, uvsrc_x, uvsrc_y;
803  int k, tx = 0, ty = 0;
804  int mvx[4], mvy[4], intra[4], mv_f[4];
805  int valid_count;
806  int chroma_ref_type = v->cur_field_type;
807  int v_edge_pos = s->v_edge_pos >> v->field_mode;
808  uint8_t (*lutuv)[256];
809  int use_ic;
810 
811  if (!v->field_mode && !v->s.last_picture.f.data[0])
812  return;
813  if (s->flags & CODEC_FLAG_GRAY)
814  return;
815 
816  for (k = 0; k < 4; k++) {
817  mvx[k] = s->mv[dir][k][0];
818  mvy[k] = s->mv[dir][k][1];
819  intra[k] = v->mb_type[0][s->block_index[k]];
820  if (v->field_mode)
821  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
822  }
823 
824  /* calculate chroma MV vector from four luma MVs */
825  if (!v->field_mode || (v->field_mode && !v->numref)) {
826  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
827  chroma_ref_type = v->reffield;
828  if (!valid_count) {
829  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
830  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
831  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
832  return; //no need to do MC for intra blocks
833  }
834  } else {
835  int dominant = 0;
836  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
837  dominant = 1;
838  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
839  if (dominant)
840  chroma_ref_type = !v->cur_field_type;
841  }
842  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
843  return;
844  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
845  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
846  uvmx = (tx + ((tx & 3) == 3)) >> 1;
847  uvmy = (ty + ((ty & 3) == 3)) >> 1;
848 
849  v->luma_mv[s->mb_x][0] = uvmx;
850  v->luma_mv[s->mb_x][1] = uvmy;
851 
852  if (v->fastuvmc) {
853  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
854  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
855  }
856  // Field conversion bias
857  if (v->cur_field_type != chroma_ref_type)
858  uvmy += 2 - 4 * chroma_ref_type;
859 
860  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
861  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
862 
863  if (v->profile != PROFILE_ADVANCED) {
864  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
865  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
866  } else {
867  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
868  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
869  }
870 
871  if (!dir) {
872  if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
873  srcU = s->current_picture.f.data[1];
874  srcV = s->current_picture.f.data[2];
875  lutuv = v->curr_lutuv;
876  use_ic = *v->curr_use_ic;
877  } else {
878  srcU = s->last_picture.f.data[1];
879  srcV = s->last_picture.f.data[2];
880  lutuv = v->last_lutuv;
881  use_ic = v->last_use_ic;
882  }
883  } else {
884  srcU = s->next_picture.f.data[1];
885  srcV = s->next_picture.f.data[2];
886  lutuv = v->next_lutuv;
887  use_ic = v->next_use_ic;
888  }
889 
890  if (!srcU) {
891  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
892  return;
893  }
894 
895  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
896  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
897 
898  if (v->field_mode) {
899  if (chroma_ref_type) {
900  srcU += s->current_picture_ptr->f.linesize[1];
901  srcV += s->current_picture_ptr->f.linesize[2];
902  }
903  }
904 
905  if (v->rangeredfrm || use_ic
906  || s->h_edge_pos < 18 || v_edge_pos < 18
907  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
908  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
909  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
910  s->uvlinesize, s->uvlinesize,
911  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
912  s->h_edge_pos >> 1, v_edge_pos >> 1);
913  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
914  s->uvlinesize, s->uvlinesize,
915  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
916  s->h_edge_pos >> 1, v_edge_pos >> 1);
917  srcU = s->edge_emu_buffer;
918  srcV = s->edge_emu_buffer + 16;
919 
920  /* if we deal with range reduction we need to scale source blocks */
921  if (v->rangeredfrm) {
922  int i, j;
923  uint8_t *src, *src2;
924 
925  src = srcU;
926  src2 = srcV;
927  for (j = 0; j < 9; j++) {
928  for (i = 0; i < 9; i++) {
929  src[i] = ((src[i] - 128) >> 1) + 128;
930  src2[i] = ((src2[i] - 128) >> 1) + 128;
931  }
932  src += s->uvlinesize;
933  src2 += s->uvlinesize;
934  }
935  }
936  /* if we deal with intensity compensation we need to scale source blocks */
937  if (use_ic) {
938  int i, j;
939  uint8_t *src, *src2;
940 
941  src = srcU;
942  src2 = srcV;
943  for (j = 0; j < 9; j++) {
944  int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
945  for (i = 0; i < 9; i++) {
946  src[i] = lutuv[f][src[i]];
947  src2[i] = lutuv[f][src2[i]];
948  }
949  src += s->uvlinesize;
950  src2 += s->uvlinesize;
951  }
952  }
953  }
954 
955  /* Chroma MC always uses qpel bilinear */
956  uvmx = (uvmx & 3) << 1;
957  uvmy = (uvmy & 3) << 1;
958  if (!v->rnd) {
959  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
960  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
961  } else {
962  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
963  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
964  }
965 }
966 
967 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
968  */
969 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
970 {
971  MpegEncContext *s = &v->s;
972  H264ChromaContext *h264chroma = &v->h264chroma;
973  uint8_t *srcU, *srcV;
974  int uvsrc_x, uvsrc_y;
975  int uvmx_field[4], uvmy_field[4];
976  int i, off, tx, ty;
977  int fieldmv = v->blk_mv_type[s->block_index[0]];
978  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
979  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
980  int v_edge_pos = s->v_edge_pos >> 1;
981  int use_ic;
982  uint8_t (*lutuv)[256];
983 
984  if (s->flags & CODEC_FLAG_GRAY)
985  return;
986 
987  for (i = 0; i < 4; i++) {
988  int d = i < 2 ? dir: dir2;
989  tx = s->mv[d][i][0];
990  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
991  ty = s->mv[d][i][1];
992  if (fieldmv)
993  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
994  else
995  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
996  }
997 
998  for (i = 0; i < 4; i++) {
999  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1000  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1001  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1002  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1003  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1004  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1005  if (i < 2 ? dir : dir2) {
1006  srcU = s->next_picture.f.data[1];
1007  srcV = s->next_picture.f.data[2];
1008  lutuv = v->next_lutuv;
1009  use_ic = v->next_use_ic;
1010  } else {
1011  srcU = s->last_picture.f.data[1];
1012  srcV = s->last_picture.f.data[2];
1013  lutuv = v->last_lutuv;
1014  use_ic = v->last_use_ic;
1015  }
1016  if (!srcU)
1017  return;
1018  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1019  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1020  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1021  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1022 
1023  if (fieldmv && !(uvsrc_y & 1))
1024  v_edge_pos = (s->v_edge_pos >> 1) - 1;
1025 
1026  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1027  uvsrc_y--;
1028  if (use_ic
1029  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1030  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1031  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1032  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcU,
1033  s->uvlinesize, s->uvlinesize,
1034  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1035  s->h_edge_pos >> 1, v_edge_pos);
1036  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1037  s->uvlinesize, s->uvlinesize,
1038  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1039  s->h_edge_pos >> 1, v_edge_pos);
1040  srcU = s->edge_emu_buffer;
1041  srcV = s->edge_emu_buffer + 16;
1042 
1043  /* if we deal with intensity compensation we need to scale source blocks */
1044  if (use_ic) {
1045  int i, j;
1046  uint8_t *src, *src2;
1047 
1048  src = srcU;
1049  src2 = srcV;
1050  for (j = 0; j < 5; j++) {
1051  int f = (uvsrc_y + (j << fieldmv)) & 1;
1052  for (i = 0; i < 5; i++) {
1053  src[i] = lutuv[f][src[i]];
1054  src2[i] = lutuv[f][src2[i]];
1055  }
1056  src += s->uvlinesize << fieldmv;
1057  src2 += s->uvlinesize << fieldmv;
1058  }
1059  }
1060  }
1061  if (avg) {
1062  if (!v->rnd) {
1063  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1064  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1065  } else {
1066  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1067  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068  }
1069  } else {
1070  if (!v->rnd) {
1071  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073  } else {
1074  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076  }
1077  }
1078  }
1079 }
1080 
1081 /***********************************************************************/
1082 /**
1083  * @name VC-1 Block-level functions
1084  * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1085  * @{
1086  */
1087 
1088 /**
1089  * @def GET_MQUANT
1090  * @brief Get macroblock-level quantizer scale
1091  */
1092 #define GET_MQUANT() \
1093  if (v->dquantfrm) { \
1094  int edges = 0; \
1095  if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1096  if (v->dqbilevel) { \
1097  mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1098  } else { \
1099  mqdiff = get_bits(gb, 3); \
1100  if (mqdiff != 7) \
1101  mquant = v->pq + mqdiff; \
1102  else \
1103  mquant = get_bits(gb, 5); \
1104  } \
1105  } \
1106  if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1107  edges = 1 << v->dqsbedge; \
1108  else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1109  edges = (3 << v->dqsbedge) % 15; \
1110  else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1111  edges = 15; \
1112  if ((edges&1) && !s->mb_x) \
1113  mquant = v->altpq; \
1114  if ((edges&2) && s->first_slice_line) \
1115  mquant = v->altpq; \
1116  if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1117  mquant = v->altpq; \
1118  if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1119  mquant = v->altpq; \
1120  if (!mquant || mquant > 31) { \
1121  av_log(v->s.avctx, AV_LOG_ERROR, \
1122  "Overriding invalid mquant %d\n", mquant); \
1123  mquant = 1; \
1124  } \
1125  }
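 /* The edges bitmask selects which picture-border macroblocks use ALTPQUANT:
  * bit 0 = left column, bit 1 = top row, bit 2 = right column, bit 3 = bottom row.
  * E.g. DQPROFILE_DOUBLE_EDGES with dqsbedge == 3 gives (3 << 3) % 15 == 9,
  * so the left column and the bottom row switch to v->altpq. */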
1126 
1127 /**
1128  * @def GET_MVDATA(_dmv_x, _dmv_y)
1129  * @brief Get MV differentials
1130  * @see MVDATA decoding from 8.3.5.2, p(1)20
1131  * @param _dmv_x Horizontal differential for decoded MV
1132  * @param _dmv_y Vertical differential for decoded MV
1133  */
1134 #define GET_MVDATA(_dmv_x, _dmv_y) \
1135  index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1136  VC1_MV_DIFF_VLC_BITS, 2); \
1137  if (index > 36) { \
1138  mb_has_coeffs = 1; \
1139  index -= 37; \
1140  } else \
1141  mb_has_coeffs = 0; \
1142  s->mb_intra = 0; \
1143  if (!index) { \
1144  _dmv_x = _dmv_y = 0; \
1145  } else if (index == 35) { \
1146  _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1147  _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1148  } else if (index == 36) { \
1149  _dmv_x = 0; \
1150  _dmv_y = 0; \
1151  s->mb_intra = 1; \
1152  } else { \
1153  index1 = index % 6; \
1154  if (!s->quarter_sample && index1 == 5) val = 1; \
1155  else val = 0; \
1156  if (size_table[index1] - val > 0) \
1157  val = get_bits(gb, size_table[index1] - val); \
1158  else val = 0; \
1159  sign = 0 - (val&1); \
1160  _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1161  \
1162  index1 = index / 6; \
1163  if (!s->quarter_sample && index1 == 5) val = 1; \
1164  else val = 0; \
1165  if (size_table[index1] - val > 0) \
1166  val = get_bits(gb, size_table[index1] - val); \
1167  else val = 0; \
1168  sign = 0 - (val & 1); \
1169  _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1170  }
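 /* Summary of the MVDATA index space: 0 means a zero differential, 35 is the
  * escape code (raw k_x/k_y-bit values follow), 36 flags an intra MB, and any
  * other index is split as index % 6 and index / 6 to pick the size_table and
  * offset_table entries for the horizontal and vertical differentials. */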
1171 
1172 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1173  int *dmv_y, int *pred_flag)
1174 {
1175  int index, index1;
1176  int extend_x = 0, extend_y = 0;
1177  GetBitContext *gb = &v->s.gb;
1178  int bits, esc;
1179  int val, sign;
1180  const int* offs_tab;
1181 
1182  if (v->numref) {
1183  bits = VC1_2REF_MVDATA_VLC_BITS;
1184  esc = 125;
1185  } else {
1186  bits = VC1_1REF_MVDATA_VLC_BITS;
1187  esc = 71;
1188  }
1189  switch (v->dmvrange) {
1190  case 1:
1191  extend_x = 1;
1192  break;
1193  case 2:
1194  extend_y = 1;
1195  break;
1196  case 3:
1197  extend_x = extend_y = 1;
1198  break;
1199  }
1200  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1201  if (index == esc) {
1202  *dmv_x = get_bits(gb, v->k_x);
1203  *dmv_y = get_bits(gb, v->k_y);
1204  if (v->numref) {
1205  if (pred_flag) {
1206  *pred_flag = *dmv_y & 1;
1207  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1208  } else {
1209  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1210  }
1211  }
1212  }
1213  else {
1214  av_assert0(index < esc);
1215  if (extend_x)
1216  offs_tab = offset_table2;
1217  else
1218  offs_tab = offset_table1;
1219  index1 = (index + 1) % 9;
1220  if (index1 != 0) {
1221  val = get_bits(gb, index1 + extend_x);
1222  sign = 0 -(val & 1);
1223  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1224  } else
1225  *dmv_x = 0;
1226  if (extend_y)
1227  offs_tab = offset_table2;
1228  else
1229  offs_tab = offset_table1;
1230  index1 = (index + 1) / 9;
1231  if (index1 > v->numref) {
1232  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1233  sign = 0 - (val & 1);
1234  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1235  } else
1236  *dmv_y = 0;
1237  if (v->numref && pred_flag)
1238  *pred_flag = index1 & 1;
1239  }
1240 }
1241 
1242 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1243 {
1244  int scaledvalue, refdist;
1245  int scalesame1, scalesame2;
1246  int scalezone1_x, zone1offset_x;
1247  int table_index = dir ^ v->second_field;
1248 
1249  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1250  refdist = v->refdist;
1251  else
1252  refdist = dir ? v->brfd : v->frfd;
1253  if (refdist > 3)
1254  refdist = 3;
1255  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1256  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1257  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1258  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1259 
1260  if (FFABS(n) > 255)
1261  scaledvalue = n;
1262  else {
1263  if (FFABS(n) < scalezone1_x)
1264  scaledvalue = (n * scalesame1) >> 8;
1265  else {
1266  if (n < 0)
1267  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1268  else
1269  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1270  }
1271  }
1272  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1273 }
1274 
1275 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1276 {
1277  int scaledvalue, refdist;
1278  int scalesame1, scalesame2;
1279  int scalezone1_y, zone1offset_y;
1280  int table_index = dir ^ v->second_field;
1281 
1282  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1283  refdist = v->refdist;
1284  else
1285  refdist = dir ? v->brfd : v->frfd;
1286  if (refdist > 3)
1287  refdist = 3;
1288  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1289  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1290  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1291  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1292 
1293  if (FFABS(n) > 63)
1294  scaledvalue = n;
1295  else {
1296  if (FFABS(n) < scalezone1_y)
1297  scaledvalue = (n * scalesame1) >> 8;
1298  else {
1299  if (n < 0)
1300  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1301  else
1302  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1303  }
1304  }
1305 
1306  if (v->cur_field_type && !v->ref_field_type[dir])
1307  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1308  else
1309  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1310 }
1311 
1312 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1313 {
1314  int scalezone1_x, zone1offset_x;
1315  int scaleopp1, scaleopp2, brfd;
1316  int scaledvalue;
1317 
1318  brfd = FFMIN(v->brfd, 3);
1319  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1320  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1321  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1322  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1323 
1324  if (FFABS(n) > 255)
1325  scaledvalue = n;
1326  else {
1327  if (FFABS(n) < scalezone1_x)
1328  scaledvalue = (n * scaleopp1) >> 8;
1329  else {
1330  if (n < 0)
1331  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1332  else
1333  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1334  }
1335  }
1336  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1337 }
1338 
1339 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1340 {
1341  int scalezone1_y, zone1offset_y;
1342  int scaleopp1, scaleopp2, brfd;
1343  int scaledvalue;
1344 
1345  brfd = FFMIN(v->brfd, 3);
1346  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1347  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1348  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1349  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1350 
1351  if (FFABS(n) > 63)
1352  scaledvalue = n;
1353  else {
1354  if (FFABS(n) < scalezone1_y)
1355  scaledvalue = (n * scaleopp1) >> 8;
1356  else {
1357  if (n < 0)
1358  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1359  else
1360  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1361  }
1362  }
1363  if (v->cur_field_type && !v->ref_field_type[dir]) {
1364  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1365  } else {
1366  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1367  }
1368 }
1369 
1370 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1371  int dim, int dir)
1372 {
1373  int brfd, scalesame;
1374  int hpel = 1 - v->s.quarter_sample;
1375 
1376  n >>= hpel;
1377  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1378  if (dim)
1379  n = scaleforsame_y(v, i, n, dir) << hpel;
1380  else
1381  n = scaleforsame_x(v, n, dir) << hpel;
1382  return n;
1383  }
1384  brfd = FFMIN(v->brfd, 3);
1385  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1386 
1387  n = (n * scalesame >> 8) << hpel;
1388  return n;
1389 }
1390 
1391 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1392  int dim, int dir)
1393 {
1394  int refdist, scaleopp;
1395  int hpel = 1 - v->s.quarter_sample;
1396 
1397  n >>= hpel;
1398  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1399  if (dim)
1400  n = scaleforopp_y(v, n, dir) << hpel;
1401  else
1402  n = scaleforopp_x(v, n) << hpel;
1403  return n;
1404  }
1405  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1406  refdist = FFMIN(v->refdist, 3);
1407  else
1408  refdist = dir ? v->brfd : v->frfd;
1409  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1410 
1411  n = (n * scaleopp >> 8) << hpel;
1412  return n;
1413 }
1414 
1415 /** Predict and set motion vector
1416  */
1417 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1418  int mv1, int r_x, int r_y, uint8_t* is_intra,
1419  int pred_flag, int dir)
1420 {
1421  MpegEncContext *s = &v->s;
1422  int xy, wrap, off = 0;
1423  int16_t *A, *B, *C;
1424  int px, py;
1425  int sum;
1426  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1427  int opposite, a_f, b_f, c_f;
1428  int16_t field_predA[2];
1429  int16_t field_predB[2];
1430  int16_t field_predC[2];
1431  int a_valid, b_valid, c_valid;
1432  int hybridmv_thresh, y_bias = 0;
1433 
1434  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1435  ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1436  mixedmv_pic = 1;
1437  else
1438  mixedmv_pic = 0;
1439  /* scale MV difference to be quad-pel */
1440  dmv_x <<= 1 - s->quarter_sample;
1441  dmv_y <<= 1 - s->quarter_sample;
1442 
1443  wrap = s->b8_stride;
1444  xy = s->block_index[n];
1445 
1446  if (s->mb_intra) {
1447  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1448  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1449  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1450  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1451  if (mv1) { /* duplicate motion data for 1-MV block */
1452  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1453  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1454  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1455  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1456  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1457  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1458  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1459  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1460  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1461  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1462  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1463  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1464  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1465  }
1466  return;
1467  }
1468 
1469  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1470  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1471  if (mv1) {
1472  if (v->field_mode && mixedmv_pic)
1473  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1474  else
1475  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1476  } else {
1477  //in 4-MV mode different blocks have different B predictor position
1478  switch (n) {
1479  case 0:
1480  off = (s->mb_x > 0) ? -1 : 1;
1481  break;
1482  case 1:
1483  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1484  break;
1485  case 2:
1486  off = 1;
1487  break;
1488  case 3:
1489  off = -1;
1490  }
1491  }
1492  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1493 
1494  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1495  b_valid = a_valid && (s->mb_width > 1);
1496  c_valid = s->mb_x || (n == 1 || n == 3);
1497  if (v->field_mode) {
1498  a_valid = a_valid && !is_intra[xy - wrap];
1499  b_valid = b_valid && !is_intra[xy - wrap + off];
1500  c_valid = c_valid && !is_intra[xy - 1];
1501  }
1502 
1503  if (a_valid) {
1504  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1505  num_oppfield += a_f;
1506  num_samefield += 1 - a_f;
1507  field_predA[0] = A[0];
1508  field_predA[1] = A[1];
1509  } else {
1510  field_predA[0] = field_predA[1] = 0;
1511  a_f = 0;
1512  }
1513  if (b_valid) {
1514  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1515  num_oppfield += b_f;
1516  num_samefield += 1 - b_f;
1517  field_predB[0] = B[0];
1518  field_predB[1] = B[1];
1519  } else {
1520  field_predB[0] = field_predB[1] = 0;
1521  b_f = 0;
1522  }
1523  if (c_valid) {
1524  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1525  num_oppfield += c_f;
1526  num_samefield += 1 - c_f;
1527  field_predC[0] = C[0];
1528  field_predC[1] = C[1];
1529  } else {
1530  field_predC[0] = field_predC[1] = 0;
1531  c_f = 0;
1532  }
1533 
1534  if (v->field_mode) {
1535  if (!v->numref)
1536  // REFFIELD determines if the last field or the second-last field is
1537  // to be used as reference
1538  opposite = 1 - v->reffield;
1539  else {
1540  if (num_samefield <= num_oppfield)
1541  opposite = 1 - pred_flag;
1542  else
1543  opposite = pred_flag;
1544  }
1545  } else
1546  opposite = 0;
1547  if (opposite) {
1548  if (a_valid && !a_f) {
1549  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1550  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1551  }
1552  if (b_valid && !b_f) {
1553  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1554  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1555  }
1556  if (c_valid && !c_f) {
1557  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1558  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1559  }
1560  v->mv_f[dir][xy + v->blocks_off] = 1;
1561  v->ref_field_type[dir] = !v->cur_field_type;
1562  } else {
1563  if (a_valid && a_f) {
1564  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1565  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1566  }
1567  if (b_valid && b_f) {
1568  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1569  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1570  }
1571  if (c_valid && c_f) {
1572  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1573  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1574  }
1575  v->mv_f[dir][xy + v->blocks_off] = 0;
1576  v->ref_field_type[dir] = v->cur_field_type;
1577  }
1578 
1579  if (a_valid) {
1580  px = field_predA[0];
1581  py = field_predA[1];
1582  } else if (c_valid) {
1583  px = field_predC[0];
1584  py = field_predC[1];
1585  } else if (b_valid) {
1586  px = field_predB[0];
1587  py = field_predB[1];
1588  } else {
1589  px = 0;
1590  py = 0;
1591  }
1592 
1593  if (num_samefield + num_oppfield > 1) {
1594  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1595  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1596  }
1597 
1598  /* Pullback MV as specified in 8.3.5.3.4 */
1599  if (!v->field_mode) {
1600  int qx, qy, X, Y;
1601  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1602  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1603  X = (s->mb_width << 6) - 4;
1604  Y = (s->mb_height << 6) - 4;
1605  if (mv1) {
1606  if (qx + px < -60) px = -60 - qx;
1607  if (qy + py < -60) py = -60 - qy;
1608  } else {
1609  if (qx + px < -28) px = -28 - qx;
1610  if (qy + py < -28) py = -28 - qy;
1611  }
1612  if (qx + px > X) px = X - qx;
1613  if (qy + py > Y) py = Y - qy;
1614  }
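 /* Example of the pull-back: for a 1-MV macroblock in the first column, qx == 0,
  * so px is clamped to >= -60 quarter-pel, i.e. the predictor may reach at most
  * 15 luma pixels outside the left picture edge. */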
1615 
1616  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1617  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1618  hybridmv_thresh = 32;
1619  if (a_valid && c_valid) {
1620  if (is_intra[xy - wrap])
1621  sum = FFABS(px) + FFABS(py);
1622  else
1623  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1624  if (sum > hybridmv_thresh) {
1625  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1626  px = field_predA[0];
1627  py = field_predA[1];
1628  } else {
1629  px = field_predC[0];
1630  py = field_predC[1];
1631  }
1632  } else {
1633  if (is_intra[xy - 1])
1634  sum = FFABS(px) + FFABS(py);
1635  else
1636  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1637  if (sum > hybridmv_thresh) {
1638  if (get_bits1(&s->gb)) {
1639  px = field_predA[0];
1640  py = field_predA[1];
1641  } else {
1642  px = field_predC[0];
1643  py = field_predC[1];
1644  }
1645  }
1646  }
1647  }
1648  }
1649 
1650  if (v->field_mode && v->numref)
1651  r_y >>= 1;
1652  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1653  y_bias = 1;
1654  /* store MV using signed modulus of MV range defined in 4.11 */
1655  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1656  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
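 /* The signed-modulus store wraps the sum into [-r_x, r_x - 1]; e.g. with
  * r_x == 64, a sum of 70 becomes ((70 + 64) & 127) - 64 == -58. */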
1657  if (mv1) { /* duplicate motion data for 1-MV block */
1658  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1659  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1660  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1661  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1662  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1663  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1664  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1665  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1666  }
1667 }
1668 
1669 /** Predict and set motion vector for interlaced frame picture MBs
1670  */
1671 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1672  int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1673 {
1674  MpegEncContext *s = &v->s;
1675  int xy, wrap, off = 0;
1676  int A[2], B[2], C[2];
1677  int px = 0, py = 0;
1678  int a_valid = 0, b_valid = 0, c_valid = 0;
1679  int field_a, field_b, field_c; // 0: same, 1: opposite
1680  int total_valid, num_samefield, num_oppfield;
1681  int pos_c, pos_b, n_adj;
1682 
1683  wrap = s->b8_stride;
1684  xy = s->block_index[n];
1685 
1686  if (s->mb_intra) {
1687  s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1688  s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1689  s->current_picture.motion_val[1][xy][0] = 0;
1690  s->current_picture.motion_val[1][xy][1] = 0;
1691  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1692  s->current_picture.motion_val[0][xy + 1][0] = 0;
1693  s->current_picture.motion_val[0][xy + 1][1] = 0;
1694  s->current_picture.motion_val[0][xy + wrap][0] = 0;
1695  s->current_picture.motion_val[0][xy + wrap][1] = 0;
1696  s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1697  s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1698  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1699  s->current_picture.motion_val[1][xy + 1][0] = 0;
1700  s->current_picture.motion_val[1][xy + 1][1] = 0;
1701  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1702  s->current_picture.motion_val[1][xy + wrap][1] = 0;
1703  s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1704  s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
1705  }
1706  return;
1707  }
1708 
1709  off = ((n == 0) || (n == 1)) ? 1 : -1;
1710  /* predict A */
1711  if (s->mb_x || (n == 1) || (n == 3)) {
1712  if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1713  || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1714  A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1715  A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1716  a_valid = 1;
1717  } else { // current block has frame mv and cand. has field MV (so average)
1718  A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1719  + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1720  A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1721  + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1722  a_valid = 1;
1723  }
1724  if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1725  a_valid = 0;
1726  A[0] = A[1] = 0;
1727  }
1728  } else
1729  A[0] = A[1] = 0;
1730  /* Predict B and C */
1731  B[0] = B[1] = C[0] = C[1] = 0;
1732  if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1733  if (!s->first_slice_line) {
1734  if (!v->is_intra[s->mb_x - s->mb_stride]) {
1735  b_valid = 1;
1736  n_adj = n | 2;
1737  pos_b = s->block_index[n_adj] - 2 * wrap;
1738  if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1739  n_adj = (n & 2) | (n & 1);
1740  }
1741  B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1742  B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1743  if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1744  B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1745  B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1746  }
1747  }
1748  if (s->mb_width > 1) {
1749  if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1750  c_valid = 1;
1751  n_adj = 2;
1752  pos_c = s->block_index[2] - 2 * wrap + 2;
1753  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1754  n_adj = n & 2;
1755  }
1756  C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1757  C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1758  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1759  C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1760  C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1761  }
1762  if (s->mb_x == s->mb_width - 1) {
1763  if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1764  c_valid = 1;
1765  n_adj = 3;
1766  pos_c = s->block_index[3] - 2 * wrap - 2;
1767  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1768  n_adj = n | 1;
1769  }
1770  C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1771  C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1772  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1773  C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1774  C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1775  }
1776  } else
1777  c_valid = 0;
1778  }
1779  }
1780  }
1781  }
1782  } else {
1783  pos_b = s->block_index[1];
1784  b_valid = 1;
1785  B[0] = s->current_picture.motion_val[dir][pos_b][0];
1786  B[1] = s->current_picture.motion_val[dir][pos_b][1];
1787  pos_c = s->block_index[0];
1788  c_valid = 1;
1789  C[0] = s->current_picture.motion_val[dir][pos_c][0];
1790  C[1] = s->current_picture.motion_val[dir][pos_c][1];
1791  }
1792 
1793  total_valid = a_valid + b_valid + c_valid;
1794  // check if predictor A is out of bounds
1795  if (!s->mb_x && !(n == 1 || n == 3)) {
1796  A[0] = A[1] = 0;
1797  }
1798  // check if predictor B is out of bounds
1799  if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1800  B[0] = B[1] = C[0] = C[1] = 0;
1801  }
1802  if (!v->blk_mv_type[xy]) {
1803  if (s->mb_width == 1) {
1804  px = B[0];
1805  py = B[1];
1806  } else {
1807  if (total_valid >= 2) {
1808  px = mid_pred(A[0], B[0], C[0]);
1809  py = mid_pred(A[1], B[1], C[1]);
1810  } else if (total_valid) {
1811  if (a_valid) { px = A[0]; py = A[1]; }
1812  else if (b_valid) { px = B[0]; py = B[1]; }
1813  else { px = C[0]; py = C[1]; }
1814  }
1815  }
1816  } else {
1817  if (a_valid)
1818  field_a = (A[1] & 4) ? 1 : 0;
1819  else
1820  field_a = 0;
1821  if (b_valid)
1822  field_b = (B[1] & 4) ? 1 : 0;
1823  else
1824  field_b = 0;
1825  if (c_valid)
1826  field_c = (C[1] & 4) ? 1 : 0;
1827  else
1828  field_c = 0;
1829 
1830  num_oppfield = field_a + field_b + field_c;
1831  num_samefield = total_valid - num_oppfield;
1832  if (total_valid == 3) {
1833  if ((num_samefield == 3) || (num_oppfield == 3)) {
1834  px = mid_pred(A[0], B[0], C[0]);
1835  py = mid_pred(A[1], B[1], C[1]);
1836  } else if (num_samefield >= num_oppfield) {
1837  /* take one MV from the same-field set depending on priority;
1838  the check for B may not be necessary */
1839  px = !field_a ? A[0] : B[0];
1840  py = !field_a ? A[1] : B[1];
1841  } else {
1842  px = field_a ? A[0] : B[0];
1843  py = field_a ? A[1] : B[1];
1844  }
1845  } else if (total_valid == 2) {
1846  if (num_samefield >= num_oppfield) {
1847  if (!field_a && a_valid) {
1848  px = A[0];
1849  py = A[1];
1850  } else if (!field_b && b_valid) {
1851  px = B[0];
1852  py = B[1];
1853  } else /*if (c_valid)*/ {
1854  av_assert1(c_valid);
1855  px = C[0];
1856  py = C[1];
1857  } /*else px = py = 0;*/
1858  } else {
1859  if (field_a && a_valid) {
1860  px = A[0];
1861  py = A[1];
1862  } else /*if (field_b && b_valid)*/ {
1863  av_assert1(field_b && b_valid);
1864  px = B[0];
1865  py = B[1];
1866  } /*else if (c_valid) {
1867  px = C[0];
1868  py = C[1];
1869  }*/
1870  }
1871  } else if (total_valid == 1) {
1872  px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1873  py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1874  }
1875  }
1876 
1877  /* store MV using signed modulus of MV range defined in 4.11 */
1878  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1879  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1880  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1881  s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1882  s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1883  s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1884  s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1885  s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1886  s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1887  } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1888  s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1889  s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1890  s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1891  s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1892  }
1893 }
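/* Standalone illustration (not part of vc1dec.c): whenever three candidate
 * predictors are valid, the code above takes the component-wise median via
 * mid_pred() from mathops.h.  A plain-C equivalent of that median, with a
 * made-up test value. */
#include <stdio.h>

static int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* now a <= b */
    if (b > c) b = c;                        /* b = min(b, c) */
    return a > b ? a : b;                    /* median of the three */
}

int main(void)
{
    /* a single outlier candidate cannot drag the prediction away */
    printf("%d\n", median3(3, -7, 5));  /* prints 3 */
    return 0;
}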
1894 
1895 /** Motion compensation for direct or interpolated blocks in B-frames
1896  */
1897 static void vc1_interp_mc(VC1Context *v)
1898 {
1899  MpegEncContext *s = &v->s;
1900  H264ChromaContext *h264chroma = &v->h264chroma;
1901  uint8_t *srcY, *srcU, *srcV;
1902  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1903  int off, off_uv;
1904  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1905  int use_ic = v->next_use_ic;
1906 
1907  if (!v->field_mode && !v->s.next_picture.f.data[0])
1908  return;
1909 
1910  mx = s->mv[1][0][0];
1911  my = s->mv[1][0][1];
1912  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1913  uvmy = (my + ((my & 3) == 3)) >> 1;
1914  if (v->field_mode) {
1915  if (v->cur_field_type != v->ref_field_type[1])
1916  my = my - 2 + 4 * v->cur_field_type;
1917  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1918  }
1919  if (v->fastuvmc) {
1920  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1921  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1922  }
1923  srcY = s->next_picture.f.data[0];
1924  srcU = s->next_picture.f.data[1];
1925  srcV = s->next_picture.f.data[2];
1926 
1927  src_x = s->mb_x * 16 + (mx >> 2);
1928  src_y = s->mb_y * 16 + (my >> 2);
1929  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1930  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1931 
1932  if (v->profile != PROFILE_ADVANCED) {
1933  src_x = av_clip( src_x, -16, s->mb_width * 16);
1934  src_y = av_clip( src_y, -16, s->mb_height * 16);
1935  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1936  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1937  } else {
1938  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1939  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1940  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1941  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1942  }
1943 
1944  srcY += src_y * s->linesize + src_x;
1945  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1946  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1947 
1948  if (v->field_mode && v->ref_field_type[1]) {
1949  srcY += s->current_picture_ptr->f.linesize[0];
1950  srcU += s->current_picture_ptr->f.linesize[1];
1951  srcV += s->current_picture_ptr->f.linesize[2];
1952  }
1953 
1954  /* for grayscale we should not try to read from unknown area */
1955  if (s->flags & CODEC_FLAG_GRAY) {
1956  srcU = s->edge_emu_buffer + 18 * s->linesize;
1957  srcV = s->edge_emu_buffer + 18 * s->linesize;
1958  }
1959 
1960  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1961  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1962  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1963  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1964 
1965  srcY -= s->mspel * (1 + s->linesize);
1966  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, srcY,
1967  s->linesize, s->linesize,
1968  17 + s->mspel * 2, 17 + s->mspel * 2,
1969  src_x - s->mspel, src_y - s->mspel,
1970  s->h_edge_pos, v_edge_pos);
1971  srcY = s->edge_emu_buffer;
1972  s->vdsp.emulated_edge_mc(uvbuf, srcU,
1973  s->uvlinesize, s->uvlinesize,
1974  8 + 1, 8 + 1,
1975  uvsrc_x, uvsrc_y,
1976  s->h_edge_pos >> 1, v_edge_pos >> 1);
1977  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
1978  s->uvlinesize, s->uvlinesize,
1979  8 + 1, 8 + 1,
1980  uvsrc_x, uvsrc_y,
1981  s->h_edge_pos >> 1, v_edge_pos >> 1);
1982  srcU = uvbuf;
1983  srcV = uvbuf + 16;
1984  /* if we deal with range reduction we need to scale source blocks */
1985  if (v->rangeredfrm) {
1986  int i, j;
1987  uint8_t *src, *src2;
1988 
1989  src = srcY;
1990  for (j = 0; j < 17 + s->mspel * 2; j++) {
1991  for (i = 0; i < 17 + s->mspel * 2; i++)
1992  src[i] = ((src[i] - 128) >> 1) + 128;
1993  src += s->linesize;
1994  }
1995  src = srcU;
1996  src2 = srcV;
1997  for (j = 0; j < 9; j++) {
1998  for (i = 0; i < 9; i++) {
1999  src[i] = ((src[i] - 128) >> 1) + 128;
2000  src2[i] = ((src2[i] - 128) >> 1) + 128;
2001  }
2002  src += s->uvlinesize;
2003  src2 += s->uvlinesize;
2004  }
2005  }
2006 
2007  if (use_ic) {
2008  uint8_t (*luty )[256] = v->next_luty;
2009  uint8_t (*lutuv)[256] = v->next_lutuv;
2010  int i, j;
2011  uint8_t *src, *src2;
2012 
2013  src = srcY;
2014  for (j = 0; j < 17 + s->mspel * 2; j++) {
2015  int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2016  for (i = 0; i < 17 + s->mspel * 2; i++)
2017  src[i] = luty[f][src[i]];
2018  src += s->linesize;
2019  }
2020  src = srcU;
2021  src2 = srcV;
2022  for (j = 0; j < 9; j++) {
2023  int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2024  for (i = 0; i < 9; i++) {
2025  src[i] = lutuv[f][src[i]];
2026  src2[i] = lutuv[f][src2[i]];
2027  }
2028  src += s->uvlinesize;
2029  src2 += s->uvlinesize;
2030  }
2031  }
2032  srcY += s->mspel * (1 + s->linesize);
2033  }
2034 
2035  off = 0;
2036  off_uv = 0;
2037 
2038  if (s->mspel) {
2039  dxy = ((my & 3) << 2) | (mx & 3);
2040  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2041  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2042  srcY += s->linesize * 8;
2043  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2044  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2045  } else { // hpel mc
2046  dxy = (my & 2) | ((mx & 2) >> 1);
2047 
2048  if (!v->rnd)
2049  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2050  else
2051  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2052  }
2053 
2054  if (s->flags & CODEC_FLAG_GRAY) return;
2055  /* Chroma MC always uses qpel bilinear */
2056  uvmx = (uvmx & 3) << 1;
2057  uvmy = (uvmy & 3) << 1;
2058  if (!v->rnd) {
2059  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2060  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2061  } else {
2062  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2063  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2064  }
2065 }
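/* Standalone illustration (not part of vc1dec.c): how the chroma MV is
 * derived from the quarter-pel luma MV as in the function above, and what the
 * FASTUVMC rounding does.  The MV values are hypothetical, and the right
 * shifts on negative numbers assume an arithmetic shift, as the decoder
 * itself does. */
#include <stdio.h>

int main(void)
{
    int mx = 6, my = -6;                     /* luma MV, quarter-pel units */
    int uvmx = (mx + ((mx & 3) == 3)) >> 1;  /* 3/4-pel positions round up */
    int uvmy = (my + ((my & 3) == 3)) >> 1;
    /* FASTUVMC: push odd (quarter-pel) chroma positions out to the nearest
     * half-pel, away from zero */
    int fx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
    int fy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    printf("chroma (%d,%d) -> fastuvmc (%d,%d)\n", uvmx, uvmy, fx, fy);
    /* prints: chroma (3,-3) -> fastuvmc (4,-4) */
    return 0;
}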
2066 
2067 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2068 {
2069  int n = bfrac;
2070 
2071 #if B_FRACTION_DEN==256
2072  if (inv)
2073  n -= 256;
2074  if (!qs)
2075  return 2 * ((value * n + 255) >> 9);
2076  return (value * n + 128) >> 8;
2077 #else
2078  if (inv)
2079  n -= B_FRACTION_DEN;
2080  if (!qs)
2081  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2082  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2083 #endif
2084 }
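/* Standalone illustration (not part of vc1dec.c): scale_mv() above scales the
 * co-located MV of the next anchor picture by BFRACTION to build the
 * direct-mode forward and backward MVs.  A minimal sketch of the
 * B_FRACTION_DEN == 256 branch with quarter-pel enabled; the co-located MV
 * and the bfraction value (128, i.e. 1/2) are hypothetical. */
#include <stdio.h>

static int scale_mv_sketch(int value, int bfrac, int inv, int qs)
{
    int n = bfrac;
    if (inv)
        n -= 256;                /* backward: scale by -(1 - BFRACTION) */
    if (!qs)
        return 2 * ((value * n + 255) >> 9);
    return (value * n + 128) >> 8;
}

int main(void)
{
    int colocated = 40;
    printf("fwd %d, bwd %d\n",
           scale_mv_sketch(colocated, 128, 0, 1),   /*  20 */
           scale_mv_sketch(colocated, 128, 1, 1));  /* -20 */
    return 0;
}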
2085 
2086 /** Reconstruct motion vector for B-frame and do motion compensation
2087  */
2088 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2089  int direct, int mode)
2090 {
2091  if (direct) {
2092  vc1_mc_1mv(v, 0);
2093  vc1_interp_mc(v);
2094  return;
2095  }
2096  if (mode == BMV_TYPE_INTERPOLATED) {
2097  vc1_mc_1mv(v, 0);
2098  vc1_interp_mc(v);
2099  return;
2100  }
2101 
2102  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2103 }
2104 
2105 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2106  int direct, int mvtype)
2107 {
2108  MpegEncContext *s = &v->s;
2109  int xy, wrap, off = 0;
2110  int16_t *A, *B, *C;
2111  int px, py;
2112  int sum;
2113  int r_x, r_y;
2114  const uint8_t *is_intra = v->mb_type[0];
2115 
2116  av_assert0(!v->field_mode);
2117 
2118  r_x = v->range_x;
2119  r_y = v->range_y;
2120  /* scale MV difference to be quad-pel */
2121  dmv_x[0] <<= 1 - s->quarter_sample;
2122  dmv_y[0] <<= 1 - s->quarter_sample;
2123  dmv_x[1] <<= 1 - s->quarter_sample;
2124  dmv_y[1] <<= 1 - s->quarter_sample;
2125 
2126  wrap = s->b8_stride;
2127  xy = s->block_index[0];
2128 
2129  if (s->mb_intra) {
2130  s->current_picture.motion_val[0][xy][0] =
2131  s->current_picture.motion_val[0][xy][1] =
2132  s->current_picture.motion_val[1][xy][0] =
2133  s->current_picture.motion_val[1][xy][1] = 0;
2134  return;
2135  }
2136  if (direct && s->next_picture_ptr->field_picture)
2137  av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
2138 
2139  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2140  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2141  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2142  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2143 
2144  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2145  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2146  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2147  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2148  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2149  if (direct) {
2150  s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2151  s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2152  s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2153  s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2154  return;
2155  }
2156 
2157  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2158  C = s->current_picture.motion_val[0][xy - 2];
2159  A = s->current_picture.motion_val[0][xy - wrap * 2];
2160  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2161  B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2162 
2163  if (!s->mb_x) C[0] = C[1] = 0;
2164  if (!s->first_slice_line) { // predictor A is not out of bounds
2165  if (s->mb_width == 1) {
2166  px = A[0];
2167  py = A[1];
2168  } else {
2169  px = mid_pred(A[0], B[0], C[0]);
2170  py = mid_pred(A[1], B[1], C[1]);
2171  }
2172  } else if (s->mb_x) { // predictor C is not out of bounds
2173  px = C[0];
2174  py = C[1];
2175  } else {
2176  px = py = 0;
2177  }
2178  /* Pullback MV as specified in 8.3.5.3.4 */
2179  {
2180  int qx, qy, X, Y;
2181  if (v->profile < PROFILE_ADVANCED) {
2182  qx = (s->mb_x << 5);
2183  qy = (s->mb_y << 5);
2184  X = (s->mb_width << 5) - 4;
2185  Y = (s->mb_height << 5) - 4;
2186  if (qx + px < -28) px = -28 - qx;
2187  if (qy + py < -28) py = -28 - qy;
2188  if (qx + px > X) px = X - qx;
2189  if (qy + py > Y) py = Y - qy;
2190  } else {
2191  qx = (s->mb_x << 6);
2192  qy = (s->mb_y << 6);
2193  X = (s->mb_width << 6) - 4;
2194  Y = (s->mb_height << 6) - 4;
2195  if (qx + px < -60) px = -60 - qx;
2196  if (qy + py < -60) py = -60 - qy;
2197  if (qx + px > X) px = X - qx;
2198  if (qy + py > Y) py = Y - qy;
2199  }
2200  }
2201  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2202  if (0 && !s->first_slice_line && s->mb_x) {
2203  if (is_intra[xy - wrap])
2204  sum = FFABS(px) + FFABS(py);
2205  else
2206  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2207  if (sum > 32) {
2208  if (get_bits1(&s->gb)) {
2209  px = A[0];
2210  py = A[1];
2211  } else {
2212  px = C[0];
2213  py = C[1];
2214  }
2215  } else {
2216  if (is_intra[xy - 2])
2217  sum = FFABS(px) + FFABS(py);
2218  else
2219  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2220  if (sum > 32) {
2221  if (get_bits1(&s->gb)) {
2222  px = A[0];
2223  py = A[1];
2224  } else {
2225  px = C[0];
2226  py = C[1];
2227  }
2228  }
2229  }
2230  }
2231  /* store MV using signed modulus of MV range defined in 4.11 */
2232  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2233  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2234  }
2235  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2236  C = s->current_picture.motion_val[1][xy - 2];
2237  A = s->current_picture.motion_val[1][xy - wrap * 2];
2238  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2239  B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2240 
2241  if (!s->mb_x)
2242  C[0] = C[1] = 0;
2243  if (!s->first_slice_line) { // predictor A is not out of bounds
2244  if (s->mb_width == 1) {
2245  px = A[0];
2246  py = A[1];
2247  } else {
2248  px = mid_pred(A[0], B[0], C[0]);
2249  py = mid_pred(A[1], B[1], C[1]);
2250  }
2251  } else if (s->mb_x) { // predictor C is not out of bounds
2252  px = C[0];
2253  py = C[1];
2254  } else {
2255  px = py = 0;
2256  }
2257  /* Pullback MV as specified in 8.3.5.3.4 */
2258  {
2259  int qx, qy, X, Y;
2260  if (v->profile < PROFILE_ADVANCED) {
2261  qx = (s->mb_x << 5);
2262  qy = (s->mb_y << 5);
2263  X = (s->mb_width << 5) - 4;
2264  Y = (s->mb_height << 5) - 4;
2265  if (qx + px < -28) px = -28 - qx;
2266  if (qy + py < -28) py = -28 - qy;
2267  if (qx + px > X) px = X - qx;
2268  if (qy + py > Y) py = Y - qy;
2269  } else {
2270  qx = (s->mb_x << 6);
2271  qy = (s->mb_y << 6);
2272  X = (s->mb_width << 6) - 4;
2273  Y = (s->mb_height << 6) - 4;
2274  if (qx + px < -60) px = -60 - qx;
2275  if (qy + py < -60) py = -60 - qy;
2276  if (qx + px > X) px = X - qx;
2277  if (qy + py > Y) py = Y - qy;
2278  }
2279  }
2280  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2281  if (0 && !s->first_slice_line && s->mb_x) {
2282  if (is_intra[xy - wrap])
2283  sum = FFABS(px) + FFABS(py);
2284  else
2285  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2286  if (sum > 32) {
2287  if (get_bits1(&s->gb)) {
2288  px = A[0];
2289  py = A[1];
2290  } else {
2291  px = C[0];
2292  py = C[1];
2293  }
2294  } else {
2295  if (is_intra[xy - 2])
2296  sum = FFABS(px) + FFABS(py);
2297  else
2298  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2299  if (sum > 32) {
2300  if (get_bits1(&s->gb)) {
2301  px = A[0];
2302  py = A[1];
2303  } else {
2304  px = C[0];
2305  py = C[1];
2306  }
2307  }
2308  }
2309  }
2310  /* store MV using signed modulus of MV range defined in 4.11 */
2311 
2312  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2313  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2314  }
2315  s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2316  s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2317  s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2318  s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2319 }
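/* Standalone illustration (not part of vc1dec.c): the pullback applied in
 * both branches above keeps the predicted block from pointing entirely
 * outside the coded picture.  A minimal sketch of the advanced-profile
 * variant (quarter-pel units, 64 per MB); the macroblock position and
 * picture size are hypothetical. */
#include <stdio.h>

static void pullback_adv(int *px, int *py, int mb_x, int mb_y,
                         int mb_width, int mb_height)
{
    int qx = mb_x << 6, qy = mb_y << 6;
    int X  = (mb_width  << 6) - 4;
    int Y  = (mb_height << 6) - 4;
    if (qx + *px < -60) *px = -60 - qx;
    if (qy + *py < -60) *py = -60 - qy;
    if (qx + *px > X)   *px = X - qx;
    if (qy + *py > Y)   *py = Y - qy;
}

int main(void)
{
    int px = -500, py = 10;
    pullback_adv(&px, &py, 2, 0, 45, 36);  /* MB (2,0) in a 720x576 frame */
    printf("(%d,%d)\n", px, py);           /* (-188,10) */
    return 0;
}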
2320 
2321 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2322 {
2323  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2324  MpegEncContext *s = &v->s;
2325  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2326 
2327  if (v->bmvtype == BMV_TYPE_DIRECT) {
2328  int total_opp, k, f;
2329  if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2330  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2331  v->bfraction, 0, s->quarter_sample);
2332  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2333  v->bfraction, 0, s->quarter_sample);
2334  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2335  v->bfraction, 1, s->quarter_sample);
2336  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2337  v->bfraction, 1, s->quarter_sample);
2338 
2339  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2340  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2341  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2342  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2343  f = (total_opp > 2) ? 1 : 0;
2344  } else {
2345  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2346  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2347  f = 0;
2348  }
2349  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2350  for (k = 0; k < 4; k++) {
2351  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2352  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2353  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2354  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2355  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2356  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2357  }
2358  return;
2359  }
2360  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2361  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2362  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2363  return;
2364  }
2365  if (dir) { // backward
2366  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2367  if (n == 3 || mv1) {
2368  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2369  }
2370  } else { // forward
2371  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2372  if (n == 3 || mv1) {
2373  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2374  }
2375  }
2376 }
2377 
2378 /** Get predicted DC value for I-frames only
2379  * prediction dir: left=0, top=1
2380  * @param s MpegEncContext
2381  * @param overlap flag indicating that overlap filtering is used
2382  * @param pq integer part of picture quantizer
2383  * @param[in] n block index in the current MB
2384  * @param dc_val_ptr Pointer to DC predictor
2385  * @param dir_ptr Prediction direction for use in AC prediction
2386  */
2387 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2388  int16_t **dc_val_ptr, int *dir_ptr)
2389 {
2390  int a, b, c, wrap, pred, scale;
2391  int16_t *dc_val;
2392  static const uint16_t dcpred[32] = {
2393  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2394  114, 102, 93, 85, 79, 73, 68, 64,
2395  60, 57, 54, 51, 49, 47, 45, 43,
2396  41, 39, 38, 37, 35, 34, 33
2397  };
2398 
2399  /* find prediction - wmv3_dc_scale always used here in fact */
2400  if (n < 4) scale = s->y_dc_scale;
2401  else scale = s->c_dc_scale;
2402 
2403  wrap = s->block_wrap[n];
2404  dc_val = s->dc_val[0] + s->block_index[n];
2405 
2406  /* B A
2407  * C X
2408  */
2409  c = dc_val[ - 1];
2410  b = dc_val[ - 1 - wrap];
2411  a = dc_val[ - wrap];
2412 
2413  if (pq < 9 || !overlap) {
2414  /* Set outer values */
2415  if (s->first_slice_line && (n != 2 && n != 3))
2416  b = a = dcpred[scale];
2417  if (s->mb_x == 0 && (n != 1 && n != 3))
2418  b = c = dcpred[scale];
2419  } else {
2420  /* Set outer values */
2421  if (s->first_slice_line && (n != 2 && n != 3))
2422  b = a = 0;
2423  if (s->mb_x == 0 && (n != 1 && n != 3))
2424  b = c = 0;
2425  }
2426 
2427  if (abs(a - b) <= abs(b - c)) {
2428  pred = c;
2429  *dir_ptr = 1; // left
2430  } else {
2431  pred = a;
2432  *dir_ptr = 0; // top
2433  }
2434 
2435  /* update predictor */
2436  *dc_val_ptr = &dc_val[0];
2437  return pred;
2438 }
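/* Standalone illustration (not part of vc1dec.c): the direction decision
 * above compares the two DC gradients around the current block X (neighbour
 * layout B A over C X) and predicts from C (left) when |A - B| <= |B - C|,
 * otherwise from A (top).  The DC values below are hypothetical. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int a = 500, b = 510, c = 300;
    int pred, dir;
    if (abs(a - b) <= abs(b - c)) {
        pred = c; dir = 1;                /* left */
    } else {
        pred = a; dir = 0;                /* top */
    }
    printf("pred=%d dir=%s\n", pred, dir ? "left" : "top"); /* 300, left */
    return 0;
}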
2439 
2440 
2441 /** Get predicted DC value
2442  * prediction dir: left=0, top=1
2443  * @param s MpegEncContext
2444  * @param overlap flag indicating that overlap filtering is used
2445  * @param pq integer part of picture quantizer
2446  * @param[in] n block index in the current MB
2447  * @param a_avail flag indicating top block availability
2448  * @param c_avail flag indicating left block availability
2449  * @param dc_val_ptr Pointer to DC predictor
2450  * @param dir_ptr Prediction direction for use in AC prediction
2451  */
2452 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2453  int a_avail, int c_avail,
2454  int16_t **dc_val_ptr, int *dir_ptr)
2455 {
2456  int a, b, c, wrap, pred;
2457  int16_t *dc_val;
2458  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2459  int q1, q2 = 0;
2460  int dqscale_index;
2461 
2462  wrap = s->block_wrap[n];
2463  dc_val = s->dc_val[0] + s->block_index[n];
2464 
2465  /* B A
2466  * C X
2467  */
2468  c = dc_val[ - 1];
2469  b = dc_val[ - 1 - wrap];
2470  a = dc_val[ - wrap];
2471  /* scale predictors if needed */
2472  q1 = s->current_picture.qscale_table[mb_pos];
2473  dqscale_index = s->y_dc_scale_table[q1] - 1;
2474  if (dqscale_index < 0)
2475  return 0;
2476  if (c_avail && (n != 1 && n != 3)) {
2477  q2 = s->current_picture.qscale_table[mb_pos - 1];
2478  if (q2 && q2 != q1)
2479  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2480  }
2481  if (a_avail && (n != 2 && n != 3)) {
2482  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2483  if (q2 && q2 != q1)
2484  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2485  }
2486  if (a_avail && c_avail && (n != 3)) {
2487  int off = mb_pos;
2488  if (n != 1)
2489  off--;
2490  if (n != 2)
2491  off -= s->mb_stride;
2492  q2 = s->current_picture.qscale_table[off];
2493  if (q2 && q2 != q1)
2494  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2495  }
2496 
2497  if (a_avail && c_avail) {
2498  if (abs(a - b) <= abs(b - c)) {
2499  pred = c;
2500  *dir_ptr = 1; // left
2501  } else {
2502  pred = a;
2503  *dir_ptr = 0; // top
2504  }
2505  } else if (a_avail) {
2506  pred = a;
2507  *dir_ptr = 0; // top
2508  } else if (c_avail) {
2509  pred = c;
2510  *dir_ptr = 1; // left
2511  } else {
2512  pred = 0;
2513  *dir_ptr = 1; // left
2514  }
2515 
2516  /* update predictor */
2517  *dc_val_ptr = &dc_val[0];
2518  return pred;
2519 }
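/* Standalone illustration (not part of vc1dec.c): when a neighbour was coded
 * with a different quantizer, its stored DC is rescaled with the fixed-point
 * expression used above,
 *     (dc * scale_other * ff_vc1_dqscale[scale_cur - 1] + 0x20000) >> 18,
 * which works because ff_vc1_dqscale[i] is approximately 2^18 / (i + 1), so
 * the expression approximates dc * scale_other / scale_cur with rounding.
 * The table entry is recomputed below instead of taken from vc1data.c, and
 * the input values are hypothetical. */
#include <stdio.h>

int main(void)
{
    int dc = 77, scale_other = 12, scale_cur = 20;
    int dqscale  = (1 << 18) / scale_cur;            /* ~ff_vc1_dqscale[19] */
    int rescaled = (dc * scale_other * dqscale + 0x20000) >> 18;
    printf("%d (exact %.2f)\n", rescaled, dc * (double)scale_other / scale_cur);
    /* prints: 46 (exact 46.20) */
    return 0;
}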
2520 
2521 /** @} */ // Block group
2522 
2523 /**
2524  * @name VC1 Macroblock-level functions in Simple/Main Profiles
2525  * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2526  * @{
2527  */
2528 
2529 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2530  uint8_t **coded_block_ptr)
2531 {
2532  int xy, wrap, pred, a, b, c;
2533 
2534  xy = s->block_index[n];
2535  wrap = s->b8_stride;
2536 
2537  /* B C
2538  * A X
2539  */
2540  a = s->coded_block[xy - 1 ];
2541  b = s->coded_block[xy - 1 - wrap];
2542  c = s->coded_block[xy - wrap];
2543 
2544  if (b == c) {
2545  pred = a;
2546  } else {
2547  pred = c;
2548  }
2549 
2550  /* store value */
2551  *coded_block_ptr = &s->coded_block[xy];
2552 
2553  return pred;
2554 }
2555 
2556 /**
2557  * Decode one AC coefficient
2558  * @param v The VC1 context
2559  * @param last Last coefficient
2560  * @param skip How many zero coefficients to skip
2561  * @param value Decoded AC coefficient value
2562  * @param codingset set of VLC to decode data
2563  * @see 8.1.3.4
2564  */
2565 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2566  int *value, int codingset)
2567 {
2568  GetBitContext *gb = &v->s.gb;
2569  int index, escape, run = 0, level = 0, lst = 0;
2570 
2571  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2572  if (index != ff_vc1_ac_sizes[codingset] - 1) {
2573  run = vc1_index_decode_table[codingset][index][0];
2574  level = vc1_index_decode_table[codingset][index][1];
2575  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2576  if (get_bits1(gb))
2577  level = -level;
2578  } else {
2579  escape = decode210(gb);
2580  if (escape != 2) {
2581  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2582  run = vc1_index_decode_table[codingset][index][0];
2583  level = vc1_index_decode_table[codingset][index][1];
2584  lst = index >= vc1_last_decode_table[codingset];
2585  if (escape == 0) {
2586  if (lst)
2587  level += vc1_last_delta_level_table[codingset][run];
2588  else
2589  level += vc1_delta_level_table[codingset][run];
2590  } else {
2591  if (lst)
2592  run += vc1_last_delta_run_table[codingset][level] + 1;
2593  else
2594  run += vc1_delta_run_table[codingset][level] + 1;
2595  }
2596  if (get_bits1(gb))
2597  level = -level;
2598  } else {
2599  int sign;
2600  lst = get_bits1(gb);
2601  if (v->s.esc3_level_length == 0) {
2602  if (v->pq < 8 || v->dquantfrm) { // table 59
2603  v->s.esc3_level_length = get_bits(gb, 3);
2604  if (!v->s.esc3_level_length)
2605  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2606  } else { // table 60
2607  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2608  }
2609  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2610  }
2611  run = get_bits(gb, v->s.esc3_run_length);
2612  sign = get_bits1(gb);
2613  level = get_bits(gb, v->s.esc3_level_length);
2614  if (sign)
2615  level = -level;
2616  }
2617  }
2618 
2619  *last = lst;
2620  *skip = run;
2621  *value = level;
2622 }
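/* Standalone illustration (not part of vc1dec.c): the (run, level, last)
 * triplets produced above are expanded by the callers into the 64-entry
 * block: skip 'run' positions, store 'level' (through a zig-zag table in the
 * real decoder), stop on 'last'.  The triplets below are made up and the
 * zig-zag is omitted for brevity. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    struct { int run, level, last; } acs[3] = {
        { 0, 12, 0 }, { 2, -3, 0 }, { 5, 1, 1 }
    };
    int16_t block[64] = { 0 };
    int i = 1;                        /* position 0 is the DC coefficient */
    for (int k = 0; k < 3; k++) {
        i += acs[k].run;              /* skip zero coefficients */
        if (i > 63)
            break;
        block[i++] = acs[k].level;    /* real code: block[zz_table[i++]] */
        if (acs[k].last)
            break;
    }
    printf("last coefficient written at index %d\n", i - 1);  /* 10 */
    return 0;
}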
2623 
2624 /** Decode intra block in intra frames - should be faster than decode_intra_block
2625  * @param v VC1Context
2626  * @param block block to decode
2627  * @param[in] n subblock index
2628  * @param coded are AC coeffs present or not
2629  * @param codingset set of VLC to decode data
2630  */
2631 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2632  int coded, int codingset)
2633 {
2634  GetBitContext *gb = &v->s.gb;
2635  MpegEncContext *s = &v->s;
2636  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2637  int i;
2638  int16_t *dc_val;
2639  int16_t *ac_val, *ac_val2;
2640  int dcdiff;
2641 
2642  /* Get DC differential */
2643  if (n < 4) {
2644  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2645  } else {
2646  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2647  }
2648  if (dcdiff < 0) {
2649  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2650  return -1;
2651  }
2652  if (dcdiff) {
2653  if (dcdiff == 119 /* ESC index value */) {
2654  /* TODO: Optimize */
2655  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2656  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2657  else dcdiff = get_bits(gb, 8);
2658  } else {
2659  if (v->pq == 1)
2660  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2661  else if (v->pq == 2)
2662  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2663  }
2664  if (get_bits1(gb))
2665  dcdiff = -dcdiff;
2666  }
2667 
2668  /* Prediction */
2669  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2670  *dc_val = dcdiff;
2671 
2672  /* Store the quantized DC coeff, used for prediction */
2673  if (n < 4) {
2674  block[0] = dcdiff * s->y_dc_scale;
2675  } else {
2676  block[0] = dcdiff * s->c_dc_scale;
2677  }
2678  /* Skip ? */
2679  if (!coded) {
2680  goto not_coded;
2681  }
2682 
2683  // AC Decoding
2684  i = 1;
2685 
2686  {
2687  int last = 0, skip, value;
2688  const uint8_t *zz_table;
2689  int scale;
2690  int k;
2691 
2692  scale = v->pq * 2 + v->halfpq;
2693 
2694  if (v->s.ac_pred) {
2695  if (!dc_pred_dir)
2696  zz_table = v->zz_8x8[2];
2697  else
2698  zz_table = v->zz_8x8[3];
2699  } else
2700  zz_table = v->zz_8x8[1];
2701 
2702  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2703  ac_val2 = ac_val;
2704  if (dc_pred_dir) // left
2705  ac_val -= 16;
2706  else // top
2707  ac_val -= 16 * s->block_wrap[n];
2708 
2709  while (!last) {
2710  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2711  i += skip;
2712  if (i > 63)
2713  break;
2714  block[zz_table[i++]] = value;
2715  }
2716 
2717  /* apply AC prediction if needed */
2718  if (s->ac_pred) {
2719  if (dc_pred_dir) { // left
2720  for (k = 1; k < 8; k++)
2721  block[k << v->left_blk_sh] += ac_val[k];
2722  } else { // top
2723  for (k = 1; k < 8; k++)
2724  block[k << v->top_blk_sh] += ac_val[k + 8];
2725  }
2726  }
2727  /* save AC coeffs for further prediction */
2728  for (k = 1; k < 8; k++) {
2729  ac_val2[k] = block[k << v->left_blk_sh];
2730  ac_val2[k + 8] = block[k << v->top_blk_sh];
2731  }
2732 
2733  /* scale AC coeffs */
2734  for (k = 1; k < 64; k++)
2735  if (block[k]) {
2736  block[k] *= scale;
2737  if (!v->pquantizer)
2738  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2739  }
2740 
2741  if (s->ac_pred) i = 63;
2742  }
2743 
2744 not_coded:
2745  if (!coded) {
2746  int k, scale;
2747  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2748  ac_val2 = ac_val;
2749 
2750  i = 0;
2751  scale = v->pq * 2 + v->halfpq;
2752  memset(ac_val2, 0, 16 * 2);
2753  if (dc_pred_dir) { // left
2754  ac_val -= 16;
2755  if (s->ac_pred)
2756  memcpy(ac_val2, ac_val, 8 * 2);
2757  } else { // top
2758  ac_val -= 16 * s->block_wrap[n];
2759  if (s->ac_pred)
2760  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2761  }
2762 
2763  /* apply AC prediction if needed */
2764  if (s->ac_pred) {
2765  if (dc_pred_dir) { //left
2766  for (k = 1; k < 8; k++) {
2767  block[k << v->left_blk_sh] = ac_val[k] * scale;
2768  if (!v->pquantizer && block[k << v->left_blk_sh])
2769  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2770  }
2771  } else { // top
2772  for (k = 1; k < 8; k++) {
2773  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2774  if (!v->pquantizer && block[k << v->top_blk_sh])
2775  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2776  }
2777  }
2778  i = 63;
2779  }
2780  }
2781  s->block_last_index[n] = i;
2782 
2783  return 0;
2784 }
2785 
2786 /** Decode intra block in intra frames - advanced-profile variant of vc1_decode_i_block
2787  * @param v VC1Context
2788  * @param block block to decode
2789  * @param[in] n subblock number
2790  * @param coded are AC coeffs present or not
2791  * @param codingset set of VLC to decode data
2792  * @param mquant quantizer value for this macroblock
2793  */
2794 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2795  int coded, int codingset, int mquant)
2796 {
2797  GetBitContext *gb = &v->s.gb;
2798  MpegEncContext *s = &v->s;
2799  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2800  int i;
2801  int16_t *dc_val = NULL;
2802  int16_t *ac_val, *ac_val2;
2803  int dcdiff;
2804  int a_avail = v->a_avail, c_avail = v->c_avail;
2805  int use_pred = s->ac_pred;
2806  int scale;
2807  int q1, q2 = 0;
2808  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2809 
2810  /* Get DC differential */
2811  if (n < 4) {
2812  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2813  } else {
2814  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2815  }
2816  if (dcdiff < 0) {
2817  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2818  return -1;
2819  }
2820  if (dcdiff) {
2821  if (dcdiff == 119 /* ESC index value */) {
2822  /* TODO: Optimize */
2823  if (mquant == 1) dcdiff = get_bits(gb, 10);
2824  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2825  else dcdiff = get_bits(gb, 8);
2826  } else {
2827  if (mquant == 1)
2828  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2829  else if (mquant == 2)
2830  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2831  }
2832  if (get_bits1(gb))
2833  dcdiff = -dcdiff;
2834  }
2835 
2836  /* Prediction */
2837  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2838  *dc_val = dcdiff;
2839 
2840  /* Store the quantized DC coeff, used for prediction */
2841  if (n < 4) {
2842  block[0] = dcdiff * s->y_dc_scale;
2843  } else {
2844  block[0] = dcdiff * s->c_dc_scale;
2845  }
2846 
2847  //AC Decoding
2848  i = 1;
2849 
2850  /* check if AC is needed at all */
2851  if (!a_avail && !c_avail)
2852  use_pred = 0;
2853  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2854  ac_val2 = ac_val;
2855 
2856  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2857 
2858  if (dc_pred_dir) // left
2859  ac_val -= 16;
2860  else // top
2861  ac_val -= 16 * s->block_wrap[n];
2862 
2863  q1 = s->current_picture.qscale_table[mb_pos];
2864  if ( dc_pred_dir && c_avail && mb_pos)
2865  q2 = s->current_picture.qscale_table[mb_pos - 1];
2866  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2867  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2868  if ( dc_pred_dir && n == 1)
2869  q2 = q1;
2870  if (!dc_pred_dir && n == 2)
2871  q2 = q1;
2872  if (n == 3)
2873  q2 = q1;
2874 
2875  if (coded) {
2876  int last = 0, skip, value;
2877  const uint8_t *zz_table;
2878  int k;
2879 
2880  if (v->s.ac_pred) {
2881  if (!use_pred && v->fcm == ILACE_FRAME) {
2882  zz_table = v->zzi_8x8;
2883  } else {
2884  if (!dc_pred_dir) // top
2885  zz_table = v->zz_8x8[2];
2886  else // left
2887  zz_table = v->zz_8x8[3];
2888  }
2889  } else {
2890  if (v->fcm != ILACE_FRAME)
2891  zz_table = v->zz_8x8[1];
2892  else
2893  zz_table = v->zzi_8x8;
2894  }
2895 
2896  while (!last) {
2897  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2898  i += skip;
2899  if (i > 63)
2900  break;
2901  block[zz_table[i++]] = value;
2902  }
2903 
2904  /* apply AC prediction if needed */
2905  if (use_pred) {
2906  /* scale predictors if needed*/
2907  if (q2 && q1 != q2) {
2908  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2909  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2910 
2911  if (q1 < 1)
2912  return AVERROR_INVALIDDATA;
2913  if (dc_pred_dir) { // left
2914  for (k = 1; k < 8; k++)
2915  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2916  } else { // top
2917  for (k = 1; k < 8; k++)
2918  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2919  }
2920  } else {
2921  if (dc_pred_dir) { //left
2922  for (k = 1; k < 8; k++)
2923  block[k << v->left_blk_sh] += ac_val[k];
2924  } else { //top
2925  for (k = 1; k < 8; k++)
2926  block[k << v->top_blk_sh] += ac_val[k + 8];
2927  }
2928  }
2929  }
2930  /* save AC coeffs for further prediction */
2931  for (k = 1; k < 8; k++) {
2932  ac_val2[k ] = block[k << v->left_blk_sh];
2933  ac_val2[k + 8] = block[k << v->top_blk_sh];
2934  }
2935 
2936  /* scale AC coeffs */
2937  for (k = 1; k < 64; k++)
2938  if (block[k]) {
2939  block[k] *= scale;
2940  if (!v->pquantizer)
2941  block[k] += (block[k] < 0) ? -mquant : mquant;
2942  }
2943 
2944  if (use_pred) i = 63;
2945  } else { // no AC coeffs
2946  int k;
2947 
2948  memset(ac_val2, 0, 16 * 2);
2949  if (dc_pred_dir) { // left
2950  if (use_pred) {
2951  memcpy(ac_val2, ac_val, 8 * 2);
2952  if (q2 && q1 != q2) {
2953  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2954  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2955  if (q1 < 1)
2956  return AVERROR_INVALIDDATA;
2957  for (k = 1; k < 8; k++)
2958  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2959  }
2960  }
2961  } else { // top
2962  if (use_pred) {
2963  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2964  if (q2 && q1 != q2) {
2965  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2966  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2967  if (q1 < 1)
2968  return AVERROR_INVALIDDATA;
2969  for (k = 1; k < 8; k++)
2970  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2971  }
2972  }
2973  }
2974 
2975  /* apply AC prediction if needed */
2976  if (use_pred) {
2977  if (dc_pred_dir) { // left
2978  for (k = 1; k < 8; k++) {
2979  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2980  if (!v->pquantizer && block[k << v->left_blk_sh])
2981  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2982  }
2983  } else { // top
2984  for (k = 1; k < 8; k++) {
2985  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2986  if (!v->pquantizer && block[k << v->top_blk_sh])
2987  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2988  }
2989  }
2990  i = 63;
2991  }
2992  }
2993  s->block_last_index[n] = i;
2994 
2995  return 0;
2996 }
2997 
2998 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2999  * @param v VC1Context
3000  * @param block block to decode
3001  * @param[in] n subblock index
3002  * @param coded are AC coeffs present or not
3003  * @param mquant block quantizer
3004  * @param codingset set of VLC to decode data
3005  */
3006 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3007  int coded, int mquant, int codingset)
3008 {
3009  GetBitContext *gb = &v->s.gb;
3010  MpegEncContext *s = &v->s;
3011  int dc_pred_dir = 0; /* Direction of the DC prediction used */
3012  int i;
3013  int16_t *dc_val = NULL;
3014  int16_t *ac_val, *ac_val2;
3015  int dcdiff;
3016  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3017  int a_avail = v->a_avail, c_avail = v->c_avail;
3018  int use_pred = s->ac_pred;
3019  int scale;
3020  int q1, q2 = 0;
3021 
3022  s->dsp.clear_block(block);
3023 
3024  /* XXX: Guard against dumb values of mquant */
3025  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3026 
3027  /* Set DC scale - y and c use the same */
3028  s->y_dc_scale = s->y_dc_scale_table[mquant];
3029  s->c_dc_scale = s->c_dc_scale_table[mquant];
3030 
3031  /* Get DC differential */
3032  if (n < 4) {
3033  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3034  } else {
3035  dcdiff = get_vlc2(&v->s.gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3036  }
3037  if (dcdiff < 0) {
3038  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3039  return -1;
3040  }
3041  if (dcdiff) {
3042  if (dcdiff == 119 /* ESC index value */) {
3043  /* TODO: Optimize */
3044  if (mquant == 1) dcdiff = get_bits(gb, 10);
3045  else if (mquant == 2) dcdiff = get_bits(gb, 9);
3046  else dcdiff = get_bits(gb, 8);
3047  } else {
3048  if (mquant == 1)
3049  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3050  else if (mquant == 2)
3051  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3052  }
3053  if (get_bits1(gb))
3054  dcdiff = -dcdiff;
3055  }
3056 
3057  /* Prediction */
3058  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3059  *dc_val = dcdiff;
3060 
3061  /* Store the quantized DC coeff, used for prediction */
3062 
3063  if (n < 4) {
3064  block[0] = dcdiff * s->y_dc_scale;
3065  } else {
3066  block[0] = dcdiff * s->c_dc_scale;
3067  }
3068 
3069  //AC Decoding
3070  i = 1;
3071 
3072  /* check if AC is needed at all and adjust direction if needed */
3073  if (!a_avail) dc_pred_dir = 1;
3074  if (!c_avail) dc_pred_dir = 0;
3075  if (!a_avail && !c_avail) use_pred = 0;
3076  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3077  ac_val2 = ac_val;
3078 
3079  scale = mquant * 2 + v->halfpq;
3080 
3081  if (dc_pred_dir) //left
3082  ac_val -= 16;
3083  else //top
3084  ac_val -= 16 * s->block_wrap[n];
3085 
3086  q1 = s->current_picture.qscale_table[mb_pos];
3087  if (dc_pred_dir && c_avail && mb_pos)
3088  q2 = s->current_picture.qscale_table[mb_pos - 1];
3089  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3090  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3091  if ( dc_pred_dir && n == 1)
3092  q2 = q1;
3093  if (!dc_pred_dir && n == 2)
3094  q2 = q1;
3095  if (n == 3) q2 = q1;
3096 
3097  if (coded) {
3098  int last = 0, skip, value;
3099  int k;
3100 
3101  while (!last) {
3102  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3103  i += skip;
3104  if (i > 63)
3105  break;
3106  if (v->fcm == PROGRESSIVE)
3107  block[v->zz_8x8[0][i++]] = value;
3108  else {
3109  if (use_pred && (v->fcm == ILACE_FRAME)) {
3110  if (!dc_pred_dir) // top
3111  block[v->zz_8x8[2][i++]] = value;
3112  else // left
3113  block[v->zz_8x8[3][i++]] = value;
3114  } else {
3115  block[v->zzi_8x8[i++]] = value;
3116  }
3117  }
3118  }
3119 
3120  /* apply AC prediction if needed */
3121  if (use_pred) {
3122  /* scale predictors if needed*/
3123  if (q2 && q1 != q2) {
3124  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3125  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3126 
3127  if (q1 < 1)
3128  return AVERROR_INVALIDDATA;
3129  if (dc_pred_dir) { // left
3130  for (k = 1; k < 8; k++)
3131  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3132  } else { //top
3133  for (k = 1; k < 8; k++)
3134  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3135  }
3136  } else {
3137  if (dc_pred_dir) { // left
3138  for (k = 1; k < 8; k++)
3139  block[k << v->left_blk_sh] += ac_val[k];
3140  } else { // top
3141  for (k = 1; k < 8; k++)
3142  block[k << v->top_blk_sh] += ac_val[k + 8];
3143  }
3144  }
3145  }
3146  /* save AC coeffs for further prediction */
3147  for (k = 1; k < 8; k++) {
3148  ac_val2[k ] = block[k << v->left_blk_sh];
3149  ac_val2[k + 8] = block[k << v->top_blk_sh];
3150  }
3151 
3152  /* scale AC coeffs */
3153  for (k = 1; k < 64; k++)
3154  if (block[k]) {
3155  block[k] *= scale;
3156  if (!v->pquantizer)
3157  block[k] += (block[k] < 0) ? -mquant : mquant;
3158  }
3159 
3160  if (use_pred) i = 63;
3161  } else { // no AC coeffs
3162  int k;
3163 
3164  memset(ac_val2, 0, 16 * 2);
3165  if (dc_pred_dir) { // left
3166  if (use_pred) {
3167  memcpy(ac_val2, ac_val, 8 * 2);
3168  if (q2 && q1 != q2) {
3169  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3170  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3171  if (q1 < 1)
3172  return AVERROR_INVALIDDATA;
3173  for (k = 1; k < 8; k++)
3174  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3175  }
3176  }
3177  } else { // top
3178  if (use_pred) {
3179  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3180  if (q2 && q1 != q2) {
3181  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3182  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3183  if (q1 < 1)
3184  return AVERROR_INVALIDDATA;
3185  for (k = 1; k < 8; k++)
3186  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3187  }
3188  }
3189  }
3190 
3191  /* apply AC prediction if needed */
3192  if (use_pred) {
3193  if (dc_pred_dir) { // left
3194  for (k = 1; k < 8; k++) {
3195  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3196  if (!v->pquantizer && block[k << v->left_blk_sh])
3197  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3198  }
3199  } else { // top
3200  for (k = 1; k < 8; k++) {
3201  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3202  if (!v->pquantizer && block[k << v->top_blk_sh])
3203  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3204  }
3205  }
3206  i = 63;
3207  }
3208  }
3209  s->block_last_index[n] = i;
3210 
3211  return 0;
3212 }
3213 
3214 /** Decode P block
3215  */
3216 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3217  int mquant, int ttmb, int first_block,
3218  uint8_t *dst, int linesize, int skip_block,
3219  int *ttmb_out)
3220 {
3221  MpegEncContext *s = &v->s;
3222  GetBitContext *gb = &s->gb;
3223  int i, j;
3224  int subblkpat = 0;
3225  int scale, off, idx, last, skip, value;
3226  int ttblk = ttmb & 7;
3227  int pat = 0;
3228 
3229  s->dsp.clear_block(block);
3230 
3231  if (ttmb == -1) {
3232  ttblk = ff_vc1_ttblk_to_tt[v->tt_index][get_vlc2(gb, ff_vc1_ttblk_vlc[v->tt_index].table, VC1_TTBLK_VLC_BITS, 1)];
3233  }
3234  if (ttblk == TT_4X4) {
3235  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3236  }
3237  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3238  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3239  || (!v->res_rtm_flag && !first_block))) {
3240  subblkpat = decode012(gb);
3241  if (subblkpat)
3242  subblkpat ^= 3; // swap decoded pattern bits
3243  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3244  ttblk = TT_8X4;
3245  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3246  ttblk = TT_4X8;
3247  }
3248  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3249 
3250  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3251  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3252  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3253  ttblk = TT_8X4;
3254  }
3255  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3256  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3257  ttblk = TT_4X8;
3258  }
3259  switch (ttblk) {
3260  case TT_8X8:
3261  pat = 0xF;
3262  i = 0;
3263  last = 0;
3264  while (!last) {
3265  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3266  i += skip;
3267  if (i > 63)
3268  break;
3269  if (!v->fcm)
3270  idx = v->zz_8x8[0][i++];
3271  else
3272  idx = v->zzi_8x8[i++];
3273  block[idx] = value * scale;
3274  if (!v->pquantizer)
3275  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3276  }
3277  if (!skip_block) {
3278  if (i == 1)
3279  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3280  else {
3281  v->vc1dsp.vc1_inv_trans_8x8(block);
3282  s->dsp.add_pixels_clamped(block, dst, linesize);
3283  }
3284  }
3285  break;
3286  case TT_4X4:
3287  pat = ~subblkpat & 0xF;
3288  for (j = 0; j < 4; j++) {
3289  last = subblkpat & (1 << (3 - j));
3290  i = 0;
3291  off = (j & 1) * 4 + (j & 2) * 16;
3292  while (!last) {
3293  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3294  i += skip;
3295  if (i > 15)
3296  break;
3297  if (!v->fcm)
3298  idx = ff_vc1_simple_progressive_4x4_zz[i++];
3299  else
3300  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3301  block[idx + off] = value * scale;
3302  if (!v->pquantizer)
3303  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3304  }
3305  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3306  if (i == 1)
3307  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3308  else
3309  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3310  }
3311  }
3312  break;
3313  case TT_8X4:
3314  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3315  for (j = 0; j < 2; j++) {
3316  last = subblkpat & (1 << (1 - j));
3317  i = 0;
3318  off = j * 32;
3319  while (!last) {
3320  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3321  i += skip;
3322  if (i > 31)
3323  break;
3324  if (!v->fcm)
3325  idx = v->zz_8x4[i++] + off;
3326  else
3327  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3328  block[idx] = value * scale;
3329  if (!v->pquantizer)
3330  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3331  }
3332  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3333  if (i == 1)
3334  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3335  else
3336  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3337  }
3338  }
3339  break;
3340  case TT_4X8:
3341  pat = ~(subblkpat * 5) & 0xF;
3342  for (j = 0; j < 2; j++) {
3343  last = subblkpat & (1 << (1 - j));
3344  i = 0;
3345  off = j * 4;
3346  while (!last) {
3347  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3348  i += skip;
3349  if (i > 31)
3350  break;
3351  if (!v->fcm)
3352  idx = v->zz_4x8[i++] + off;
3353  else
3354  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3355  block[idx] = value * scale;
3356  if (!v->pquantizer)
3357  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3358  }
3359  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3360  if (i == 1)
3361  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3362  else
3363  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3364  }
3365  }
3366  break;
3367  }
3368  if (ttmb_out)
3369  *ttmb_out |= ttblk << (n * 4);
3370  return pat;
3371 }
3372 
3373 /** @} */ // Macroblock group
3374 
3375 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3376 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
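/* Standalone illustration (not part of vc1dec.c): size_table[] and
 * offset_table[] above feed the MV-differential escape decoding used by the
 * P-macroblock code below (via the GET_MVDATA macro defined earlier in this
 * file): the VLC yields a magnitude class, size_table[] gives the number of
 * raw bits to read, offset_table[] the base added to them, and the low bit
 * of the raw value carries the sign.  A sketch of that sign/magnitude
 * unpacking with hypothetical inputs. */
#include <stdio.h>

int main(void)
{
    static const int size_tab  [6] = { 0, 2, 3, 4, 5, 8 };
    static const int offset_tab[6] = { 0, 1, 3, 7, 15, 31 };
    int index1 = 3;                       /* magnitude class from the VLC */
    int nbits  = size_tab[index1];        /* 4 raw bits would be read */
    int raw    = 0xB;                     /* pretend those 4 bits were 1011 */
    int sign   = 0 - (raw & 1);           /* all-ones mask if low bit set */
    int dmv    = (sign ^ ((raw >> 1) + offset_tab[index1])) - sign;
    printf("read %d bits -> dmv = %d\n", nbits, dmv);  /* -(5 + 7) = -12 */
    return 0;
}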
3377 
3378 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3379 {
3380  MpegEncContext *s = &v->s;
3381  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3382  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3383  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3384  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3385  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3386  uint8_t *dst;
3387 
3388  if (block_num > 3) {
3389  dst = s->dest[block_num - 3];
3390  } else {
3391  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3392  }
3393  if (s->mb_y != s->end_mb_y || block_num < 2) {
3394  int16_t (*mv)[2];
3395  int mv_stride;
3396 
3397  if (block_num > 3) {
3398  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3399  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3400  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3401  mv_stride = s->mb_stride;
3402  } else {
3403  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3404  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3405  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3406  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3407  mv_stride = s->b8_stride;
3408  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3409  }
3410 
3411  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3412  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3413  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3414  } else {
3415  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3416  if (idx == 3) {
3417  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3418  } else if (idx) {
3419  if (idx == 1)
3420  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3421  else
3422  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3423  }
3424  }
3425  }
3426 
3427  dst -= 4 * linesize;
3428  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3429  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3430  idx = (block_cbp | (block_cbp >> 2)) & 3;
3431  if (idx == 3) {
3432  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3433  } else if (idx) {
3434  if (idx == 1)
3435  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3436  else
3437  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3438  }
3439  }
3440 }
3441 
3442 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3443 {
3444  MpegEncContext *s = &v->s;
3445  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3446  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3447  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3448  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3449  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3450  uint8_t *dst;
3451 
3452  if (block_num > 3) {
3453  dst = s->dest[block_num - 3] - 8 * linesize;
3454  } else {
3455  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3456  }
3457 
3458  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3459  int16_t (*mv)[2];
3460 
3461  if (block_num > 3) {
3462  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3463  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3464  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3465  } else {
3466  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3467  : (mb_cbp >> ((block_num + 1) * 4));
3468  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3469  : (mb_is_intra >> ((block_num + 1) * 4));
3470  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3471  }
3472  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3473  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3474  } else {
3475  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3476  if (idx == 5) {
3477  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3478  } else if (idx) {
3479  if (idx == 1)
3480  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3481  else
3482  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3483  }
3484  }
3485  }
3486 
3487  dst -= 4;
3488  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3489  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3490  idx = (block_cbp | (block_cbp >> 1)) & 5;
3491  if (idx == 5) {
3492  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3493  } else if (idx) {
3494  if (idx == 1)
3495  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3496  else
3497  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3498  }
3499  }
3500 }
3501 
3502 static void vc1_apply_p_loop_filter(VC1Context *v)
3503 {
3504  MpegEncContext *s = &v->s;
3505  int i;
3506 
3507  for (i = 0; i < 6; i++) {
3508  vc1_apply_p_v_loop_filter(v, i);
3509  }
3510 
3511  /* V always precedes H, therefore we run H one MB before V;
3512  * at the end of a row, we catch up to complete the row */
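     /* Concretely: called at (mb_x, mb_y), the vertical filter above touches the MB one
      * row up (cbp[mb_x - mb_stride]) while the horizontal filter touches the MB one row
      * up and one column to the left (cbp[mb_x - 1 - mb_stride]). */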
3513  if (s->mb_x) {
3514  for (i = 0; i < 6; i++) {
3515  vc1_apply_p_h_loop_filter(v, i);
3516  }
3517  if (s->mb_x == s->mb_width - 1) {
3518  s->mb_x++;
3519  ff_update_block_index(s);
3520  for (i = 0; i < 6; i++) {
3521  vc1_apply_p_h_loop_filter(v, i);
3522  }
3523  }
3524  }
3525 }
3526 
3527 /** Decode one P-frame MB
3528  */
3529 static int vc1_decode_p_mb(VC1Context *v)
3530 {
3531  MpegEncContext *s = &v->s;
3532  GetBitContext *gb = &s->gb;
3533  int i, j;
3534  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3535  int cbp; /* cbp decoding stuff */
3536  int mqdiff, mquant; /* MB quantization */
3537  int ttmb = v->ttfrm; /* MB Transform type */
3538 
3539  int mb_has_coeffs = 1; /* last_flag */
3540  int dmv_x, dmv_y; /* Differential MV components */
3541  int index, index1; /* LUT indexes */
3542  int val, sign; /* temp values */
3543  int first_block = 1;
3544  int dst_idx, off;
3545  int skipped, fourmv;
3546  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3547 
3548  mquant = v->pq; /* lossy initialization */
3549 
3550  if (v->mv_type_is_raw)
3551  fourmv = get_bits1(gb);
3552  else
3553  fourmv = v->mv_type_mb_plane[mb_pos];
3554  if (v->skip_is_raw)
3555  skipped = get_bits1(gb);
3556  else
3557  skipped = v->s.mbskip_table[mb_pos];
3558 
3559  if (!fourmv) { /* 1MV mode */
3560  if (!skipped) {
3561  GET_MVDATA(dmv_x, dmv_y);
3562 
3563  if (s->mb_intra) {
3564  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3565  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3566  }
3567  s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3568  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3569 
3570  /* FIXME Set DC val for inter block ? */
3571  if (s->mb_intra && !mb_has_coeffs) {
3572  GET_MQUANT();
3573  s->ac_pred = get_bits1(gb);
3574  cbp = 0;
3575  } else if (mb_has_coeffs) {
3576  if (s->mb_intra)
3577  s->ac_pred = get_bits1(gb);
3578  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3579  GET_MQUANT();
3580  } else {
3581  mquant = v->pq;
3582  cbp = 0;
3583  }
3584  s->current_picture.qscale_table[mb_pos] = mquant;
3585 
3586  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3587  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3588  VC1_TTMB_VLC_BITS, 2);
3589  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3590  dst_idx = 0;
3591  for (i = 0; i < 6; i++) {
3592  s->dc_val[0][s->block_index[i]] = 0;
3593  dst_idx += i >> 2;
3594  val = ((cbp >> (5 - i)) & 1);
3595  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
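             /* i = 0..3 are the four luma 8x8 blocks of the MB (bit 0 = right half,
              * bit 1 = bottom half); i = 4, 5 are the chroma blocks, which start at
              * offset 0 of their own destination plane. */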
3596  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3597  if (s->mb_intra) {
3598  /* check if prediction blocks A and C are available */
3599  v->a_avail = v->c_avail = 0;
3600  if (i == 2 || i == 3 || !s->first_slice_line)
3601  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3602  if (i == 1 || i == 3 || s->mb_x)
3603  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3604 
3605  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3606  (i & 4) ? v->codingset2 : v->codingset);
3607  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3608  continue;
3609  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3610  if (v->rangeredfrm)
3611  for (j = 0; j < 64; j++)
3612  s->block[i][j] <<= 1;
3613  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3614  if (v->pq >= 9 && v->overlap) {
3615  if (v->c_avail)
3616  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3617  if (v->a_avail)
3618  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3619  }
3620  block_cbp |= 0xF << (i << 2);
3621  block_intra |= 1 << i;
3622  } else if (val) {
3623  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3624  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3625  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3626  block_cbp |= pat << (i << 2);
3627  if (!v->ttmbf && ttmb < 8)
3628  ttmb = -1;
3629  first_block = 0;
3630  }
3631  }
3632  } else { // skipped
3633  s->mb_intra = 0;
3634  for (i = 0; i < 6; i++) {
3635  v->mb_type[0][s->block_index[i]] = 0;
3636  s->dc_val[0][s->block_index[i]] = 0;
3637  }
3638  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3639  s->current_picture.qscale_table[mb_pos] = 0;
3640  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3641  vc1_mc_1mv(v, 0);
3642  }
3643  } else { // 4MV mode
3644  if (!skipped /* unskipped MB */) {
3645  int intra_count = 0, coded_inter = 0;
3646  int is_intra[6], is_coded[6];
3647  /* Get CBPCY */
3648  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3649  for (i = 0; i < 6; i++) {
3650  val = ((cbp >> (5 - i)) & 1);
3651  s->dc_val[0][s->block_index[i]] = 0;
3652  s->mb_intra = 0;
3653  if (i < 4) {
3654  dmv_x = dmv_y = 0;
3655  s->mb_intra = 0;
3656  mb_has_coeffs = 0;
3657  if (val) {
3658  GET_MVDATA(dmv_x, dmv_y);
3659  }
3660  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3661  if (!s->mb_intra)
3662  vc1_mc_4mv_luma(v, i, 0, 0);
3663  intra_count += s->mb_intra;
3664  is_intra[i] = s->mb_intra;
3665  is_coded[i] = mb_has_coeffs;
3666  }
3667  if (i & 4) {
3668  is_intra[i] = (intra_count >= 3);
3669  is_coded[i] = val;
3670  }
3671  if (i == 4)
3672  vc1_mc_4mv_chroma(v, 0);
3673  v->mb_type[0][s->block_index[i]] = is_intra[i];
3674  if (!coded_inter)
3675  coded_inter = !is_intra[i] & is_coded[i];
3676  }
3677  // if there are no coded blocks then don't do anything more
3678  dst_idx = 0;
3679  if (!intra_count && !coded_inter)
3680  goto end;
3681  GET_MQUANT();
3682  s->current_picture.qscale_table[mb_pos] = mquant;
3683  /* test if block is intra and has pred */
3684  {
3685  int intrapred = 0;
3686  for (i = 0; i < 6; i++)
3687  if (is_intra[i]) {
3688  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3689  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3690  intrapred = 1;
3691  break;
3692  }
3693  }
3694  if (intrapred)
3695  s->ac_pred = get_bits1(gb);
3696  else
3697  s->ac_pred = 0;
3698  }
3699  if (!v->ttmbf && coded_inter)
3700  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3701  for (i = 0; i < 6; i++) {
3702  dst_idx += i >> 2;
3703  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3704  s->mb_intra = is_intra[i];
3705  if (is_intra[i]) {
3706  /* check if prediction blocks A and C are available */
3707  v->a_avail = v->c_avail = 0;
3708  if (i == 2 || i == 3 || !s->first_slice_line)
3709  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3710  if (i == 1 || i == 3 || s->mb_x)
3711  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3712 
3713  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3714  (i & 4) ? v->codingset2 : v->codingset);
3715  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3716  continue;
3717  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3718  if (v->rangeredfrm)
3719  for (j = 0; j < 64; j++)
3720  s->block[i][j] <<= 1;
3721  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3722  (i & 4) ? s->uvlinesize : s->linesize);
3723  if (v->pq >= 9 && v->overlap) {
3724  if (v->c_avail)
3725  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3726  if (v->a_avail)
3727  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3728  }
3729  block_cbp |= 0xF << (i << 2);
3730  block_intra |= 1 << i;
3731  } else if (is_coded[i]) {
3732  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3733  first_block, s->dest[dst_idx] + off,
3734  (i & 4) ? s->uvlinesize : s->linesize,
3735  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3736  &block_tt);
3737  block_cbp |= pat << (i << 2);
3738  if (!v->ttmbf && ttmb < 8)
3739  ttmb = -1;
3740  first_block = 0;
3741  }
3742  }
3743  } else { // skipped MB
3744  s->mb_intra = 0;
3745  s->current_picture.qscale_table[mb_pos] = 0;
3746  for (i = 0; i < 6; i++) {
3747  v->mb_type[0][s->block_index[i]] = 0;
3748  s->dc_val[0][s->block_index[i]] = 0;
3749  }
3750  for (i = 0; i < 4; i++) {
3751  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3752  vc1_mc_4mv_luma(v, i, 0, 0);
3753  }
3754  vc1_mc_4mv_chroma(v, 0);
3755  s->current_picture.qscale_table[mb_pos] = 0;
3756  }
3757  }
3758 end:
3759  v->cbp[s->mb_x] = block_cbp;
3760  v->ttblk[s->mb_x] = block_tt;
3761  v->is_intra[s->mb_x] = block_intra;
3762 
3763  return 0;
3764 }
3765 
3766 /* Decode one macroblock in an interlaced frame p picture */
3767 
3768 static int vc1_decode_p_mb_intfr(VC1Context *v)
3769 {
3770  MpegEncContext *s = &v->s;
3771  GetBitContext *gb = &s->gb;
3772  int i;
3773  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3774  int cbp = 0; /* cbp decoding stuff */
3775  int mqdiff, mquant; /* MB quantization */
3776  int ttmb = v->ttfrm; /* MB Transform type */
3777 
3778  int mb_has_coeffs = 1; /* last_flag */
3779  int dmv_x, dmv_y; /* Differential MV components */
3780  int val; /* temp value */
3781  int first_block = 1;
3782  int dst_idx, off;
3783  int skipped, fourmv = 0, twomv = 0;
3784  int block_cbp = 0, pat, block_tt = 0;
3785  int idx_mbmode = 0, mvbp;
3786  int stride_y, fieldtx;
3787 
3788  mquant = v->pq; /* Lossy initialization */
3789 
3790  if (v->skip_is_raw)
3791  skipped = get_bits1(gb);
3792  else
3793  skipped = v->s.mbskip_table[mb_pos];
3794  if (!skipped) {
3795  if (v->fourmvswitch)
3796  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3797  else
3798  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3799  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3800  /* store the motion vector type in a flag (useful later) */
3801  case MV_PMODE_INTFR_4MV:
3802  fourmv = 1;
3803  v->blk_mv_type[s->block_index[0]] = 0;
3804  v->blk_mv_type[s->block_index[1]] = 0;
3805  v->blk_mv_type[s->block_index[2]] = 0;
3806  v->blk_mv_type[s->block_index[3]] = 0;
3807  break;
3808  case MV_PMODE_INTFR_4MV_FIELD:
3809  fourmv = 1;
3810  v->blk_mv_type[s->block_index[0]] = 1;
3811  v->blk_mv_type[s->block_index[1]] = 1;
3812  v->blk_mv_type[s->block_index[2]] = 1;
3813  v->blk_mv_type[s->block_index[3]] = 1;
3814  break;
3815  case MV_PMODE_INTFR_2MV_FIELD:
3816  twomv = 1;
3817  v->blk_mv_type[s->block_index[0]] = 1;
3818  v->blk_mv_type[s->block_index[1]] = 1;
3819  v->blk_mv_type[s->block_index[2]] = 1;
3820  v->blk_mv_type[s->block_index[3]] = 1;
3821  break;
3822  case MV_PMODE_INTFR_1MV:
3823  v->blk_mv_type[s->block_index[0]] = 0;
3824  v->blk_mv_type[s->block_index[1]] = 0;
3825  v->blk_mv_type[s->block_index[2]] = 0;
3826  v->blk_mv_type[s->block_index[3]] = 0;
3827  break;
3828  }
3829  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3830  for (i = 0; i < 4; i++) {
3831  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3832  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3833  }
3834  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3835  s->mb_intra = v->is_intra[s->mb_x] = 1;
3836  for (i = 0; i < 6; i++)
3837  v->mb_type[0][s->block_index[i]] = 1;
3838  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3839  mb_has_coeffs = get_bits1(gb);
3840  if (mb_has_coeffs)
3841  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3842  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3843  GET_MQUANT();
3844  s->current_picture.qscale_table[mb_pos] = mquant;
3845  /* Set DC scale - y and c use the same (not sure if necessary here) */
3846  s->y_dc_scale = s->y_dc_scale_table[mquant];
3847  s->c_dc_scale = s->c_dc_scale_table[mquant];
3848  dst_idx = 0;
3849  for (i = 0; i < 6; i++) {
3850  s->dc_val[0][s->block_index[i]] = 0;
3851  dst_idx += i >> 2;
3852  val = ((cbp >> (5 - i)) & 1);
3853  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3854  v->a_avail = v->c_avail = 0;
3855  if (i == 2 || i == 3 || !s->first_slice_line)
3856  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3857  if (i == 1 || i == 3 || s->mb_x)
3858  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3859 
3860  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3861  (i & 4) ? v->codingset2 : v->codingset);
3862  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3863  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3864  if (i < 4) {
3865  stride_y = s->linesize << fieldtx;
3866  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3867  } else {
3868  stride_y = s->uvlinesize;
3869  off = 0;
3870  }
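             /* When FIELDTX is set the luma blocks are written field-interleaved: the
              * store below uses a doubled stride and blocks 2/3 start one line (instead
              * of eight lines) below blocks 0/1. */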
3871  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3872  //TODO: loop filter
3873  }
3874 
3875  } else { // inter MB
3876  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3877  if (mb_has_coeffs)
3878  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3879  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3880  v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3881  } else {
3882  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3883  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3884  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3885  }
3886  }
3887  s->mb_intra = v->is_intra[s->mb_x] = 0;
3888  for (i = 0; i < 6; i++)
3889  v->mb_type[0][s->block_index[i]] = 0;
3890  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3891  /* for all motion vectors, read MVDATA and motion compensate each block */
3892  dst_idx = 0;
3893  if (fourmv) {
3894  mvbp = v->fourmvbp;
3895  for (i = 0; i < 6; i++) {
3896  if (i < 4) {
3897  dmv_x = dmv_y = 0;
3898  val = ((mvbp >> (3 - i)) & 1);
3899  if (val) {
3900  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3901  }
3902  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3903  vc1_mc_4mv_luma(v, i, 0, 0);
3904  } else if (i == 4) {
3905  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3906  }
3907  }
3908  } else if (twomv) {
3909  mvbp = v->twomvbp;
3910  dmv_x = dmv_y = 0;
3911  if (mvbp & 2) {
3912  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3913  }
3914  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3915  vc1_mc_4mv_luma(v, 0, 0, 0);
3916  vc1_mc_4mv_luma(v, 1, 0, 0);
3917  dmv_x = dmv_y = 0;
3918  if (mvbp & 1) {
3919  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3920  }
3921  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3922  vc1_mc_4mv_luma(v, 2, 0, 0);
3923  vc1_mc_4mv_luma(v, 3, 0, 0);
3924  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3925  } else {
3926  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3927  dmv_x = dmv_y = 0;
3928  if (mvbp) {
3929  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3930  }
3931  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3932  vc1_mc_1mv(v, 0);
3933  }
3934  if (cbp)
3935  GET_MQUANT(); // p. 227
3936  s->current_picture.qscale_table[mb_pos] = mquant;
3937  if (!v->ttmbf && cbp)
3938  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3939  for (i = 0; i < 6; i++) {
3940  s->dc_val[0][s->block_index[i]] = 0;
3941  dst_idx += i >> 2;
3942  val = ((cbp >> (5 - i)) & 1);
3943  if (!fieldtx)
3944  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3945  else
3946  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3947  if (val) {
3948  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3949  first_block, s->dest[dst_idx] + off,
3950  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3951  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3952  block_cbp |= pat << (i << 2);
3953  if (!v->ttmbf && ttmb < 8)
3954  ttmb = -1;
3955  first_block = 0;
3956  }
3957  }
3958  }
3959  } else { // skipped
3960  s->mb_intra = v->is_intra[s->mb_x] = 0;
3961  for (i = 0; i < 6; i++) {
3962  v->mb_type[0][s->block_index[i]] = 0;
3963  s->dc_val[0][s->block_index[i]] = 0;
3964  }
3965  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3966  s->current_picture.qscale_table[mb_pos] = 0;
3967  v->blk_mv_type[s->block_index[0]] = 0;
3968  v->blk_mv_type[s->block_index[1]] = 0;
3969  v->blk_mv_type[s->block_index[2]] = 0;
3970  v->blk_mv_type[s->block_index[3]] = 0;
3971  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3972  vc1_mc_1mv(v, 0);
3973  }
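     /* At the end of a row, this row's intra flags are saved into the base array so the
      * next row can still address them at [mb_x - mb_stride], as is done for the
      * cbp/ttblk/is_intra buffers at the end of each row in the P-frame block decoder below. */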
3974  if (s->mb_x == s->mb_width - 1)
3975  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3976  return 0;
3977 }
3978 
3979 static int vc1_decode_p_mb_intfi(VC1Context *v)
3980 {
3981  MpegEncContext *s = &v->s;
3982  GetBitContext *gb = &s->gb;
3983  int i;
3984  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3985  int cbp = 0; /* cbp decoding stuff */
3986  int mqdiff, mquant; /* MB quantization */
3987  int ttmb = v->ttfrm; /* MB Transform type */
3988 
3989  int mb_has_coeffs = 1; /* last_flag */
3990  int dmv_x, dmv_y; /* Differential MV components */
3991  int val; /* temp values */
3992  int first_block = 1;
3993  int dst_idx, off;
3994  int pred_flag = 0;
3995  int block_cbp = 0, pat, block_tt = 0;
3996  int idx_mbmode = 0;
3997 
3998  mquant = v->pq; /* Lossy initialization */
3999 
4000  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4001  if (idx_mbmode <= 1) { // intra MB
4002  s->mb_intra = v->is_intra[s->mb_x] = 1;
4003  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4004  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4005  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4006  GET_MQUANT();
4007  s->current_picture.qscale_table[mb_pos] = mquant;
4008  /* Set DC scale - y and c use the same (not sure if necessary here) */
4009  s->y_dc_scale = s->y_dc_scale_table[mquant];
4010  s->c_dc_scale = s->c_dc_scale_table[mquant];
4011  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4012  mb_has_coeffs = idx_mbmode & 1;
4013  if (mb_has_coeffs)
4014  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4015  dst_idx = 0;
4016  for (i = 0; i < 6; i++) {
4017  s->dc_val[0][s->block_index[i]] = 0;
4018  v->mb_type[0][s->block_index[i]] = 1;
4019  dst_idx += i >> 2;
4020  val = ((cbp >> (5 - i)) & 1);
4021  v->a_avail = v->c_avail = 0;
4022  if (i == 2 || i == 3 || !s->first_slice_line)
4023  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4024  if (i == 1 || i == 3 || s->mb_x)
4025  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4026 
4027  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4028  (i & 4) ? v->codingset2 : v->codingset);
4029  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4030  continue;
4031  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4032  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4033  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4034  // TODO: loop filter
4035  }
4036  } else {
4037  s->mb_intra = v->is_intra[s->mb_x] = 0;
4038  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4039  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4040  if (idx_mbmode <= 5) { // 1-MV
4041  dmv_x = dmv_y = pred_flag = 0;
4042  if (idx_mbmode & 1) {
4043  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4044  }
4045  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4046  vc1_mc_1mv(v, 0);
4047  mb_has_coeffs = !(idx_mbmode & 2);
4048  } else { // 4-MV
4049  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4050  for (i = 0; i < 6; i++) {
4051  if (i < 4) {
4052  dmv_x = dmv_y = pred_flag = 0;
4053  val = ((v->fourmvbp >> (3 - i)) & 1);
4054  if (val) {
4055  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4056  }
4057  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4058  vc1_mc_4mv_luma(v, i, 0, 0);
4059  } else if (i == 4)
4060  vc1_mc_4mv_chroma(v, 0);
4061  }
4062  mb_has_coeffs = idx_mbmode & 1;
4063  }
4064  if (mb_has_coeffs)
4065  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4066  if (cbp) {
4067  GET_MQUANT();
4068  }
4069  s->current_picture.qscale_table[mb_pos] = mquant;
4070  if (!v->ttmbf && cbp) {
4071  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4072  }
4073  dst_idx = 0;
4074  for (i = 0; i < 6; i++) {
4075  s->dc_val[0][s->block_index[i]] = 0;
4076  dst_idx += i >> 2;
4077  val = ((cbp >> (5 - i)) & 1);
4078  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4079  if (val) {
4080  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4081  first_block, s->dest[dst_idx] + off,
4082  (i & 4) ? s->uvlinesize : s->linesize,
4083  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4084  &block_tt);
4085  block_cbp |= pat << (i << 2);
4086  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4087  first_block = 0;
4088  }
4089  }
4090  }
4091  if (s->mb_x == s->mb_width - 1)
4092  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4093  return 0;
4094 }
4095 
4096 /** Decode one B-frame MB (in Main profile)
4097  */
4098 static void vc1_decode_b_mb(VC1Context *v)
4099 {
4100  MpegEncContext *s = &v->s;
4101  GetBitContext *gb = &s->gb;
4102  int i, j;
4103  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4104  int cbp = 0; /* cbp decoding stuff */
4105  int mqdiff, mquant; /* MB quantization */
4106  int ttmb = v->ttfrm; /* MB Transform type */
4107  int mb_has_coeffs = 0; /* last_flag */
4108  int index, index1; /* LUT indexes */
4109  int val, sign; /* temp values */
4110  int first_block = 1;
4111  int dst_idx, off;
4112  int skipped, direct;
4113  int dmv_x[2], dmv_y[2];
4114  int bmvtype = BMV_TYPE_BACKWARD;
4115 
4116  mquant = v->pq; /* lossy initialization */
4117  s->mb_intra = 0;
4118 
4119  if (v->dmb_is_raw)
4120  direct = get_bits1(gb);
4121  else
4122  direct = v->direct_mb_plane[mb_pos];
4123  if (v->skip_is_raw)
4124  skipped = get_bits1(gb);
4125  else
4126  skipped = v->s.mbskip_table[mb_pos];
4127 
4128  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4129  for (i = 0; i < 6; i++) {
4130  v->mb_type[0][s->block_index[i]] = 0;
4131  s->dc_val[0][s->block_index[i]] = 0;
4132  }
4133  s->current_picture.qscale_table[mb_pos] = 0;
4134 
4135  if (!direct) {
4136  if (!skipped) {
4137  GET_MVDATA(dmv_x[0], dmv_y[0]);
4138  dmv_x[1] = dmv_x[0];
4139  dmv_y[1] = dmv_y[0];
4140  }
4141  if (skipped || !s->mb_intra) {
4142  bmvtype = decode012(gb);
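     /* The decode012() value is remapped below so the 1-bit code goes to the reference
      * the B frame sits closer to: BFRACTION >= 1/2 makes that the backward (future)
      * anchor, otherwise the forward one. */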
4143  switch (bmvtype) {
4144  case 0:
4145  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4146  break;
4147  case 1:
4148  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4149  break;
4150  case 2:
4151  bmvtype = BMV_TYPE_INTERPOLATED;
4152  dmv_x[0] = dmv_y[0] = 0;
4153  }
4154  }
4155  }
4156  for (i = 0; i < 6; i++)
4157  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4158 
4159  if (skipped) {
4160  if (direct)
4161  bmvtype = BMV_TYPE_INTERPOLATED;
4162  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4163  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4164  return;
4165  }
4166  if (direct) {
4167  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4168  GET_MQUANT();
4169  s->mb_intra = 0;
4170  s->current_picture.qscale_table[mb_pos] = mquant;
4171  if (!v->ttmbf)
4172  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4173  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4174  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4175  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4176  } else {
4177  if (!mb_has_coeffs && !s->mb_intra) {
4178  /* no coded blocks - effectively skipped */
4179  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4180  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4181  return;
4182  }
4183  if (s->mb_intra && !mb_has_coeffs) {
4184  GET_MQUANT();
4185  s->current_picture.qscale_table[mb_pos] = mquant;
4186  s->ac_pred = get_bits1(gb);
4187  cbp = 0;
4188  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4189  } else {
4190  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4191  GET_MVDATA(dmv_x[0], dmv_y[0]);
4192  if (!mb_has_coeffs) {
4193  /* interpolated skipped block */
4194  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4195  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4196  return;
4197  }
4198  }
4199  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4200  if (!s->mb_intra) {
4201  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4202  }
4203  if (s->mb_intra)
4204  s->ac_pred = get_bits1(gb);
4205  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4206  GET_MQUANT();
4207  s->current_picture.qscale_table[mb_pos] = mquant;
4208  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4209  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4210  }
4211  }
4212  dst_idx = 0;
4213  for (i = 0; i < 6; i++) {
4214  s->dc_val[0][s->block_index[i]] = 0;
4215  dst_idx += i >> 2;
4216  val = ((cbp >> (5 - i)) & 1);
4217  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4218  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4219  if (s->mb_intra) {
4220  /* check if prediction blocks A and C are available */
4221  v->a_avail = v->c_avail = 0;
4222  if (i == 2 || i == 3 || !s->first_slice_line)
4223  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4224  if (i == 1 || i == 3 || s->mb_x)
4225  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4226 
4227  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4228  (i & 4) ? v->codingset2 : v->codingset);
4229  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4230  continue;
4231  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4232  if (v->rangeredfrm)
4233  for (j = 0; j < 64; j++)
4234  s->block[i][j] <<= 1;
4235  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4236  } else if (val) {
4237  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4238  first_block, s->dest[dst_idx] + off,
4239  (i & 4) ? s->uvlinesize : s->linesize,
4240  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4241  if (!v->ttmbf && ttmb < 8)
4242  ttmb = -1;
4243  first_block = 0;
4244  }
4245  }
4246 }
4247 
4248 /** Decode one B-frame MB (in interlaced field B picture)
4249  */
4250 static void vc1_decode_b_mb_intfi(VC1Context *v)
4251 {
4252  MpegEncContext *s = &v->s;
4253  GetBitContext *gb = &s->gb;
4254  int i, j;
4255  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4256  int cbp = 0; /* cbp decoding stuff */
4257  int mqdiff, mquant; /* MB quantization */
4258  int ttmb = v->ttfrm; /* MB Transform type */
4259  int mb_has_coeffs = 0; /* last_flag */
4260  int val; /* temp value */
4261  int first_block = 1;
4262  int dst_idx, off;
4263  int fwd;
4264  int dmv_x[2], dmv_y[2], pred_flag[2];
4265  int bmvtype = BMV_TYPE_BACKWARD;
4266  int idx_mbmode;
4267 
4268  mquant = v->pq; /* Lossy initialization */
4269  s->mb_intra = 0;
4270 
4271  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4272  if (idx_mbmode <= 1) { // intra MB
4273  s->mb_intra = v->is_intra[s->mb_x] = 1;
4274  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4275  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4276  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4277  GET_MQUANT();
4278  s->current_picture.qscale_table[mb_pos] = mquant;
4279  /* Set DC scale - y and c use the same (not sure if necessary here) */
4280  s->y_dc_scale = s->y_dc_scale_table[mquant];
4281  s->c_dc_scale = s->c_dc_scale_table[mquant];
4282  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4283  mb_has_coeffs = idx_mbmode & 1;
4284  if (mb_has_coeffs)
4285  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4286  dst_idx = 0;
4287  for (i = 0; i < 6; i++) {
4288  s->dc_val[0][s->block_index[i]] = 0;
4289  dst_idx += i >> 2;
4290  val = ((cbp >> (5 - i)) & 1);
4291  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4292  v->a_avail = v->c_avail = 0;
4293  if (i == 2 || i == 3 || !s->first_slice_line)
4294  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4295  if (i == 1 || i == 3 || s->mb_x)
4296  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4297 
4298  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4299  (i & 4) ? v->codingset2 : v->codingset);
4300  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4301  continue;
4302  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4303  if (v->rangeredfrm)
4304  for (j = 0; j < 64; j++)
4305  s->block[i][j] <<= 1;
4306  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4307  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4308  // TODO: yet to perform loop filter
4309  }
4310  } else {
4311  s->mb_intra = v->is_intra[s->mb_x] = 0;
4312  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4313  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4314  if (v->fmb_is_raw)
4315  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4316  else
4317  fwd = v->forward_mb_plane[mb_pos];
4318  if (idx_mbmode <= 5) { // 1-MV
4319  int interpmvp = 0;
4320  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4321  pred_flag[0] = pred_flag[1] = 0;
4322  if (fwd)
4323  bmvtype = BMV_TYPE_FORWARD;
4324  else {
4325  bmvtype = decode012(gb);
4326  switch (bmvtype) {
4327  case 0:
4328  bmvtype = BMV_TYPE_BACKWARD;
4329  break;
4330  case 1:
4331  bmvtype = BMV_TYPE_DIRECT;
4332  break;
4333  case 2:
4334  bmvtype = BMV_TYPE_INTERPOLATED;
4335  interpmvp = get_bits1(gb);
4336  }
4337  }
4338  v->bmvtype = bmvtype;
4339  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4340  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4341  }
4342  if (interpmvp) {
4343  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4344  }
4345  if (bmvtype == BMV_TYPE_DIRECT) {
4346  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4347  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4348  if (!s->next_picture_ptr->field_picture) {
4349  av_log(s->avctx, AV_LOG_ERROR, "Mixed field/frame direct mode not supported\n");
4350  return;
4351  }
4352  }
4353  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4354  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4355  mb_has_coeffs = !(idx_mbmode & 2);
4356  } else { // 4-MV
4357  if (fwd)
4358  bmvtype = BMV_TYPE_FORWARD;
4359  v->bmvtype = bmvtype;
4360  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4361  for (i = 0; i < 6; i++) {
4362  if (i < 4) {
4363  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4364  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4365  val = ((v->fourmvbp >> (3 - i)) & 1);
4366  if (val) {
4367  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4368  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4369  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4370  }
4371  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4372  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4373  } else if (i == 4)
4374  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4375  }
4376  mb_has_coeffs = idx_mbmode & 1;
4377  }
4378  if (mb_has_coeffs)
4379  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4380  if (cbp) {
4381  GET_MQUANT();
4382  }
4383  s->current_picture.qscale_table[mb_pos] = mquant;
4384  if (!v->ttmbf && cbp) {
4385  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4386  }
4387  dst_idx = 0;
4388  for (i = 0; i < 6; i++) {
4389  s->dc_val[0][s->block_index[i]] = 0;
4390  dst_idx += i >> 2;
4391  val = ((cbp >> (5 - i)) & 1);
4392  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4393  if (val) {
4394  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4395  first_block, s->dest[dst_idx] + off,
4396  (i & 4) ? s->uvlinesize : s->linesize,
4397  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4398  if (!v->ttmbf && ttmb < 8)
4399  ttmb = -1;
4400  first_block = 0;
4401  }
4402  }
4403  }
4404 }
4405 
4406 /** Decode one B-frame MB (in interlaced frame B picture)
4407  */
4408 static int vc1_decode_b_mb_intfr(VC1Context *v)
4409 {
4410  MpegEncContext *s = &v->s;
4411  GetBitContext *gb = &s->gb;
4412  int i, j;
4413  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4414  int cbp = 0; /* cbp decoding stuff */
4415  int mqdiff, mquant; /* MB quantization */
4416  int ttmb = v->ttfrm; /* MB Transform type */
4417  int mvsw = 0; /* motion vector switch */
4418  int mb_has_coeffs = 1; /* last_flag */
4419  int dmv_x, dmv_y; /* Differential MV components */
4420  int val; /* temp value */
4421  int first_block = 1;
4422  int dst_idx, off;
4423  int skipped, direct, twomv = 0;
4424  int block_cbp = 0, pat, block_tt = 0;
4425  int idx_mbmode = 0, mvbp;
4426  int stride_y, fieldtx;
4427  int bmvtype = BMV_TYPE_BACKWARD;
4428  int dir, dir2;
4429 
4430  mquant = v->pq; /* Lossy initialization */
4431  s->mb_intra = 0;
4432  if (v->skip_is_raw)
4433  skipped = get_bits1(gb);
4434  else
4435  skipped = v->s.mbskip_table[mb_pos];
4436 
4437  if (!skipped) {
4438  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4439  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4440  twomv = 1;
4441  v->blk_mv_type[s->block_index[0]] = 1;
4442  v->blk_mv_type[s->block_index[1]] = 1;
4443  v->blk_mv_type[s->block_index[2]] = 1;
4444  v->blk_mv_type[s->block_index[3]] = 1;
4445  } else {
4446  v->blk_mv_type[s->block_index[0]] = 0;
4447  v->blk_mv_type[s->block_index[1]] = 0;
4448  v->blk_mv_type[s->block_index[2]] = 0;
4449  v->blk_mv_type[s->block_index[3]] = 0;
4450  }
4451  }
4452 
4453  if (v->dmb_is_raw)
4454  direct = get_bits1(gb);
4455  else
4456  direct = v->direct_mb_plane[mb_pos];
4457 
4458  if (direct) {
4459  if (s->next_picture_ptr->field_picture)
4460  av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");
4461  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4462  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4463  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4464  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4465 
4466  if (twomv) {
4467  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4468  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4469  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4470  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4471 
4472  for (i = 1; i < 4; i += 2) {
4473  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4474  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4475  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4476  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4477  }
4478  } else {
4479  for (i = 1; i < 4; i++) {
4480  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4481  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4482  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4483  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4484  }
4485  }
4486  }
4487 
4488  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4489  for (i = 0; i < 4; i++) {
4490  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4491  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4492  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4493  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4494  }
4495  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4496  s->mb_intra = v->is_intra[s->mb_x] = 1;
4497  for (i = 0; i < 6; i++)
4498  v->mb_type[0][s->block_index[i]] = 1;
4499  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4500  mb_has_coeffs = get_bits1(gb);
4501  if (mb_has_coeffs)
4502  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4503  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4504  GET_MQUANT();
4505  s->current_picture.qscale_table[mb_pos] = mquant;
4506  /* Set DC scale - y and c use the same (not sure if necessary here) */
4507  s->y_dc_scale = s->y_dc_scale_table[mquant];
4508  s->c_dc_scale = s->c_dc_scale_table[mquant];
4509  dst_idx = 0;
4510  for (i = 0; i < 6; i++) {
4511  s->dc_val[0][s->block_index[i]] = 0;
4512  dst_idx += i >> 2;
4513  val = ((cbp >> (5 - i)) & 1);
4514  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4515  v->a_avail = v->c_avail = 0;
4516  if (i == 2 || i == 3 || !s->first_slice_line)
4517  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4518  if (i == 1 || i == 3 || s->mb_x)
4519  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4520 
4521  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4522  (i & 4) ? v->codingset2 : v->codingset);
4523  if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4524  continue;
4525  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4526  if (i < 4) {
4527  stride_y = s->linesize << fieldtx;
4528  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4529  } else {
4530  stride_y = s->uvlinesize;
4531  off = 0;
4532  }
4533  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4534  }
4535  } else {
4536  s->mb_intra = v->is_intra[s->mb_x] = 0;
4537  if (!direct) {
4538  if (skipped || !s->mb_intra) {
4539  bmvtype = decode012(gb);
4540  switch (bmvtype) {
4541  case 0:
4542  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4543  break;
4544  case 1:
4545  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4546  break;
4547  case 2:
4548  bmvtype = BMV_TYPE_INTERPOLATED;
4549  }
4550  }
4551 
4552  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4553  mvsw = get_bits1(gb);
4554  }
4555 
4556  if (!skipped) { // inter MB
4557  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4558  if (mb_has_coeffs)
4559  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4560  if (!direct) {
4561  if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4562  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4563  } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4564  v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4565  }
4566  }
4567 
4568  for (i = 0; i < 6; i++)
4569  v->mb_type[0][s->block_index[i]] = 0;
4570  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4571  /* for all motion vectors, read MVDATA and motion compensate each block */
4572  dst_idx = 0;
4573  if (direct) {
4574  if (twomv) {
4575  for (i = 0; i < 4; i++) {
4576  vc1_mc_4mv_luma(v, i, 0, 0);
4577  vc1_mc_4mv_luma(v, i, 1, 1);
4578  }
4579  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4580  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4581  } else {
4582  vc1_mc_1mv(v, 0);
4583  vc1_interp_mc(v);
4584  }
4585  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4586  mvbp = v->fourmvbp;
4587  for (i = 0; i < 4; i++) {
4588  dir = i==1 || i==3;
4589  dmv_x = dmv_y = 0;
4590  val = ((mvbp >> (3 - i)) & 1);
4591  if (val)
4592  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4593  j = i > 1 ? 2 : 0;
4594  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4595  vc1_mc_4mv_luma(v, j, dir, dir);
4596  vc1_mc_4mv_luma(v, j+1, dir, dir);
4597  }
4598 
4599  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4600  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4601  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4602  mvbp = v->twomvbp;
4603  dmv_x = dmv_y = 0;
4604  if (mvbp & 2)
4605  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4606 
4607  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4608  vc1_mc_1mv(v, 0);
4609 
4610  dmv_x = dmv_y = 0;
4611  if (mvbp & 1)
4612  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4613 
4614  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4615  vc1_interp_mc(v);
4616  } else if (twomv) {
4617  dir = bmvtype == BMV_TYPE_BACKWARD;
4618  dir2 = dir;
4619  if (mvsw)
4620  dir2 = !dir;
4621  mvbp = v->twomvbp;
4622  dmv_x = dmv_y = 0;
4623  if (mvbp & 2)
4624  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4625  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4626 
4627  dmv_x = dmv_y = 0;
4628  if (mvbp & 1)
4629  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4630  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4631 
4632  if (mvsw) {
4633  for (i = 0; i < 2; i++) {
4634  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4635  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4636  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4637  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4638  }
4639  } else {
4640  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4641  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4642  }
4643 
4644  vc1_mc_4mv_luma(v, 0, dir, 0);
4645  vc1_mc_4mv_luma(v, 1, dir, 0);
4646  vc1_mc_4mv_luma(v, 2, dir2, 0);
4647  vc1_mc_4mv_luma(v, 3, dir2, 0);
4648  vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4649  } else {
4650  dir = bmvtype == BMV_TYPE_BACKWARD;
4651 
4652  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4653  dmv_x = dmv_y = 0;
4654  if (mvbp)
4655  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4656 
4657  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4658  v->blk_mv_type[s->block_index[0]] = 1;
4659  v->blk_mv_type[s->block_index[1]] = 1;
4660  v->blk_mv_type[s->block_index[2]] = 1;
4661  v->blk_mv_type[s->block_index[3]] = 1;
4662  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4663  for (i = 0; i < 2; i++) {
4664  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4665  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4666  }
4667  vc1_mc_1mv(v, dir);
4668  }
4669 
4670  if (cbp)
4671  GET_MQUANT(); // p. 227
4672  s->current_picture.qscale_table[mb_pos] = mquant;
4673  if (!v->ttmbf && cbp)
4674  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4675  for (i = 0; i < 6; i++) {
4676  s->dc_val[0][s->block_index[i]] = 0;
4677  dst_idx += i >> 2;
4678  val = ((cbp >> (5 - i)) & 1);
4679  if (!fieldtx)
4680  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4681  else
4682  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4683  if (val) {
4684  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4685  first_block, s->dest[dst_idx] + off,
4686  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4687  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4688  block_cbp |= pat << (i << 2);
4689  if (!v->ttmbf && ttmb < 8)
4690  ttmb = -1;
4691  first_block = 0;
4692  }
4693  }
4694 
4695  } else { // skipped
4696  dir = 0;
4697  for (i = 0; i < 6; i++) {
4698  v->mb_type[0][s->block_index[i]] = 0;
4699  s->dc_val[0][s->block_index[i]] = 0;
4700  }
4701  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4702  s->current_picture.qscale_table[mb_pos] = 0;
4703  v->blk_mv_type[s->block_index[0]] = 0;
4704  v->blk_mv_type[s->block_index[1]] = 0;
4705  v->blk_mv_type[s->block_index[2]] = 0;
4706  v->blk_mv_type[s->block_index[3]] = 0;
4707 
4708  if (!direct) {
4709  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4710  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4711  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4712  } else {
4713  dir = bmvtype == BMV_TYPE_BACKWARD;
4714  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4715  if (mvsw) {
4716  int dir2 = dir;
4717  if (mvsw)
4718  dir2 = !dir;
4719  for (i = 0; i < 2; i++) {
4720  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4721  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4722  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4723  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4724  }
4725  } else {
4726  v->blk_mv_type[s->block_index[0]] = 1;
4727  v->blk_mv_type[s->block_index[1]] = 1;
4728  v->blk_mv_type[s->block_index[2]] = 1;
4729  v->blk_mv_type[s->block_index[3]] = 1;
4730  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4731  for (i = 0; i < 2; i++) {
4732  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4733  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4734  }
4735  }
4736  }
4737  }
4738 
4739  vc1_mc_1mv(v, dir);
4740  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4741  vc1_interp_mc(v);
4742  }
4743  }
4744  }
4745  if (s->mb_x == s->mb_width - 1)
4746  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4747  v->cbp[s->mb_x] = block_cbp;
4748  v->ttblk[s->mb_x] = block_tt;
4749  return 0;
4750 }
4751 
4752 /** Decode blocks of I-frame
4753  */
4754 static void vc1_decode_i_blocks(VC1Context *v)
4755 {
4756  int k, j;
4757  MpegEncContext *s = &v->s;
4758  int cbp, val;
4759  uint8_t *coded_val;
4760  int mb_pos;
4761 
4762  /* select codingmode used for VLC tables selection */
4763  switch (v->y_ac_table_index) {
4764  case 0:
4765  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_HIGH_MOT_INTRA;
4766  break;
4767  case 1:
4768  v->codingset = CS_HIGH_MOT_INTRA;
4769  break;
4770  case 2:
4771  v->codingset = CS_MID_RATE_INTRA;
4772  break;
4773  }
4774 
4775  switch (v->c_ac_table_index) {
4776  case 0:
4777  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_HIGH_MOT_INTER;
4778  break;
4779  case 1:
4780  v->codingset2 = CS_HIGH_MOT_INTER;
4781  break;
4782  case 2:
4783  v->codingset2 = CS_MID_RATE_INTER;
4784  break;
4785  }
4786 
4787  /* Set DC scale - y and c use the same */
4788  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4789  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4790 
4791  //do frame decode
4792  s->mb_x = s->mb_y = 0;
4793  s->mb_intra = 1;
4794  s->first_slice_line = 1;
4795  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4796  s->mb_x = 0;
4797  init_block_index(v);
4798  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4799  uint8_t *dst[6];
4800  ff_update_block_index(s);
4801  dst[0] = s->dest[0];
4802  dst[1] = dst[0] + 8;
4803  dst[2] = s->dest[0] + s->linesize * 8;
4804  dst[3] = dst[2] + 8;
4805  dst[4] = s->dest[1];
4806  dst[5] = s->dest[2];
4807  s->dsp.clear_blocks(s->block[0]);
4808  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4809  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4810  s->current_picture.qscale_table[mb_pos] = v->pq;
4811  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4812  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4813 
4814  // do actual MB decoding and displaying
4815  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4816  v->s.ac_pred = get_bits1(&v->s.gb);
4817 
4818  for (k = 0; k < 6; k++) {
4819  val = ((cbp >> (5 - k)) & 1);
4820 
4821  if (k < 4) {
4822  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4823  val = val ^ pred;
4824  *coded_val = val;
4825  }
4826  cbp |= val << (5 - k);
4827 
4828  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4829 
4830  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4831  continue;
4832  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
4833  if (v->pq >= 9 && v->overlap) {
4834  if (v->rangeredfrm)
4835  for (j = 0; j < 64; j++)
4836  s->block[k][j] <<= 1;
4837  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4838  } else {
4839  if (v->rangeredfrm)
4840  for (j = 0; j < 64; j++)
4841  s->block[k][j] = (s->block[k][j] - 64) << 1;
4842  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4843  }
4844  }
4845 
4846  if (v->pq >= 9 && v->overlap) {
4847  if (s->mb_x) {
4848  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4849  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4850  if (!(s->flags & CODEC_FLAG_GRAY)) {
4851  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4852  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4853  }
4854  }
4855  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4856  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4857  if (!s->first_slice_line) {
4858  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4859  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4860  if (!(s->flags & CODEC_FLAG_GRAY)) {
4861  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4862  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4863  }
4864  }
4865  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4866  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4867  }
4868  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4869 
4870  if (get_bits_count(&s->gb) > v->bits) {
4871  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4872  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4873  get_bits_count(&s->gb), v->bits);
4874  return;
4875  }
4876  }
4877  if (!v->s.loop_filter)
4878  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4879  else if (s->mb_y)
4880  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4881 
4882  s->first_slice_line = 0;
4883  }
4884  if (v->s.loop_filter)
4885  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4886 
4887  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4888  * profile, these only differ when decoding MSS2 rectangles. */
4889  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4890 }
4891 
4892 /** Decode blocks of I-frame for advanced profile
4893  */
4894 static void vc1_decode_i_blocks_adv(VC1Context *v)
4895 {
4896  int k;
4897  MpegEncContext *s = &v->s;
4898  int cbp, val;
4899  uint8_t *coded_val;
4900  int mb_pos;
4901  int mquant = v->pq;
4902  int mqdiff;
4903  GetBitContext *gb = &s->gb;
4904 
4905  /* select codingmode used for VLC tables selection */
4906  switch (v->y_ac_table_index) {
4907  case 0:
4908  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_HIGH_MOT_INTRA;
4909  break;
4910  case 1:
4911  v->codingset = CS_HIGH_MOT_INTRA;
4912  break;
4913  case 2:
4914  v->codingset = CS_MID_RATE_INTRA;
4915  break;
4916  }
4917 
4918  switch (v->c_ac_table_index) {
4919  case 0:
4920  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_HIGH_MOT_INTER;
4921  break;
4922  case 1:
4923  v->codingset2 = CS_HIGH_MOT_INTER;
4924  break;
4925  case 2:
4926  v->codingset2 = CS_MID_RATE_INTER;
4927  break;
4928  }
4929 
4930  // do frame decode
4931  s->mb_x = s->mb_y = 0;
4932  s->mb_intra = 1;
4933  s->first_slice_line = 1;
4934  s->mb_y = s->start_mb_y;
4935  if (s->start_mb_y) {
4936  s->mb_x = 0;
4937  init_block_index(v);
4938  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4939  (1 + s->b8_stride) * sizeof(*s->coded_block));
4940  }
4941  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4942  s->mb_x = 0;
4943  init_block_index(v);
4944  for (;s->mb_x < s->mb_width; s->mb_x++) {
4945  int16_t (*block)[64] = v->block[v->cur_blk_idx];
4946  ff_update_block_index(s);
4947  s->dsp.clear_blocks(block[0]);
4948  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4949  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4950  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4951  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4952 
4953  // do actual MB decoding and displaying
4954  if (v->fieldtx_is_raw)
4955  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4956  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4957  if (v->acpred_is_raw)
4958  v->s.ac_pred = get_bits1(&v->s.gb);
4959  else
4960  v->s.ac_pred = v->acpred_plane[mb_pos];
4961 
4962  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4963  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4964 
4965  GET_MQUANT();
4966 
4967  s->current_picture.qscale_table[mb_pos] = mquant;
4968  /* Set DC scale - y and c use the same */
4969  s->y_dc_scale = s->y_dc_scale_table[mquant];
4970  s->c_dc_scale = s->c_dc_scale_table[mquant];
4971 
4972  for (k = 0; k < 6; k++) {
4973  val = ((cbp >> (5 - k)) & 1);
4974 
4975  if (k < 4) {
4976  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4977  val = val ^ pred;
4978  *coded_val = val;
4979  }
4980  cbp |= val << (5 - k);
4981 
4982  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4983  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
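             /* A is the block above, C the block to the left; these flags gate the
              * DC/AC prediction done in vc1_decode_i_block_adv(). */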
4984 
4985  vc1_decode_i_block_adv(v, block[k], k, val,
4986  (k < 4) ? v->codingset : v->codingset2, mquant);
4987 
4988  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4989  continue;
4990  v->vc1dsp.vc1_inv_trans_8x8(block[k]);
4991  }
4992 
4993  vc1_smooth_overlap_filter_iblk(v);
4994  vc1_put_signed_blocks_clamped(v);
4995  if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4996 
4997  if (get_bits_count(&s->gb) > v->bits) {
4998  // TODO: may need modification to handle slice coding
4999  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5000  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
5001  get_bits_count(&s->gb), v->bits);
5002  return;
5003  }
5004  }
5005  if (!v->s.loop_filter)
5006  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5007  else if (s->mb_y)
5008  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5009  s->first_slice_line = 0;
5010  }
5011 
5012  /* raw bottom MB row */
5013  s->mb_x = 0;
5014  init_block_index(v);
5015 
5016  for (;s->mb_x < s->mb_width; s->mb_x++) {
5017  ff_update_block_index(s);
5018  vc1_put_signed_blocks_clamped(v);
5019  if (v->s.loop_filter)
5020  vc1_loop_filter_iblk_delayed(v, v->pq);
5021  }
5022  if (v->s.loop_filter)
5023  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5024  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5025  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5026 }
5027 
5029 {
5030  MpegEncContext *s = &v->s;
5031  int apply_loop_filter;
5032 
5033  /* select the coding mode used for VLC table selection */
5034  switch (v->c_ac_table_index) {
5035  case 0:
5037  break;
5038  case 1:
5040  break;
5041  case 2:
5043  break;
5044  }
5045 
5046  switch (v->c_ac_table_index) {
5047  case 0:
5049  break;
5050  case 1:
5052  break;
5053  case 2:
5055  break;
5056  }
5057 
5058  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5059  v->fcm == PROGRESSIVE;
5060  s->first_slice_line = 1;
5061  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
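 /* v->cbp, v->ttblk, v->is_intra and v->luma_mv each keep one MB row of
  * state (the *_base arrays hold two rows); after every row the current
  * values are copied into the previous-row slot (see the memmove calls
  * below) so the in-loop filter, which runs one row behind, can still
  * read them. */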
5062  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5063  s->mb_x = 0;
5064  init_block_index(v);
5065  for (; s->mb_x < s->mb_width; s->mb_x++) {
5067 
5068  if (v->fcm == ILACE_FIELD)
5070  else if (v->fcm == ILACE_FRAME)
5072  else vc1_decode_p_mb(v);
5073  if (s->mb_y != s->start_mb_y && apply_loop_filter)
5075  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5076  // TODO: may need modification to handle slice coding
5077  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5078  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5079  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5080  return;
5081  }
5082  }
5083  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5084  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5085  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5086  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5087  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5088  s->first_slice_line = 0;
5089  }
5090  if (apply_loop_filter) {
5091  s->mb_x = 0;
5092  init_block_index(v);
5093  for (; s->mb_x < s->mb_width; s->mb_x++) {
5096  }
5097  }
5098  if (s->end_mb_y >= s->start_mb_y)
5099  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5100  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5101  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5102 }
5103 
5105 {
5106  MpegEncContext *s = &v->s;
5107 
5108  /* select the coding mode used for VLC table selection */
5109  switch (v->c_ac_table_index) {
5110  case 0:
5112  break;
5113  case 1:
5115  break;
5116  case 2:
5118  break;
5119  }
5120 
5121  switch (v->c_ac_table_index) {
5122  case 0:
5124  break;
5125  case 1:
5127  break;
5128  case 2:
5130  break;
5131  }
5132 
5133  s->first_slice_line = 1;
5134  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5135  s->mb_x = 0;
5136  init_block_index(v);
5137  for (; s->mb_x < s->mb_width; s->mb_x++) {
5139 
5140  if (v->fcm == ILACE_FIELD)
5142  else if (v->fcm == ILACE_FRAME)
5144  else
5145  vc1_decode_b_mb(v);
5146  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5147  // TODO: may need modification to handle slice coding
5148  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5149  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5150  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5151  return;
5152  }
5153  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5154  }
5155  if (!v->s.loop_filter)
5156  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5157  else if (s->mb_y)
5158  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5159  s->first_slice_line = 0;
5160  }
5161  if (v->s.loop_filter)
5162  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5163  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5164  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5165 }
5166 
5168 {
5169  MpegEncContext *s = &v->s;
5170 
5171  if (!v->s.last_picture.f.data[0])
5172  return;
5173 
5174  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5175  s->first_slice_line = 1;
5176  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5177  s->mb_x = 0;
5178  init_block_index(v);
5180  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5181  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5182  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5183  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5184  s->first_slice_line = 0;
5185  }
5187 }
5188 
5190 {
5191 
5192  v->s.esc3_level_length = 0;
5193  if (v->x8_type) {
5194  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5195  } else {
5196  v->cur_blk_idx = 0;
5197  v->left_blk_idx = -1;
5198  v->topleft_blk_idx = 1;
5199  v->top_blk_idx = 2;
5200  switch (v->s.pict_type) {
5201  case AV_PICTURE_TYPE_I:
5202  if (v->profile == PROFILE_ADVANCED)
5204  else
5206  break;
5207  case AV_PICTURE_TYPE_P:
5208  if (v->p_frame_skipped)
5210  else
5212  break;
5213  case AV_PICTURE_TYPE_B:
5214  if (v->bi_type) {
5215  if (v->profile == PROFILE_ADVANCED)
5217  else
5219  } else
5221  break;
5222  }
5223  }
5224 }
5225 
5226 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5227 
5228 typedef struct {
5229  /**
5230  * Transform coefficients for both sprites in 16.16 fixed point format,
5231  * in the order they appear in the bitstream:
5232  * x scale
5233  * rotation 1 (unused)
5234  * x offset
5235  * rotation 2 (unused)
5236  * y scale
5237  * y offset
5238  * alpha
5239  */
5240  int coefs[2][7];
5241 
5242  int effect_type, effect_flag;
5243  int effect_pcount1, effect_pcount2; ///< number of effect parameters stored in effect_params1/effect_params2
5244  int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5245 } SpriteData;
5246 
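 /* get_fp_val() reads a 30-bit field and converts it to a signed 16.16
  * fixed-point value: the raw value is biased by 2^29 and the difference
  * is doubled to restore the 16.16 scale. For example, a raw value of
  * (1 << 29) + (1 << 15) decodes to 1 << 16, i.e. 1.0. */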
5247 static inline int get_fp_val(GetBitContext* gb)
5248 {
5249  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5250 }
5251 
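 /* vc1_sprite_parse_transform() fills c[0..6] with one sprite transform:
  * the 2-bit mode selects how many coefficients are coded explicitly
  * (0: translation only, 1: uniform scale, 2: separate x/y scale,
  * 3: full matrix including the otherwise-unused rotation terms c[1]/c[3]).
  * The y offset c[5] is always coded, and the alpha c[6] defaults to 1.0
  * (1 << 16) when absent. */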
5252 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5253 {
5254  c[1] = c[3] = 0;
5255 
5256  switch (get_bits(gb, 2)) {
5257  case 0:
5258  c[0] = 1 << 16;
5259  c[2] = get_fp_val(gb);
5260  c[4] = 1 << 16;
5261  break;
5262  case 1:
5263  c[0] = c[4] = get_fp_val(gb);
5264  c[2] = get_fp_val(gb);
5265  break;
5266  case 2:
5267  c[0] = get_fp_val(gb);
5268  c[2] = get_fp_val(gb);
5269  c[4] = get_fp_val(gb);
5270  break;
5271  case 3:
5272  c[0] = get_fp_val(gb);
5273  c[1] = get_fp_val(gb);
5274  c[2] = get_fp_val(gb);
5275  c[3] = get_fp_val(gb);
5276  c[4] = get_fp_val(gb);
5277  break;
5278  }
5279  c[5] = get_fp_val(gb);
5280  if (get_bits1(gb))
5281  c[6] = get_fp_val(gb);
5282  else
5283  c[6] = 1 << 16;
5284 }
5285 
5286 static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5287 {
5288  AVCodecContext *avctx = v->s.avctx;
5289  int sprite, i;
5290 
5291  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5292  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5293  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5294  avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5295  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5296  for (i = 0; i < 7; i++)
5297  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5298  sd->coefs[sprite][i] / (1<<16),
5299  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5300  av_log(avctx, AV_LOG_DEBUG, "\n");
5301  }
5302 
5303  skip_bits(gb, 2);
5304  if (sd->effect_type = get_bits_long(gb, 30)) {
5305  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5306  case 7:
5307  vc1_sprite_parse_transform(gb, sd->effect_params1);
5308  break;
5309  case 14:
5310  vc1_sprite_parse_transform(gb, sd->effect_params1);
5311  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5312  break;
5313  default:
5314  for (i = 0; i < sd->effect_pcount1; i++)
5315  sd->effect_params1[i] = get_fp_val(gb);
5316  }
5317  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5318  // effect 13 is simple alpha blending and matches the opacity above
5319  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5320  for (i = 0; i < sd->effect_pcount1; i++)
5321  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5322  sd->effect_params1[i] / (1 << 16),
5323  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5324  av_log(avctx, AV_LOG_DEBUG, "\n");
5325  }
5326 
5327  sd->effect_pcount2 = get_bits(gb, 16);
5328  if (sd->effect_pcount2 > 10) {
5329  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5330  return AVERROR_INVALIDDATA;
5331  } else if (sd->effect_pcount2) {
5332  i = -1;
5333  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5334  while (++i < sd->effect_pcount2) {
5335  sd->effect_params2[i] = get_fp_val(gb);
5336  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5337  sd->effect_params2[i] / (1 << 16),
5338  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5339  }
5340  av_log(avctx, AV_LOG_DEBUG, "\n");
5341  }
5342  }
5343  if (sd->effect_flag = get_bits1(gb))
5344  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5345 
5346  if (get_bits_count(gb) >= gb->size_in_bits +
5347  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0)) {
5348  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5349  return AVERROR_INVALIDDATA;
5350  }
5351  if (get_bits_count(gb) < gb->size_in_bits - 8)
5352  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5353 
5354  return 0;
5355 }
5356 
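 /* vc1_draw_sprites() composites one or two sprites into the output frame.
  * For each output row it first resamples the needed source line(s)
  * horizontally (cached in v->sr_rows, with the cached source line numbers
  * kept in sr_cache), then interpolates/blends vertically with the
  * sprite_v_* DSP functions, mixing the two sprites with 'alpha'. */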
5357 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5358 {
5359  int i, plane, row, sprite;
5360  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5361  uint8_t* src_h[2][2];
5362  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5363  int ysub[2];
5364  MpegEncContext *s = &v->s;
5365 
5366  for (i = 0; i <= v->two_sprites; i++) {
5367  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5368  xadv[i] = sd->coefs[i][0];
5369  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5370  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5371 
5372  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5373  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5374  }
5375  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5376 
5377  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5378  int width = v->output_width>>!!plane;
5379 
5380  for (row = 0; row < v->output_height>>!!plane; row++) {
5381  uint8_t *dst = v->sprite_output_frame->data[plane] +
5382  v->sprite_output_frame->linesize[plane] * row;
5383 
5384  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5385  uint8_t *iplane = s->current_picture.f.data[plane];
5386  int iline = s->current_picture.f.linesize[plane];
5387  int ycoord = yoff[sprite] + yadv[sprite] * row;
5388  int yline = ycoord >> 16;
5389  int next_line;
5390  ysub[sprite] = ycoord & 0xFFFF;
5391  if (sprite) {
5392  iplane = s->last_picture.f.data[plane];
5393  iline = s->last_picture.f.linesize[plane];
5394  }
5395  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
5396  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5397  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5398  if (ysub[sprite])
5399  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5400  } else {
5401  if (sr_cache[sprite][0] != yline) {
5402  if (sr_cache[sprite][1] == yline) {
5403  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5404  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5405  } else {
5406  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5407  sr_cache[sprite][0] = yline;
5408  }
5409  }
5410  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5411  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5412  iplane + next_line, xoff[sprite],
5413  xadv[sprite], width);
5414  sr_cache[sprite][1] = yline + 1;
5415  }
5416  src_h[sprite][0] = v->sr_rows[sprite][0];
5417  src_h[sprite][1] = v->sr_rows[sprite][1];
5418  }
5419  }
5420 
5421  if (!v->two_sprites) {
5422  if (ysub[0]) {
5423  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5424  } else {
5425  memcpy(dst, src_h[0][0], width);
5426  }
5427  } else {
5428  if (ysub[0] && ysub[1]) {
5429  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5430  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5431  } else if (ysub[0]) {
5432  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5433  src_h[1][0], alpha, width);
5434  } else if (ysub[1]) {
5435  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5436  src_h[0][0], (1<<16)-1-alpha, width);
5437  } else {
5438  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5439  }
5440  }
5441  }
5442 
5443  if (!plane) {
5444  for (i = 0; i <= v->two_sprites; i++) {
5445  xoff[i] >>= 1;
5446  yoff[i] >>= 1;
5447  }
5448  }
5449 
5450  }
5451 }
5452 
5453 
5454 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5455 {
5456  int ret;
5457  MpegEncContext *s = &v->s;
5458  AVCodecContext *avctx = s->avctx;
5459  SpriteData sd;
5460 
5461  memset(&sd, 0, sizeof(sd));
5462 
5463  ret = vc1_parse_sprites(v, gb, &sd);
5464  if (ret < 0)
5465  return ret;
5466 
5467  if (!s->current_picture.f.data[0]) {
5468  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5469  return -1;
5470  }
5471 
5472  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5473  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5474  v->two_sprites = 0;
5475  }
5476 
5478  if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
5479  return ret;
5480 
5481  vc1_draw_sprites(v, &sd);
5482 
5483  return 0;
5484 }
5485 
5486 static void vc1_sprite_flush(AVCodecContext *avctx)
5487 {
5488  VC1Context *v = avctx->priv_data;
5489  MpegEncContext *s = &v->s;
5490  AVFrame *f = &s->current_picture.f;
5491  int plane, i;
5492 
5493  /* Windows Media Image codecs have a convergence interval of two keyframes.
5494  Since we can't enforce it, clear the missing sprite to black. This is
5495  wrong, but it looks better than doing nothing. */
5496 
5497  if (f->data[0])
5498  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5499  for (i = 0; i < v->sprite_height>>!!plane; i++)
5500  memset(f->data[plane] + i * f->linesize[plane],
5501  plane ? 128 : 0, f->linesize[plane]);
5502 }
5503 
5504 #endif
5505 
5507 {
5508  MpegEncContext *s = &v->s;
5509  int i;
5510  int mb_height = FFALIGN(s->mb_height, 2);
5511 
5512  /* Allocate mb bitplanes */
5513  v->mv_type_mb_plane = av_malloc (s->mb_stride * mb_height);
5514  v->direct_mb_plane = av_malloc (s->mb_stride * mb_height);
5515  v->forward_mb_plane = av_malloc (s->mb_stride * mb_height);
5516  v->fieldtx_plane = av_mallocz(s->mb_stride * mb_height);
5517  v->acpred_plane = av_malloc (s->mb_stride * mb_height);
5518  v->over_flags_plane = av_malloc (s->mb_stride * mb_height);
5519 
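 /* v->block is a small pool of per-MB coefficient storage (mb_width + 2
  * entries, each holding six 8x8 blocks); the cur/left/topleft/top block
  * indices rotate through it so that recently decoded neighbours stay
  * available for overlap filtering after decoding has moved on. */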
5520  v->n_allocated_blks = s->mb_width + 2;
5521  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5522  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5523  v->cbp = v->cbp_base + s->mb_stride;
5524  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5525  v->ttblk = v->ttblk_base + s->mb_stride;
5526  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5527  v->is_intra = v->is_intra_base + s->mb_stride;
5528  v->luma_mv_base = av_mallocz(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5529  v->luma_mv = v->luma_mv_base + s->mb_stride;
5530 
5531  /* allocate the block type info so that it can be indexed with s->block_index[] */
5532  v->mb_type_base = av_malloc(s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5533  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5534  v->mb_type[1] = v->mb_type_base + s->b8_stride * (mb_height * 2 + 1) + s->mb_stride + 1;
5535  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (mb_height + 1);
5536 
5537  /* allocate memory to store block level MV info */
5538  v->blk_mv_type_base = av_mallocz( s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5539  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5540  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5541  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5542  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5543  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2));
5544  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5545  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (mb_height * 2 + 1) + s->mb_stride * (mb_height + 1) * 2);
5546 
5547  /* Init coded blocks info */
5548  if (v->profile == PROFILE_ADVANCED) {
5549 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5550 // return -1;
5551 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5552 // return -1;
5553  }
5554 
5555  ff_intrax8_common_init(&v->x8,s);
5556 
5558  for (i = 0; i < 4; i++)
5559  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5560  return AVERROR(ENOMEM);
5561  }
5562 
5563  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5564  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5565  !v->mb_type_base) {
5568  av_freep(&v->acpred_plane);
5570  av_freep(&v->block);
5571  av_freep(&v->cbp_base);
5572  av_freep(&v->ttblk_base);
5573  av_freep(&v->is_intra_base);
5574  av_freep(&v->luma_mv_base);
5575  av_freep(&v->mb_type_base);
5576  return AVERROR(ENOMEM);
5577  }
5578 
5579  return 0;
5580 }
5581 
5583 {
5584  int i;
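 /* Build transposed copies of the WMV1 zigzag scan tables: transpose()
  * swaps the row (x >> 3) and column (x & 7) of a 0..63 scan index.
  * left_blk_sh/top_blk_sh are swapped (0/3) relative to the non-transposed
  * path used below in vc1_decode_init (3/0). */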
5585  for (i = 0; i < 64; i++) {
5586 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
5587  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5588  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5589  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5590  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5592  }
5593  v->left_blk_sh = 0;
5594  v->top_blk_sh = 3;
5595 }
5596 
5597 /** Initialize a VC1/WMV3 decoder
5598  * @todo Handle VC-1 IDUs (transport level?)
5599  * @todo Decipher the remaining bits in extradata
5600  */
5602 {
5603  VC1Context *v = avctx->priv_data;
5604  MpegEncContext *s = &v->s;
5605  GetBitContext gb;
5606  int ret;
5607 
5608  /* save the container output size for WMImage */
5609  v->output_width = avctx->width;
5610  v->output_height = avctx->height;
5611 
5612  if (!avctx->extradata_size || !avctx->extradata)
5613  return -1;
5614  if (!(avctx->flags & CODEC_FLAG_GRAY))
5615  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5616  else
5617  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5618  avctx->hwaccel = ff_find_hwaccel(avctx);
5619  v->s.avctx = avctx;
5620 
5621  if ((ret = ff_vc1_init_common(v)) < 0)
5622  return ret;
5623  // ensure static VLC tables are initialized
5624  if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5625  return ret;
5626  if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5627  return ret;
5628  // Hack to ensure the above functions will be called
5629  // again once we know all necessary settings.
5630  // That this is necessary might indicate a bug.
5631  ff_vc1_decode_end(avctx);
5632 
5634  ff_vc1dsp_init(&v->vc1dsp);
5635 
5636  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5637  int count = 0;
5638 
5639  // WMV3 appears to store a sequence header in the extradata;
5640  // an advanced sequence header may precede the first frame.
5641  // The last byte of the extradata is a version number, 1 for the
5642  // samples we can decode.
5643 
5644  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5645 
5646  if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
5647  return ret;
5648 
5649  count = avctx->extradata_size*8 - get_bits_count(&gb);
5650  if (count > 0) {
5651  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5652  count, get_bits(&gb, count));
5653  } else if (count < 0) {
5654  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5655  }
5656  } else { // VC1/WVC1/WVP2
5657  const uint8_t *start = avctx->extradata;
5658  uint8_t *end = avctx->extradata + avctx->extradata_size;
5659  const uint8_t *next;
5660  int size, buf2_size;
5661  uint8_t *buf2 = NULL;
5662  int seq_initialized = 0, ep_initialized = 0;
5663 
5664  if (avctx->extradata_size < 16) {
5665  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5666  return -1;
5667  }
5668 
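 /* VC-1 advanced-profile extradata is a series of start-code delimited
  * units: walk them with find_next_marker(), unescape each payload into
  * buf2 and dispatch on the start code (sequence header / entry point). */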
5670  start = find_next_marker(start, end); // in WVC1 extradata the first byte is its size, but it can be 0 in mkv
5671  next = start;
5672  for (; next < end; start = next) {
5673  next = find_next_marker(start + 4, end);
5674  size = next - start - 4;
5675  if (size <= 0)
5676  continue;
5677  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5678  init_get_bits(&gb, buf2, buf2_size * 8);
5679  switch (AV_RB32(start)) {
5680  case VC1_CODE_SEQHDR:
5681  if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5682  av_free(buf2);
5683  return ret;
5684  }
5685  seq_initialized = 1;
5686  break;
5687  case VC1_CODE_ENTRYPOINT:
5688  if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
5689  av_free(buf2);
5690  return ret;
5691  }
5692  ep_initialized = 1;
5693  break;
5694  }
5695  }
5696  av_free(buf2);
5697  if (!seq_initialized || !ep_initialized) {
5698  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5699  return -1;
5700  }
5701  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5702  }
5703 
5705  if (!v->sprite_output_frame)
5706  return AVERROR(ENOMEM);
5707 
5708  avctx->profile = v->profile;
5709  if (v->profile == PROFILE_ADVANCED)
5710  avctx->level = v->level;
5711 
5712  avctx->has_b_frames = !!avctx->max_b_frames;
5713 
5714  s->mb_width = (avctx->coded_width + 15) >> 4;
5715  s->mb_height = (avctx->coded_height + 15) >> 4;
5716 
5717  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5719  } else {
5720  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5721  v->left_blk_sh = 3;
5722  v->top_blk_sh = 0;
5723  }
5724 
5725  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5726  v->sprite_width = avctx->coded_width;
5727  v->sprite_height = avctx->coded_height;
5728 
5729  avctx->coded_width = avctx->width = v->output_width;
5730  avctx->coded_height = avctx->height = v->output_height;
5731 
5732  // prevent 16.16 overflows
5733  if (v->sprite_width > 1 << 14 ||
5734  v->sprite_height > 1 << 14 ||
5735  v->output_width > 1 << 14 ||
5736  v->output_height > 1 << 14) return -1;
5737 
5738  if ((v->sprite_width&1) || (v->sprite_height&1)) {
5739  avpriv_request_sample(avctx, "odd sprites support");
5740  return AVERROR_PATCHWELCOME;
5741  }
5742  }
5743  return 0;
5744 }
5745 
5746 /** Close a VC1/WMV3 decoder
5747  * @warning Initial attempt at reusing the MpegEncContext infrastructure
5748  */
5750 {
5751  VC1Context *v = avctx->priv_data;
5752  int i;
5753 
5755 
5756  for (i = 0; i < 4; i++)
5757  av_freep(&v->sr_rows[i >> 1][i & 1]);
5758  av_freep(&v->hrd_rate);
5759  av_freep(&v->hrd_buffer);
5760  ff_MPV_common_end(&v->s);
5764  av_freep(&v->fieldtx_plane);
5765  av_freep(&v->acpred_plane);
5767  av_freep(&v->mb_type_base);
5769  av_freep(&v->mv_f_base);
5770  av_freep(&v->mv_f_next_base);
5771  av_freep(&v->block);
5772  av_freep(&v->cbp_base);
5773  av_freep(&v->ttblk_base);
5774  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5775  av_freep(&v->luma_mv_base);
5777  return 0;
5778 }
5779 
5780 
5781 /** Decode a VC1/WMV3 frame
5782  * @todo Handle VC-1 IDUs (transport level?)
5783  */
5784 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5785  int *got_frame, AVPacket *avpkt)
5786 {
5787  const uint8_t *buf = avpkt->data;
5788  int buf_size = avpkt->size, n_slices = 0, i, ret;
5789  VC1Context *v = avctx->priv_data;
5790  MpegEncContext *s = &v->s;
5791  AVFrame *pict = data;
5792  uint8_t *buf2 = NULL;
5793  const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5794  int mb_height, n_slices1=-1;
5795  struct {
5796  uint8_t *buf;
5797  GetBitContext gb;
5798  int mby_start;
5799  } *slices = NULL, *tmp;
5800 
5801  v->second_field = 0;
5802 
5803  if(s->flags & CODEC_FLAG_LOW_DELAY)
5804  s->low_delay = 1;
5805 
5806  /* no supplementary picture */
5807  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5808  /* special case for last picture */
5809  if (s->low_delay == 0 && s->next_picture_ptr) {
5810  if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5811  return ret;
5812  s->next_picture_ptr = NULL;
5813 
5814  *got_frame = 1;
5815  }
5816 
5817  return buf_size;
5818  }
5819 
5821  if (v->profile < PROFILE_ADVANCED)
5822  avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5823  else
5824  avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5825  }
5826 
5827  // for the advanced profile we may need to parse and unescape the data
5828  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5829  int buf_size2 = 0;
5830  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5831  if (!buf2)
5832  return AVERROR(ENOMEM);
5833 
5834  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5835  const uint8_t *start, *end, *next;
5836  int size;
5837 
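 /* Walk the start-code delimited units of the access unit: frame and
  * entry-point data are unescaped into buf2, while field and slice
  * payloads are unescaped into separate buffers and queued in slices[]
  * together with their starting MB row. */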
5838  next = buf;
5839  for (start = buf, end = buf + buf_size; next < end; start = next) {
5840  next = find_next_marker(start + 4, end);
5841  size = next - start - 4;
5842  if (size <= 0) continue;
5843  switch (AV_RB32(start)) {
5844  case VC1_CODE_FRAME:
5845  if (avctx->hwaccel ||
5847  buf_start = start;
5848  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5849  break;
5850  case VC1_CODE_FIELD: {
5851  int buf_size3;
5852  if (avctx->hwaccel ||
5854  buf_start_second_field = start;
5855  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5856  if (!tmp)
5857  goto err;
5858  slices = tmp;
5859  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5860  if (!slices[n_slices].buf)
5861  goto err;
5862  buf_size3 = vc1_unescape_buffer(start + 4, size,
5863  slices[n_slices].buf);
5864  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5865  buf_size3 << 3);
5866  /* assume the field marker is at the exact middle;
5867  this may not always hold */
5868  slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5869  n_slices1 = n_slices - 1; // index of the last slice of the first field
5870  n_slices++;
5871  break;
5872  }
5873  case VC1_CODE_ENTRYPOINT: /* it should be before frame data */
5874  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5875  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5876  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5877  break;
5878  case VC1_CODE_SLICE: {
5879  int buf_size3;
5880  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5881  if (!tmp)
5882  goto err;
5883  slices = tmp;
5884  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5885  if (!slices[n_slices].buf)
5886  goto err;
5887  buf_size3 = vc1_unescape_buffer(start + 4, size,
5888  slices[n_slices].buf);
5889  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5890  buf_size3 << 3);
5891  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5892  n_slices++;
5893  break;
5894  }
5895  }
5896  }
5897  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields divided by marker */
5898  const uint8_t *divider;
5899  int buf_size3;
5900 
5901  divider = find_next_marker(buf, buf + buf_size);
5902  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5903  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5904  goto err;
5905  } else { // found field marker, unescape second field
5906  if (avctx->hwaccel ||
5908  buf_start_second_field = divider;
5909  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5910  if (!tmp)
5911  goto err;
5912  slices = tmp;
5913  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5914  if (!slices[n_slices].buf)
5915  goto err;
5916  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5917  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5918  buf_size3 << 3);
5919  slices[n_slices].mby_start = s->mb_height + 1 >> 1;
5920  n_slices1 = n_slices - 1;
5921  n_slices++;
5922  }
5923  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5924  } else {
5925  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5926  }
5927  init_get_bits(&s->gb, buf2, buf_size2*8);
5928  } else
5929  init_get_bits(&s->gb, buf, buf_size*8);
5930 
5931  if (v->res_sprite) {
5932  v->new_sprite = !get_bits1(&s->gb);
5933  v->two_sprites = get_bits1(&s->gb);
5934  /* res_sprite means a Windows Media Image stream, AV_CODEC_ID_*IMAGE means
5935  we're using the sprite compositor. These are intentionally kept separate
5936  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5937  the vc1 one for WVP2 */
5938  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5939  if (v->new_sprite) {
5940  // switch AVCodecContext parameters to those of the sprites
5941  avctx->width = avctx->coded_width = v->sprite_width;
5942  avctx->height = avctx->coded_height = v->sprite_height;
5943  } else {
5944  goto image;
5945  }
5946  }
5947  }
5948 
5949  if (s->context_initialized &&
5950  (s->width != avctx->coded_width ||
5951  s->height != avctx->coded_height)) {
5952  ff_vc1_decode_end(avctx);
5953  }
5954 
5955  if (!s->context_initialized) {
5956  if (ff_msmpeg4_decode_init(avctx) < 0)
5957  goto err;
5958  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5959  ff_MPV_common_end(s);
5960  goto err;
5961  }
5962 
5963  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5964 
5965  if (v->profile == PROFILE_ADVANCED) {
5966  if(avctx->coded_width<=1 || avctx->coded_height<=1)
5967  goto err;
5968  s->h_edge_pos = avctx->coded_width;
5969  s->v_edge_pos = avctx->coded_height;
5970  }
5971  }
5972 
5973  // parse the frame header
5974  v->pic_header_flag = 0;
5975  v->first_pic_header_flag = 1;
5976  if (v->profile < PROFILE_ADVANCED) {
5977  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5978  goto err;
5979  }
5980  } else {
5981  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5982  goto err;
5983  }
5984  }
5985  v->first_pic_header_flag = 0;
5986 
5987  if (avctx->debug & FF_DEBUG_PICT_INFO)
5988  av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5989 
5990  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5991  && s->pict_type != AV_PICTURE_TYPE_I) {
5992  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5993  goto err;
5994  }
5995 
5996  if ((s->mb_height >> v->field_mode) == 0) {
5997  av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5998  goto err;
5999  }
6000 
6001  // for skipping the frame
6004 
6005  /* skip B-frames if we don't have reference frames */
6006  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
6007  goto err;
6008  }
6009  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6010  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6011  avctx->skip_frame >= AVDISCARD_ALL) {
6012  goto end;
6013  }
6014 
6015  if (s->next_p_frame_damaged) {
6016  if (s->pict_type == AV_PICTURE_TYPE_B)
6017  goto end;
6018  else
6019  s->next_p_frame_damaged = 0;
6020  }
6021 
6022  if (ff_MPV_frame_start(s, avctx) < 0) {
6023  goto err;
6024  }
6025 
6029 
6030  // process pulldown flags
6032  // Pulldown flags are only valid when 'broadcast' has been set,
6033  // in which case ticks_per_frame will be 2.
6034  if (v->rff) {
6035  // repeat field
6037  } else if (v->rptfrm) {
6038  // repeat frames
6039  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
6040  }
6041 
6044 
6045  if ((CONFIG_VC1_VDPAU_DECODER)
6047  if (v->field_mode && buf_start_second_field) {
6048  ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6049  ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6050  } else {
6051  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
6052  }
6053  } else if (avctx->hwaccel) {
6054  if (v->field_mode && buf_start_second_field) {
6055  // decode first field
6057  if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6058  goto err;
6059  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6060  goto err;
6061  if (avctx->hwaccel->end_frame(avctx) < 0)
6062  goto err;
6063 
6064  // decode second field
6065  s->gb = slices[n_slices1 + 1].gb;
6067  v->second_field = 1;
6068  v->pic_header_flag = 0;
6069  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6070  av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
6071  goto err;
6072  }
6074 
6075  if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6076  goto err;
6077  if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6078  goto err;
6079  if (avctx->hwaccel->end_frame(avctx) < 0)
6080  goto err;
6081  } else {
6083  if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6084  goto err;
6085  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6086  goto err;
6087  if (avctx->hwaccel->end_frame(avctx) < 0)
6088  goto err;
6089  }
6090  } else {
6091  int header_ret = 0;
6092 
6094 
6095  v->bits = buf_size * 8;
6096  v->end_mb_x = s->mb_width;
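 /* In field mode each field is decoded at half the frame height, so the
  * picture linesizes are doubled here to skip every other line; they are
  * halved back once both fields have been decoded (see below). */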
6097  if (v->field_mode) {
6098  s->current_picture.f.linesize[0] <<= 1;
6099  s->current_picture.f.linesize[1] <<= 1;
6100  s->current_picture.f.linesize[2] <<= 1;
6101  s->linesize <<= 1;
6102  s->uvlinesize <<= 1;
6103  }
6104  mb_height = s->mb_height >> v->field_mode;
6105 
6106  av_assert0 (mb_height > 0);
6107 
6108  for (i = 0; i <= n_slices; i++) {
6109  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6110  if (v->field_mode <= 0) {
6111  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6112  "picture boundary (%d >= %d)\n", i,
6113  slices[i - 1].mby_start, mb_height);
6114  continue;
6115  }
6116  v->second_field = 1;
6117  av_assert0((s->mb_height & 1) == 0);
6118  v->blocks_off = s->b8_stride * (s->mb_height&~1);
6119  v->mb_off = s->mb_stride * s->mb_height >> 1;
6120  } else {
6121  v->second_field = 0;
6122  v->blocks_off = 0;
6123  v->mb_off = 0;
6124  }
6125  if (i) {
6126  v->pic_header_flag = 0;
6127  if (v->field_mode && i == n_slices1 + 2) {
6128  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6129  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6130  if (avctx->err_recognition & AV_EF_EXPLODE)
6131  goto err;
6132  continue;
6133  }
6134  } else if (get_bits1(&s->gb)) {
6135  v->pic_header_flag = 1;
6136  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6137  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6138  if (avctx->err_recognition & AV_EF_EXPLODE)
6139  goto err;
6140  continue;
6141  }
6142  }
6143  }
6144  if (header_ret < 0)
6145  continue;
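 /* Determine the MB row range covered by this slice: it starts at the
  * previous slice's announced start row and ends at the next slice's
  * start row (or at the bottom of the current field/frame for the last
  * slice). */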
6146  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6147  if (!v->field_mode || v->second_field)
6148  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6149  else {
6150  if (i >= n_slices) {
6151  av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6152  continue;
6153  }
6154  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6155  }
6156  if (s->end_mb_y <= s->start_mb_y) {
6157  av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6158  continue;
6159  }
6160  if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6161  av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6162  continue;
6163  }
6165  if (i != n_slices)
6166  s->gb = slices[i].gb;
6167  }
6168  if (v->field_mode) {
6169  v->second_field = 0;
6170  s->current_picture.f.linesize[0] >>= 1;
6171  s->current_picture.f.linesize[1] >>= 1;
6172  s->current_picture.f.linesize[2] >>= 1;
6173  s->linesize >>= 1;
6174  s->uvlinesize >>= 1;
6176  FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6177  FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6178  }
6179  }
6180  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6181  get_bits_count(&s->gb), s->gb.size_in_bits);
6182 // if (get_bits_count(&s->gb) > buf_size * 8)
6183 // return -1;
6185  goto err;
6186  if (!v->field_mode)
6187  ff_er_frame_end(&s->er);
6188  }
6189 
6190  ff_MPV_frame_end(s);
6191 
6192  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6193 image:
6194  avctx->width = avctx->coded_width = v->output_width;
6195  avctx->height = avctx->coded_height = v->output_height;
6196  if (avctx->skip_frame >= AVDISCARD_NONREF)
6197  goto end;
6198 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6199  if (vc1_decode_sprites(v, &s->gb))
6200  goto err;
6201 #endif
6202  if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
6203  goto err;
6204  *got_frame = 1;
6205  } else {
6206  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6207  if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6208  goto err;
6210  *got_frame = 1;
6211  } else if (s->last_picture_ptr != NULL) {
6212  if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6213  goto err;
6215  *got_frame = 1;
6216  }
6217  }
6218 
6219 end:
6220  av_free(buf2);
6221  for (i = 0; i < n_slices; i++)
6222  av_free(slices[i].buf);
6223  av_free(slices);
6224  return buf_size;
6225 
6226 err:
6227  av_free(buf2);
6228  for (i = 0; i < n_slices; i++)
6229  av_free(slices[i].buf);
6230  av_free(slices);
6231  return -1;
6232 }
6233 
6234 
6235 static const AVProfile profiles[] = {
6236  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6237  { FF_PROFILE_VC1_MAIN, "Main" },
6238  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6239  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6240  { FF_PROFILE_UNKNOWN },
6241 };
6242 
6244 #if CONFIG_DXVA2
6246 #endif
6247 #if CONFIG_VAAPI
6249 #endif
6250 #if CONFIG_VDPAU
6252 #endif
6255 };
6256 
6258  .name = "vc1",
6259  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6260  .type = AVMEDIA_TYPE_VIDEO,
6261  .id = AV_CODEC_ID_VC1,
6262  .priv_data_size = sizeof(VC1Context),
6263  .init = vc1_decode_init,
6266  .flush = ff_mpeg_flush,
6267  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6268  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6269  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6270 };
6271 
6272 #if CONFIG_WMV3_DECODER
6273 AVCodec ff_wmv3_decoder = {
6274  .name = "wmv3",
6275  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6276  .type = AVMEDIA_TYPE_VIDEO,
6277  .id = AV_CODEC_ID_WMV3,
6278  .priv_data_size = sizeof(VC1Context),
6279  .init = vc1_decode_init,
6282  .flush = ff_mpeg_flush,
6283  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6284  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6285  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6286 };
6287 #endif
6288 
6289 #if CONFIG_WMV3_VDPAU_DECODER
6290 AVCodec ff_wmv3_vdpau_decoder = {
6291  .name = "wmv3_vdpau",
6292  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6293  .type = AVMEDIA_TYPE_VIDEO,
6294  .id = AV_CODEC_ID_WMV3,
6295  .priv_data_size = sizeof(VC1Context),
6296  .init = vc1_decode_init,
6300  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6301  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6302 };
6303 #endif
6304 
6305 #if CONFIG_VC1_VDPAU_DECODER
6306 AVCodec ff_vc1_vdpau_decoder = {
6307  .name = "vc1_vdpau",
6308  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6309  .type = AVMEDIA_TYPE_VIDEO,
6310  .id = AV_CODEC_ID_VC1,
6311  .priv_data_size = sizeof(VC1Context),
6312  .init = vc1_decode_init,
6316  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6317  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6318 };
6319 #endif
6320 
6321 #if CONFIG_WMV3IMAGE_DECODER
6322 AVCodec ff_wmv3image_decoder = {
6323  .name = "wmv3image",
6324  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6325  .type = AVMEDIA_TYPE_VIDEO,
6326  .id = AV_CODEC_ID_WMV3IMAGE,
6327  .priv_data_size = sizeof(VC1Context),
6328  .init = vc1_decode_init,
6331  .capabilities = CODEC_CAP_DR1,
6332  .flush = vc1_sprite_flush,
6333  .pix_fmts = ff_pixfmt_list_420
6334 };
6335 #endif
6336 
6337 #if CONFIG_VC1IMAGE_DECODER
6338 AVCodec ff_vc1image_decoder = {
6339  .name = "vc1image",
6340  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6341  .type = AVMEDIA_TYPE_VIDEO,
6342  .id = AV_CODEC_ID_VC1IMAGE,
6343  .priv_data_size = sizeof(VC1Context),
6344  .init = vc1_decode_init,
6347  .capabilities = CODEC_CAP_DR1,
6348  .flush = vc1_sprite_flush,
6349  .pix_fmts = ff_pixfmt_list_420
6350 };
6351 #endif