FFmpeg
vc1dec.c
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 decoder
27  */
28 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "h264chroma.h"
35 #include "vc1.h"
36 #include "vc1data.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
39 #include "unary.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 #include "libavutil/avassert.h"
43 
44 #undef NDEBUG
45 #include <assert.h>
46 
47 #define MB_INTRA_VLC_BITS 9
48 #define DC_VLC_BITS 9
49 
50 
51 // offset tables for interlaced picture MVDATA decoding
52 static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
53 static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
54 
55 /***********************************************************************/
56 /**
57  * @name VC-1 Bitplane decoding
58  * @see 8.7, p56
59  * @{
60  */
61 
62 /**
63  * Imode types
64  * @{
65  */
66 enum Imode {
67  IMODE_RAW,
68  IMODE_NORM2,
69  IMODE_DIFF2,
70  IMODE_NORM6,
71  IMODE_DIFF6,
72  IMODE_ROWSKIP,
73  IMODE_COLSKIP
74 };
75 /** @} */ //imode defines
76 
77 static void init_block_index(VC1Context *v)
78 {
79  MpegEncContext *s = &v->s;
80  ff_init_block_index(s);
81  if (v->field_mode && !(v->second_field ^ v->tff)) {
82  s->dest[0] += s->current_picture_ptr->f.linesize[0];
83  s->dest[1] += s->current_picture_ptr->f.linesize[1];
84  s->dest[2] += s->current_picture_ptr->f.linesize[2];
85  }
86 }
87 
88 /** @} */ //Bitplane group
89 
90 static void vc1_put_signed_blocks_clamped(VC1Context *v)
91 {
92  MpegEncContext *s = &v->s;
93  int topleft_mb_pos, top_mb_pos;
94  int stride_y, fieldtx = 0;
95  int v_dist;
96 
97  /* The put pixels loop is always one MB row behind the decoding loop,
98  * because we can only put pixels when overlap filtering is done, and
99  * for filtering of the bottom edge of a MB, we need the next MB row
100  * present as well.
101  * Within the row, the put pixels loop is also one MB col behind the
102  * decoding loop. The reason is again that, for filtering of the right
103  * MB edge, we need the next MB to be present. */
104  if (!s->first_slice_line) {
105  if (s->mb_x) {
106  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
107  if (v->fcm == ILACE_FRAME)
108  fieldtx = v->fieldtx_plane[topleft_mb_pos];
109  stride_y = s->linesize << fieldtx;
110  v_dist = (16 - fieldtx) >> (fieldtx == 0);
111  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][0],
112  s->dest[0] - 16 * s->linesize - 16,
113  stride_y);
114  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][1],
115  s->dest[0] - 16 * s->linesize - 8,
116  stride_y);
117  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][2],
118  s->dest[0] - v_dist * s->linesize - 16,
119  stride_y);
120  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][3],
121  s->dest[0] - v_dist * s->linesize - 8,
122  stride_y);
123  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][4],
124  s->dest[1] - 8 * s->uvlinesize - 8,
125  s->uvlinesize);
126  s->dsp.put_signed_pixels_clamped(v->block[v->topleft_blk_idx][5],
127  s->dest[2] - 8 * s->uvlinesize - 8,
128  s->uvlinesize);
129  }
130  if (s->mb_x == s->mb_width - 1) {
131  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
132  if (v->fcm == ILACE_FRAME)
133  fieldtx = v->fieldtx_plane[top_mb_pos];
134  stride_y = s->linesize << fieldtx;
135  v_dist = fieldtx ? 15 : 8;
136  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][0],
137  s->dest[0] - 16 * s->linesize,
138  stride_y);
139  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][1],
140  s->dest[0] - 16 * s->linesize + 8,
141  stride_y);
142  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][2],
143  s->dest[0] - v_dist * s->linesize,
144  stride_y);
145  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][3],
146  s->dest[0] - v_dist * s->linesize + 8,
147  stride_y);
148  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][4],
149  s->dest[1] - 8 * s->uvlinesize,
150  s->uvlinesize);
151  s->dsp.put_signed_pixels_clamped(v->block[v->top_blk_idx][5],
152  s->dest[2] - 8 * s->uvlinesize,
153  s->uvlinesize);
154  }
155  }
156 
157 #define inc_blk_idx(idx) do { \
158  idx++; \
159  if (idx >= v->n_allocated_blks) \
160  idx = 0; \
161  } while (0)
162 
163  inc_blk_idx(v->topleft_blk_idx);
164  inc_blk_idx(v->top_blk_idx);
165  inc_blk_idx(v->left_blk_idx);
166  inc_blk_idx(v->cur_blk_idx);
167 }
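/* Editorial note (not part of the original source): v->cur_blk_idx,
 * v->left_blk_idx, v->top_blk_idx and v->topleft_blk_idx appear to form a
 * small ring over v->n_allocated_blks coefficient-block buffers, advanced
 * once per MB with inc_blk_idx() defined above.  A minimal sketch of the
 * wrap-around, assuming n_allocated_blks == 4:
 *
 *     int idx = 3;
 *     inc_blk_idx(idx);   // 3 -> 4 -> wraps to 0
 *
 * Keeping several MBs' worth of blocks alive this way is what lets the
 * put-pixels loop run one MB row/column behind the decoding loop, as the
 * comment at the top of this function explains. */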
168 
169 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
170 {
171  MpegEncContext *s = &v->s;
172  int j;
173  if (!s->first_slice_line) {
174  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
175  if (s->mb_x)
176  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
177  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
178  for (j = 0; j < 2; j++) {
179  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
180  if (s->mb_x)
181  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
182  }
183  }
184  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
185 
186  if (s->mb_y == s->end_mb_y - 1) {
187  if (s->mb_x) {
188  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
189  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
190  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
191  }
192  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
193  }
194 }
195 
196 static void vc1_loop_filter_iblk_delayed(VC1Context *v, int pq)
197 {
198  MpegEncContext *s = &v->s;
199  int j;
200 
201  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
202  * means it runs two rows/cols behind the decoding loop. */
203  if (!s->first_slice_line) {
204  if (s->mb_x) {
205  if (s->mb_y >= s->start_mb_y + 2) {
206  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
207 
208  if (s->mb_x >= 2)
209  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
210  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
211  for (j = 0; j < 2; j++) {
212  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
213  if (s->mb_x >= 2) {
214  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
215  }
216  }
217  }
218  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
219  }
220 
221  if (s->mb_x == s->mb_width - 1) {
222  if (s->mb_y >= s->start_mb_y + 2) {
223  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
224 
225  if (s->mb_x)
226  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
228  for (j = 0; j < 2; j++) {
229  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
230  if (s->mb_x >= 2) {
231  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
232  }
233  }
234  }
235  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
236  }
237 
238  if (s->mb_y == s->end_mb_y) {
239  if (s->mb_x) {
240  if (s->mb_x >= 2)
241  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
242  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
243  if (s->mb_x >= 2) {
244  for (j = 0; j < 2; j++) {
245  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
246  }
247  }
248  }
249 
250  if (s->mb_x == s->mb_width - 1) {
251  if (s->mb_x)
252  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
253  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
254  if (s->mb_x) {
255  for (j = 0; j < 2; j++) {
256  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
257  }
258  }
259  }
260  }
261  }
262 }
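/* Editorial note (not part of the original source): the address arithmetic in
 * the function above encodes the two-row/two-column delay mentioned in its
 * opening comment.  With s->dest[0] pointing at the current MB's luma,
 *
 *     s->dest[0] - 16 * s->linesize   // MB one row above
 *     s->dest[0] - 32 * s->linesize   // MB two rows above
 *     s->dest[0] - 16                 // MB one column to the left
 *
 * so the in-loop deblocking only touches pixels whose overlap filtering has
 * already completed. */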
263 
264 static void vc1_smooth_overlap_filter_iblk(VC1Context *v)
265 {
266  MpegEncContext *s = &v->s;
267  int mb_pos;
268 
269  if (v->condover == CONDOVER_NONE)
270  return;
271 
272  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
273 
274  /* Within a MB, the horizontal overlap always runs before the vertical.
275  * To accomplish that, we run the H on left and internal borders of the
276  * currently decoded MB. Then, we wait for the next overlap iteration
277  * to do H overlap on the right edge of this MB, before moving over and
278  * running the V overlap. Therefore, the V overlap makes us trail by one
279  * MB col and the H overlap filter makes us trail by one MB row. This
280  * is reflected in the time at which we run the put_pixels loop. */
281  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
282  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
283  v->over_flags_plane[mb_pos - 1])) {
284  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][1],
285  v->block[v->cur_blk_idx][0]);
286  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][3],
287  v->block[v->cur_blk_idx][2]);
288  if (!(s->flags & CODEC_FLAG_GRAY)) {
289  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][4],
290  v->block[v->cur_blk_idx][4]);
291  v->vc1dsp.vc1_h_s_overlap(v->block[v->left_blk_idx][5],
292  v->block[v->cur_blk_idx][5]);
293  }
294  }
295  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][0],
296  v->block[v->cur_blk_idx][1]);
297  v->vc1dsp.vc1_h_s_overlap(v->block[v->cur_blk_idx][2],
298  v->block[v->cur_blk_idx][3]);
299 
300  if (s->mb_x == s->mb_width - 1) {
301  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
302  v->over_flags_plane[mb_pos - s->mb_stride])) {
303  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][2],
304  v->block[v->cur_blk_idx][0]);
305  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][3],
306  v->block[v->cur_blk_idx][1]);
307  if (!(s->flags & CODEC_FLAG_GRAY)) {
308  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][4],
309  v->block[v->cur_blk_idx][4]);
310  v->vc1dsp.vc1_v_s_overlap(v->block[v->top_blk_idx][5],
311  v->block[v->cur_blk_idx][5]);
312  }
313  }
314  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][0],
315  v->block[v->cur_blk_idx][2]);
316  v->vc1dsp.vc1_v_s_overlap(v->block[v->cur_blk_idx][1],
317  v->block[v->cur_blk_idx][3]);
318  }
319  }
320  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
321  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
322  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
323  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][2],
324  v->block[v->left_blk_idx][0]);
325  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][3],
326  v->block[v->left_blk_idx][1]);
327  if (!(s->flags & CODEC_FLAG_GRAY)) {
328  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][4],
329  v->block[v->left_blk_idx][4]);
330  v->vc1dsp.vc1_v_s_overlap(v->block[v->topleft_blk_idx][5],
331  v->block[v->left_blk_idx][5]);
332  }
333  }
334  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][0],
335  v->block[v->left_blk_idx][2]);
336  v->vc1dsp.vc1_v_s_overlap(v->block[v->left_blk_idx][1],
337  v->block[v->left_blk_idx][3]);
338  }
339 }
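/* Editorial note (not part of the original source): a condensed sketch of the
 * condition gating overlap smoothing in the function above.  Per MB edge it
 * reduces approximately to
 *
 *     smooth = (v->condover == CONDOVER_ALL) || (v->pq >= 9) ||
 *              (v->condover == CONDOVER_SELECT && v->over_flags_plane[mb_pos]);
 *
 * This is only an approximation of the checks spelled out above (which test
 * the flags of the current and of the neighbouring MB separately); it is
 * meant to show the three inputs to the decision: the picture-level CONDOVER
 * mode, the quantizer threshold, and the per-MB overlap bitplane.  Horizontal
 * smoothing of an MB always runs before the vertical smoothing, as the
 * comment at the top of the function describes. */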
340 
341 /** Do motion compensation over 1 macroblock
342  * Mostly adapted from hpel_motion() and qpel_motion() in mpegvideo.c
343  */
344 static void vc1_mc_1mv(VC1Context *v, int dir)
345 {
346  MpegEncContext *s = &v->s;
347  H264ChromaContext *h264chroma = &v->h264chroma;
348  uint8_t *srcY, *srcU, *srcV;
349  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
350  int v_edge_pos = s->v_edge_pos >> v->field_mode;
351  int i;
352  uint8_t (*luty)[256], (*lutuv)[256];
353  int use_ic;
354 
355  if ((!v->field_mode ||
356  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
357  !v->s.last_picture.f.data[0])
358  return;
359 
360  mx = s->mv[dir][0][0];
361  my = s->mv[dir][0][1];
362 
363  // store motion vectors for further use in B frames
364  if (s->pict_type == AV_PICTURE_TYPE_P) {
365  for (i = 0; i < 4; i++) {
366  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
367  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
368  }
369  }
370 
371  uvmx = (mx + ((mx & 3) == 3)) >> 1;
372  uvmy = (my + ((my & 3) == 3)) >> 1;
373  v->luma_mv[s->mb_x][0] = uvmx;
374  v->luma_mv[s->mb_x][1] = uvmy;
375 
376  if (v->field_mode &&
377  v->cur_field_type != v->ref_field_type[dir]) {
378  my = my - 2 + 4 * v->cur_field_type;
379  uvmy = uvmy - 2 + 4 * v->cur_field_type;
380  }
381 
382  // fastuvmc shall be ignored for interlaced frame picture
383  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
384  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
385  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
386  }
387  if (!dir) {
388  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
389  srcY = s->current_picture.f.data[0];
390  srcU = s->current_picture.f.data[1];
391  srcV = s->current_picture.f.data[2];
392  luty = v->curr_luty;
393  lutuv = v->curr_lutuv;
394  use_ic = v->curr_use_ic;
395  } else {
396  srcY = s->last_picture.f.data[0];
397  srcU = s->last_picture.f.data[1];
398  srcV = s->last_picture.f.data[2];
399  luty = v->last_luty;
400  lutuv = v->last_lutuv;
401  use_ic = v->last_use_ic;
402  }
403  } else {
404  srcY = s->next_picture.f.data[0];
405  srcU = s->next_picture.f.data[1];
406  srcV = s->next_picture.f.data[2];
407  luty = v->next_luty;
408  lutuv = v->next_lutuv;
409  use_ic = v->next_use_ic;
410  }
411 
412  if (!srcY || !srcU) {
413  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
414  return;
415  }
416 
417  src_x = s->mb_x * 16 + (mx >> 2);
418  src_y = s->mb_y * 16 + (my >> 2);
419  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
420  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
421 
422  if (v->profile != PROFILE_ADVANCED) {
423  src_x = av_clip( src_x, -16, s->mb_width * 16);
424  src_y = av_clip( src_y, -16, s->mb_height * 16);
425  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
426  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
427  } else {
428  src_x = av_clip( src_x, -17, s->avctx->coded_width);
429  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
430  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
431  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
432  }
433 
434  srcY += src_y * s->linesize + src_x;
435  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
436  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
437 
438  if (v->field_mode && v->ref_field_type[dir]) {
439  srcY += s->current_picture_ptr->f.linesize[0];
440  srcU += s->current_picture_ptr->f.linesize[1];
441  srcV += s->current_picture_ptr->f.linesize[2];
442  }
443 
444  /* for grayscale we should not try to read from unknown area */
445  if (s->flags & CODEC_FLAG_GRAY) {
446  srcU = s->edge_emu_buffer + 18 * s->linesize;
447  srcV = s->edge_emu_buffer + 18 * s->linesize;
448  }
449 
450  if (v->rangeredfrm || use_ic
451  || s->h_edge_pos < 22 || v_edge_pos < 22
452  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
453  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
454  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
455 
456  srcY -= s->mspel * (1 + s->linesize);
457  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize,
458  srcY, s->linesize,
459  17 + s->mspel * 2, 17 + s->mspel * 2,
460  src_x - s->mspel, src_y - s->mspel,
461  s->h_edge_pos, v_edge_pos);
462  srcY = s->edge_emu_buffer;
463  s->vdsp.emulated_edge_mc(uvbuf, s->uvlinesize, srcU, s->uvlinesize,
464  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
465  s->h_edge_pos >> 1, v_edge_pos >> 1);
466  s->vdsp.emulated_edge_mc(uvbuf + 16, s->uvlinesize, srcV, s->uvlinesize,
467  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
468  s->h_edge_pos >> 1, v_edge_pos >> 1);
469  srcU = uvbuf;
470  srcV = uvbuf + 16;
471  /* if we deal with range reduction we need to scale source blocks */
472  if (v->rangeredfrm) {
473  int i, j;
474  uint8_t *src, *src2;
475 
476  src = srcY;
477  for (j = 0; j < 17 + s->mspel * 2; j++) {
478  for (i = 0; i < 17 + s->mspel * 2; i++)
479  src[i] = ((src[i] - 128) >> 1) + 128;
480  src += s->linesize;
481  }
482  src = srcU;
483  src2 = srcV;
484  for (j = 0; j < 9; j++) {
485  for (i = 0; i < 9; i++) {
486  src[i] = ((src[i] - 128) >> 1) + 128;
487  src2[i] = ((src2[i] - 128) >> 1) + 128;
488  }
489  src += s->uvlinesize;
490  src2 += s->uvlinesize;
491  }
492  }
493  /* if we deal with intensity compensation we need to scale source blocks */
494  if (use_ic) {
495  int i, j;
496  uint8_t *src, *src2;
497 
498  src = srcY;
499  for (j = 0; j < 17 + s->mspel * 2; j++) {
500  int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
501  for (i = 0; i < 17 + s->mspel * 2; i++)
502  src[i] = luty[f][src[i]];
503  src += s->linesize;
504  }
505  src = srcU;
506  src2 = srcV;
507  for (j = 0; j < 9; j++) {
508  int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
509  for (i = 0; i < 9; i++) {
510  src[i] = lutuv[f][src[i]];
511  src2[i] = lutuv[f][src2[i]];
512  }
513  src += s->uvlinesize;
514  src2 += s->uvlinesize;
515  }
516  }
517  srcY += s->mspel * (1 + s->linesize);
518  }
519 
520  if (s->mspel) {
521  dxy = ((my & 3) << 2) | (mx & 3);
522  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
523  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
524  srcY += s->linesize * 8;
525  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
526  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
527  } else { // hpel mc - always used for luma
528  dxy = (my & 2) | ((mx & 2) >> 1);
529  if (!v->rnd)
530  s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
531  else
532  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
533  }
534 
535  if (s->flags & CODEC_FLAG_GRAY) return;
536  /* Chroma MC always uses qpel bilinear */
537  uvmx = (uvmx & 3) << 1;
538  uvmy = (uvmy & 3) << 1;
539  if (!v->rnd) {
540  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
541  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
542  } else {
543  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
544  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
545  }
546 }
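/* Editorial note (not part of the original source): the chroma vector in
 * vc1_mc_1mv() is derived from the quarter-pel luma vector by halving, with
 * the 3/4-pel phase rounded up:
 *
 *     uvmx = (mx + ((mx & 3) == 3)) >> 1;
 *
 * Worked examples (hypothetical values): mx = 7 (1 3/4 luma pel) gives
 * uvmx = 4 (exactly one chroma pel), while mx = 5 gives uvmx = 2 (half a
 * chroma pel).  With FASTUVMC set (and ignored for interlaced frame
 * pictures), the subsequent adjustment
 * uvmx += (uvmx < 0) ? (uvmx & 1) : -(uvmx & 1) pulls odd results toward
 * zero, restricting chroma interpolation to half-pel positions. */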
547 
548 static inline int median4(int a, int b, int c, int d)
549 {
550  if (a < b) {
551  if (c < d) return (FFMIN(b, d) + FFMAX(a, c)) / 2;
552  else return (FFMIN(b, c) + FFMAX(a, d)) / 2;
553  } else {
554  if (c < d) return (FFMIN(a, d) + FFMAX(b, c)) / 2;
555  else return (FFMIN(a, c) + FFMAX(b, d)) / 2;
556  }
557 }
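/* Editorial note (not part of the original source): median4() returns the
 * mean of the two middle values of its four arguments.  With hypothetical
 * inputs median4(1, 9, 4, 6), the middle pair of {1, 4, 6, 9} is 4 and 6, so
 * the result is (4 + 6) / 2 = 5.  This "median of four" is the predictor the
 * field-picture MV code below uses when all four candidate vectors are
 * available. */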
558 
559 /** Do motion compensation for 4-MV macroblock - luminance block
560  */
561 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
562 {
563  MpegEncContext *s = &v->s;
564  uint8_t *srcY;
565  int dxy, mx, my, src_x, src_y;
566  int off;
567  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
568  int v_edge_pos = s->v_edge_pos >> v->field_mode;
569  uint8_t (*luty)[256];
570  int use_ic;
571 
572  if ((!v->field_mode ||
573  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
574  !v->s.last_picture.f.data[0])
575  return;
576 
577  mx = s->mv[dir][n][0];
578  my = s->mv[dir][n][1];
579 
580  if (!dir) {
581  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
582  srcY = s->current_picture.f.data[0];
583  luty = v->curr_luty;
584  use_ic = v->curr_use_ic;
585  } else {
586  srcY = s->last_picture.f.data[0];
587  luty = v->last_luty;
588  use_ic = v->last_use_ic;
589  }
590  } else {
591  srcY = s->next_picture.f.data[0];
592  luty = v->next_luty;
593  use_ic = v->next_use_ic;
594  }
595 
596  if (!srcY) {
597  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
598  return;
599  }
600 
601  if (v->field_mode) {
602  if (v->cur_field_type != v->ref_field_type[dir])
603  my = my - 2 + 4 * v->cur_field_type;
604  }
605 
606  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
607  int same_count = 0, opp_count = 0, k;
608  int chosen_mv[2][4][2], f;
609  int tx, ty;
610  for (k = 0; k < 4; k++) {
611  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
612  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
613  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
614  opp_count += f;
615  same_count += 1 - f;
616  }
617  f = opp_count > same_count;
618  switch (f ? opp_count : same_count) {
619  case 4:
620  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
621  chosen_mv[f][2][0], chosen_mv[f][3][0]);
622  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
623  chosen_mv[f][2][1], chosen_mv[f][3][1]);
624  break;
625  case 3:
626  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
627  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
628  break;
629  case 2:
630  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
631  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
632  break;
633  default:
634  av_assert0(0);
635  }
636  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
637  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
638  for (k = 0; k < 4; k++)
639  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
640  }
641 
642  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
643  int qx, qy;
644  int width = s->avctx->coded_width;
645  int height = s->avctx->coded_height >> 1;
646  if (s->pict_type == AV_PICTURE_TYPE_P) {
647  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
648  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
649  }
650  qx = (s->mb_x * 16) + (mx >> 2);
651  qy = (s->mb_y * 8) + (my >> 3);
652 
653  if (qx < -17)
654  mx -= 4 * (qx + 17);
655  else if (qx > width)
656  mx -= 4 * (qx - width);
657  if (qy < -18)
658  my -= 8 * (qy + 18);
659  else if (qy > height + 1)
660  my -= 8 * (qy - height - 1);
661  }
662 
663  if ((v->fcm == ILACE_FRAME) && fieldmv)
664  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
665  else
666  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
667 
668  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
669  if (!fieldmv)
670  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
671  else
672  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
673 
674  if (v->profile != PROFILE_ADVANCED) {
675  src_x = av_clip(src_x, -16, s->mb_width * 16);
676  src_y = av_clip(src_y, -16, s->mb_height * 16);
677  } else {
678  src_x = av_clip(src_x, -17, s->avctx->coded_width);
679  if (v->fcm == ILACE_FRAME) {
680  if (src_y & 1)
681  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
682  else
683  src_y = av_clip(src_y, -18, s->avctx->coded_height);
684  } else {
685  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
686  }
687  }
688 
689  srcY += src_y * s->linesize + src_x;
690  if (v->field_mode && v->ref_field_type[dir])
691  srcY += s->current_picture_ptr->f.linesize[0];
692 
693  if (fieldmv && !(src_y & 1))
694  v_edge_pos--;
695  if (fieldmv && (src_y & 1) && src_y < 4)
696  src_y--;
697  if (v->rangeredfrm || use_ic
698  || s->h_edge_pos < 13 || v_edge_pos < 23
699  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
700  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
701  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
702  /* check emulate edge stride and offset */
703  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize, srcY, s->linesize,
704  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
705  src_x - s->mspel, src_y - (s->mspel << fieldmv),
706  s->h_edge_pos, v_edge_pos);
707  srcY = s->edge_emu_buffer;
708  /* if we deal with range reduction we need to scale source blocks */
709  if (v->rangeredfrm) {
710  int i, j;
711  uint8_t *src;
712 
713  src = srcY;
714  for (j = 0; j < 9 + s->mspel * 2; j++) {
715  for (i = 0; i < 9 + s->mspel * 2; i++)
716  src[i] = ((src[i] - 128) >> 1) + 128;
717  src += s->linesize << fieldmv;
718  }
719  }
720  /* if we deal with intensity compensation we need to scale source blocks */
721  if (use_ic) {
722  int i, j;
723  uint8_t *src;
724 
725  src = srcY;
726  for (j = 0; j < 9 + s->mspel * 2; j++) {
727  int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
728  for (i = 0; i < 9 + s->mspel * 2; i++)
729  src[i] = luty[f][src[i]];
730  src += s->linesize << fieldmv;
731  }
732  }
733  srcY += s->mspel * (1 + (s->linesize << fieldmv));
734  }
735 
736  if (s->mspel) {
737  dxy = ((my & 3) << 2) | (mx & 3);
738  if (avg)
739  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
740  else
741  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
742  } else { // hpel mc - always used for luma
743  dxy = (my & 2) | ((mx & 2) >> 1);
744  if (!v->rnd)
745  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
746  else
747  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
748  }
749 }
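/* Editorial note (not part of the original source): the destination offset
 * computed in vc1_mc_4mv_luma() maps luma block n (0..3) onto the 16x16 MB.
 * For frame MVs,
 *
 *     off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
 *
 * i.e. blocks 1/3 sit 8 pixels to the right and blocks 2/3 start 8 lines
 * down ((n & 2) is 0 or 2, giving 0 or 8 * linesize).  For field MVs in
 * interlaced frame pictures, blocks 2/3 instead start on the second field
 * line (one linesize down) and the writes use a doubled stride,
 * s->linesize << fieldmv, so that each sub-block stays within one field. */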
750 
751 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
752 {
753  int idx, i;
754  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
755 
756  idx = ((a[3] != flag) << 3)
757  | ((a[2] != flag) << 2)
758  | ((a[1] != flag) << 1)
759  | (a[0] != flag);
760  if (!idx) {
761  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
762  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
763  return 4;
764  } else if (count[idx] == 1) {
765  switch (idx) {
766  case 0x1:
767  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
768  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
769  return 3;
770  case 0x2:
771  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
772  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
773  return 3;
774  case 0x4:
775  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
776  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
777  return 3;
778  case 0x8:
779  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
780  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
781  return 3;
782  }
783  } else if (count[idx] == 2) {
784  int t1 = 0, t2 = 0;
785  for (i = 0; i < 3; i++)
786  if (!a[i]) {
787  t1 = i;
788  break;
789  }
790  for (i = t1 + 1; i < 4; i++)
791  if (!a[i]) {
792  t2 = i;
793  break;
794  }
795  *tx = (mvx[t1] + mvx[t2]) / 2;
796  *ty = (mvy[t1] + mvy[t2]) / 2;
797  return 2;
798  } else {
799  return 0;
800  }
801  return -1;
802 }
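/* Editorial note (not part of the original source): in get_chroma_mv() the
 * variable idx is a 4-bit mask of the luma blocks whose a[k] does not match
 * `flag` (intra blocks, or blocks predicted from the non-dominant field), and
 * count[idx] is simply its popcount.  A sketch of the resulting policy:
 *
 *     popcount 0  -> component-wise median4() of all four luma MVs
 *     popcount 1  -> mid_pred() of the three remaining MVs
 *     popcount 2  -> average of two of the MVs
 *     popcount 3+ -> return 0; the caller then skips chroma MC entirely
 */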
803 
804 /** Do motion compensation for 4-MV macroblock - both chroma blocks
805  */
806 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
807 {
808  MpegEncContext *s = &v->s;
809  H264ChromaContext *h264chroma = &v->h264chroma;
810  uint8_t *srcU, *srcV;
811  int uvmx, uvmy, uvsrc_x, uvsrc_y;
812  int k, tx = 0, ty = 0;
813  int mvx[4], mvy[4], intra[4], mv_f[4];
814  int valid_count;
815  int chroma_ref_type = v->cur_field_type;
816  int v_edge_pos = s->v_edge_pos >> v->field_mode;
817  uint8_t (*lutuv)[256];
818  int use_ic;
819 
820  if (!v->field_mode && !v->s.last_picture.f.data[0])
821  return;
822  if (s->flags & CODEC_FLAG_GRAY)
823  return;
824 
825  for (k = 0; k < 4; k++) {
826  mvx[k] = s->mv[dir][k][0];
827  mvy[k] = s->mv[dir][k][1];
828  intra[k] = v->mb_type[0][s->block_index[k]];
829  if (v->field_mode)
830  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
831  }
832 
833  /* calculate chroma MV vector from four luma MVs */
834  if (!v->field_mode || (v->field_mode && !v->numref)) {
835  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
836  chroma_ref_type = v->reffield;
837  if (!valid_count) {
838  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
839  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
840  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
841  return; //no need to do MC for intra blocks
842  }
843  } else {
844  int dominant = 0;
845  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
846  dominant = 1;
847  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
848  if (dominant)
849  chroma_ref_type = !v->cur_field_type;
850  }
851  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
852  return;
853  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
854  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
855  uvmx = (tx + ((tx & 3) == 3)) >> 1;
856  uvmy = (ty + ((ty & 3) == 3)) >> 1;
857 
858  v->luma_mv[s->mb_x][0] = uvmx;
859  v->luma_mv[s->mb_x][1] = uvmy;
860 
861  if (v->fastuvmc) {
862  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
863  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
864  }
865  // Field conversion bias
866  if (v->cur_field_type != chroma_ref_type)
867  uvmy += 2 - 4 * chroma_ref_type;
868 
869  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
870  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
871 
872  if (v->profile != PROFILE_ADVANCED) {
873  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
874  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
875  } else {
876  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
877  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
878  }
879 
880  if (!dir) {
881  if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
882  srcU = s->current_picture.f.data[1];
883  srcV = s->current_picture.f.data[2];
884  lutuv = v->curr_lutuv;
885  use_ic = v->curr_use_ic;
886  } else {
887  srcU = s->last_picture.f.data[1];
888  srcV = s->last_picture.f.data[2];
889  lutuv = v->last_lutuv;
890  use_ic = v->last_use_ic;
891  }
892  } else {
893  srcU = s->next_picture.f.data[1];
894  srcV = s->next_picture.f.data[2];
895  lutuv = v->next_lutuv;
896  use_ic = v->next_use_ic;
897  }
898 
899  if (!srcU) {
900  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
901  return;
902  }
903 
904  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
905  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
906 
907  if (v->field_mode) {
908  if (chroma_ref_type) {
909  srcU += s->current_picture_ptr->f.linesize[1];
910  srcV += s->current_picture_ptr->f.linesize[2];
911  }
912  }
913 
914  if (v->rangeredfrm || use_ic
915  || s->h_edge_pos < 18 || v_edge_pos < 18
916  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
917  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
918  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, srcU,
919  s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
920  s->h_edge_pos >> 1, v_edge_pos >> 1);
921  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, s->uvlinesize, srcV,
922  s->uvlinesize, 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
923  s->h_edge_pos >> 1, v_edge_pos >> 1);
924  srcU = s->edge_emu_buffer;
925  srcV = s->edge_emu_buffer + 16;
926 
927  /* if we deal with range reduction we need to scale source blocks */
928  if (v->rangeredfrm) {
929  int i, j;
930  uint8_t *src, *src2;
931 
932  src = srcU;
933  src2 = srcV;
934  for (j = 0; j < 9; j++) {
935  for (i = 0; i < 9; i++) {
936  src[i] = ((src[i] - 128) >> 1) + 128;
937  src2[i] = ((src2[i] - 128) >> 1) + 128;
938  }
939  src += s->uvlinesize;
940  src2 += s->uvlinesize;
941  }
942  }
943  /* if we deal with intensity compensation we need to scale source blocks */
944  if (use_ic) {
945  int i, j;
946  uint8_t *src, *src2;
947 
948  src = srcU;
949  src2 = srcV;
950  for (j = 0; j < 9; j++) {
951  int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
952  for (i = 0; i < 9; i++) {
953  src[i] = lutuv[f][src[i]];
954  src2[i] = lutuv[f][src2[i]];
955  }
956  src += s->uvlinesize;
957  src2 += s->uvlinesize;
958  }
959  }
960  }
961 
962  /* Chroma MC always uses qpel bilinear */
963  uvmx = (uvmx & 3) << 1;
964  uvmy = (uvmy & 3) << 1;
965  if (!v->rnd) {
966  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
967  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
968  } else {
969  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
970  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
971  }
972 }
973 
974 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
975  */
976 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
977 {
978  MpegEncContext *s = &v->s;
979  H264ChromaContext *h264chroma = &v->h264chroma;
980  uint8_t *srcU, *srcV;
981  int uvsrc_x, uvsrc_y;
982  int uvmx_field[4], uvmy_field[4];
983  int i, off, tx, ty;
984  int fieldmv = v->blk_mv_type[s->block_index[0]];
985  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
986  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
987  int v_edge_pos = s->v_edge_pos >> 1;
988  int use_ic;
989  uint8_t (*lutuv)[256];
990 
991  if (s->flags & CODEC_FLAG_GRAY)
992  return;
993 
994  for (i = 0; i < 4; i++) {
995  int d = i < 2 ? dir: dir2;
996  tx = s->mv[d][i][0];
997  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
998  ty = s->mv[d][i][1];
999  if (fieldmv)
1000  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
1001  else
1002  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
1003  }
1004 
1005  for (i = 0; i < 4; i++) {
1006  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1007  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1008  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1009  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1010  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1011  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1012  if (i < 2 ? dir : dir2) {
1013  srcU = s->next_picture.f.data[1];
1014  srcV = s->next_picture.f.data[2];
1015  lutuv = v->next_lutuv;
1016  use_ic = v->next_use_ic;
1017  } else {
1018  srcU = s->last_picture.f.data[1];
1019  srcV = s->last_picture.f.data[2];
1020  lutuv = v->last_lutuv;
1021  use_ic = v->last_use_ic;
1022  }
1023  if (!srcU)
1024  return;
1025  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1026  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1027  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1028  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1029 
1030  if (fieldmv && !(uvsrc_y & 1))
1031  v_edge_pos = (s->v_edge_pos >> 1) - 1;
1032 
1033  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1034  uvsrc_y--;
1035  if (use_ic
1036  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1037  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1038  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1039  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->uvlinesize, srcU,
1040  s->uvlinesize, 5, (5 << fieldmv), uvsrc_x,
1041  uvsrc_y, s->h_edge_pos >> 1, v_edge_pos);
1042  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, s->uvlinesize, srcV,
1043  s->uvlinesize, 5, (5 << fieldmv), uvsrc_x,
1044  uvsrc_y, s->h_edge_pos >> 1, v_edge_pos);
1045  srcU = s->edge_emu_buffer;
1046  srcV = s->edge_emu_buffer + 16;
1047 
1048  /* if we deal with intensity compensation we need to scale source blocks */
1049  if (use_ic) {
1050  int i, j;
1051  uint8_t *src, *src2;
1052 
1053  src = srcU;
1054  src2 = srcV;
1055  for (j = 0; j < 5; j++) {
1056  int f = (uvsrc_y + (j << fieldmv)) & 1;
1057  for (i = 0; i < 5; i++) {
1058  src[i] = lutuv[f][src[i]];
1059  src2[i] = lutuv[f][src2[i]];
1060  }
1061  src += s->uvlinesize << fieldmv;
1062  src2 += s->uvlinesize << fieldmv;
1063  }
1064  }
1065  }
1066  if (avg) {
1067  if (!v->rnd) {
1068  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1069  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1070  } else {
1071  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073  }
1074  } else {
1075  if (!v->rnd) {
1076  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1077  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1078  } else {
1079  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1080  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1081  }
1082  }
1083  }
1084 }
1085 
1086 /***********************************************************************/
1087 /**
1088  * @name VC-1 Block-level functions
1089  * @see 7.1.4, p91 and 8.1.1.7, p(1)04
1090  * @{
1091  */
1092 
1093 /**
1094  * @def GET_MQUANT
1095  * @brief Get macroblock-level quantizer scale
1096  */
1097 #define GET_MQUANT() \
1098  if (v->dquantfrm) { \
1099  int edges = 0; \
1100  if (v->dqprofile == DQPROFILE_ALL_MBS) { \
1101  if (v->dqbilevel) { \
1102  mquant = (get_bits1(gb)) ? v->altpq : v->pq; \
1103  } else { \
1104  mqdiff = get_bits(gb, 3); \
1105  if (mqdiff != 7) \
1106  mquant = v->pq + mqdiff; \
1107  else \
1108  mquant = get_bits(gb, 5); \
1109  } \
1110  } \
1111  if (v->dqprofile == DQPROFILE_SINGLE_EDGE) \
1112  edges = 1 << v->dqsbedge; \
1113  else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES) \
1114  edges = (3 << v->dqsbedge) % 15; \
1115  else if (v->dqprofile == DQPROFILE_FOUR_EDGES) \
1116  edges = 15; \
1117  if ((edges&1) && !s->mb_x) \
1118  mquant = v->altpq; \
1119  if ((edges&2) && s->first_slice_line) \
1120  mquant = v->altpq; \
1121  if ((edges&4) && s->mb_x == (s->mb_width - 1)) \
1122  mquant = v->altpq; \
1123  if ((edges&8) && s->mb_y == (s->mb_height - 1)) \
1124  mquant = v->altpq; \
1125  if (!mquant || mquant > 31) { \
1126  av_log(v->s.avctx, AV_LOG_ERROR, \
1127  "Overriding invalid mquant %d\n", mquant); \
1128  mquant = 1; \
1129  } \
1130  }
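/* Editorial note (not part of the original source): GET_MQUANT() is a
 * statement macro that expects `v`, `s`, `gb`, `mquant` and `mqdiff` to be in
 * scope.  A minimal usage sketch, assuming the caller has already defaulted
 * the quantizer to the picture-level value:
 *
 *     int mqdiff, mquant = v->pq;
 *     GET_MQUANT();        // may override mquant per MB / per edge
 *     // mquant now holds the MB-level quantizer scale
 *
 * The macro either reads an explicit per-MB value (DQPROFILE_ALL_MBS) or
 * forces v->altpq on the picture edges selected by the DQPROFILE syntax
 * elements, falling back to 1 if the decoded value is invalid. */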
1131 
1132 /**
1133  * @def GET_MVDATA(_dmv_x, _dmv_y)
1134  * @brief Get MV differentials
1135  * @see MVDATA decoding from 8.3.5.2, p(1)20
1136  * @param _dmv_x Horizontal differential for decoded MV
1137  * @param _dmv_y Vertical differential for decoded MV
1138  */
1139 #define GET_MVDATA(_dmv_x, _dmv_y) \
1140  index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
1141  VC1_MV_DIFF_VLC_BITS, 2); \
1142  if (index > 36) { \
1143  mb_has_coeffs = 1; \
1144  index -= 37; \
1145  } else \
1146  mb_has_coeffs = 0; \
1147  s->mb_intra = 0; \
1148  if (!index) { \
1149  _dmv_x = _dmv_y = 0; \
1150  } else if (index == 35) { \
1151  _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample); \
1152  _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample); \
1153  } else if (index == 36) { \
1154  _dmv_x = 0; \
1155  _dmv_y = 0; \
1156  s->mb_intra = 1; \
1157  } else { \
1158  index1 = index % 6; \
1159  if (!s->quarter_sample && index1 == 5) val = 1; \
1160  else val = 0; \
1161  if (size_table[index1] - val > 0) \
1162  val = get_bits(gb, size_table[index1] - val); \
1163  else val = 0; \
1164  sign = 0 - (val&1); \
1165  _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign; \
1166  \
1167  index1 = index / 6; \
1168  if (!s->quarter_sample && index1 == 5) val = 1; \
1169  else val = 0; \
1170  if (size_table[index1] - val > 0) \
1171  val = get_bits(gb, size_table[index1] - val); \
1172  else val = 0; \
1173  sign = 0 - (val & 1); \
1174  _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign; \
1175  }
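/* Editorial note (not part of the original source): in GET_MVDATA() the pair
 *
 *     sign   = 0 - (val & 1);
 *     _dmv_x = (sign ^ ((val >> 1) + offset_table[index1])) - sign;
 *
 * is a branch-free sign decode: the LSB of val selects the sign and the
 * remaining bits plus the table offset give the magnitude.  Worked example
 * with hypothetical values val = 5 and offset_table[index1] = 2:
 * magnitude = (5 >> 1) + 2 = 4, the LSB is set, so sign = -1 (all ones) and
 * _dmv_x = (~4) - (-1) = -4. */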
1176 
1177 static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
1178  int *dmv_y, int *pred_flag)
1179 {
1180  int index, index1;
1181  int extend_x = 0, extend_y = 0;
1182  GetBitContext *gb = &v->s.gb;
1183  int bits, esc;
1184  int val, sign;
1185  const int* offs_tab;
1186 
1187  if (v->numref) {
1188  bits = VC1_2REF_MVDATA_VLC_BITS;
1189  esc = 125;
1190  } else {
1191  bits = VC1_1REF_MVDATA_VLC_BITS;
1192  esc = 71;
1193  }
1194  switch (v->dmvrange) {
1195  case 1:
1196  extend_x = 1;
1197  break;
1198  case 2:
1199  extend_y = 1;
1200  break;
1201  case 3:
1202  extend_x = extend_y = 1;
1203  break;
1204  }
1205  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1206  if (index == esc) {
1207  *dmv_x = get_bits(gb, v->k_x);
1208  *dmv_y = get_bits(gb, v->k_y);
1209  if (v->numref) {
1210  if (pred_flag) {
1211  *pred_flag = *dmv_y & 1;
1212  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1213  } else {
1214  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1215  }
1216  }
1217  }
1218  else {
1219  av_assert0(index < esc);
1220  if (extend_x)
1221  offs_tab = offset_table2;
1222  else
1223  offs_tab = offset_table1;
1224  index1 = (index + 1) % 9;
1225  if (index1 != 0) {
1226  val = get_bits(gb, index1 + extend_x);
1227  sign = 0 -(val & 1);
1228  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1229  } else
1230  *dmv_x = 0;
1231  if (extend_y)
1232  offs_tab = offset_table2;
1233  else
1234  offs_tab = offset_table1;
1235  index1 = (index + 1) / 9;
1236  if (index1 > v->numref) {
1237  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1238  sign = 0 - (val & 1);
1239  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1240  } else
1241  *dmv_y = 0;
1242  if (v->numref && pred_flag)
1243  *pred_flag = index1 & 1;
1244  }
1245 }
1246 
1247 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1248 {
1249  int scaledvalue, refdist;
1250  int scalesame1, scalesame2;
1251  int scalezone1_x, zone1offset_x;
1252  int table_index = dir ^ v->second_field;
1253 
1254  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1255  refdist = v->refdist;
1256  else
1257  refdist = dir ? v->brfd : v->frfd;
1258  if (refdist > 3)
1259  refdist = 3;
1260  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1261  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1262  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1263  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1264 
1265  if (FFABS(n) > 255)
1266  scaledvalue = n;
1267  else {
1268  if (FFABS(n) < scalezone1_x)
1269  scaledvalue = (n * scalesame1) >> 8;
1270  else {
1271  if (n < 0)
1272  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1273  else
1274  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1275  }
1276  }
1277  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1278 }
1279 
1280 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1281 {
1282  int scaledvalue, refdist;
1283  int scalesame1, scalesame2;
1284  int scalezone1_y, zone1offset_y;
1285  int table_index = dir ^ v->second_field;
1286 
1287  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1288  refdist = v->refdist;
1289  else
1290  refdist = dir ? v->brfd : v->frfd;
1291  if (refdist > 3)
1292  refdist = 3;
1293  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1294  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1295  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1296  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1297 
1298  if (FFABS(n) > 63)
1299  scaledvalue = n;
1300  else {
1301  if (FFABS(n) < scalezone1_y)
1302  scaledvalue = (n * scalesame1) >> 8;
1303  else {
1304  if (n < 0)
1305  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1306  else
1307  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1308  }
1309  }
1310 
1311  if (v->cur_field_type && !v->ref_field_type[dir])
1312  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1313  else
1314  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1315 }
1316 
1317 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1318 {
1319  int scalezone1_x, zone1offset_x;
1320  int scaleopp1, scaleopp2, brfd;
1321  int scaledvalue;
1322 
1323  brfd = FFMIN(v->brfd, 3);
1324  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1325  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1326  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1327  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1328 
1329  if (FFABS(n) > 255)
1330  scaledvalue = n;
1331  else {
1332  if (FFABS(n) < scalezone1_x)
1333  scaledvalue = (n * scaleopp1) >> 8;
1334  else {
1335  if (n < 0)
1336  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1337  else
1338  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1339  }
1340  }
1341  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1342 }
1343 
1344 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1345 {
1346  int scalezone1_y, zone1offset_y;
1347  int scaleopp1, scaleopp2, brfd;
1348  int scaledvalue;
1349 
1350  brfd = FFMIN(v->brfd, 3);
1351  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1352  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1353  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1354  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1355 
1356  if (FFABS(n) > 63)
1357  scaledvalue = n;
1358  else {
1359  if (FFABS(n) < scalezone1_y)
1360  scaledvalue = (n * scaleopp1) >> 8;
1361  else {
1362  if (n < 0)
1363  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1364  else
1365  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1366  }
1367  }
1368  if (v->cur_field_type && !v->ref_field_type[dir]) {
1369  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1370  } else {
1371  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1372  }
1373 }
1374 
1375 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1376  int dim, int dir)
1377 {
1378  int brfd, scalesame;
1379  int hpel = 1 - v->s.quarter_sample;
1380 
1381  n >>= hpel;
1382  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1383  if (dim)
1384  n = scaleforsame_y(v, i, n, dir) << hpel;
1385  else
1386  n = scaleforsame_x(v, n, dir) << hpel;
1387  return n;
1388  }
1389  brfd = FFMIN(v->brfd, 3);
1390  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1391 
1392  n = (n * scalesame >> 8) << hpel;
1393  return n;
1394 }
1395 
1396 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1397  int dim, int dir)
1398 {
1399  int refdist, scaleopp;
1400  int hpel = 1 - v->s.quarter_sample;
1401 
1402  n >>= hpel;
1403  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1404  if (dim)
1405  n = scaleforopp_y(v, n, dir) << hpel;
1406  else
1407  n = scaleforopp_x(v, n) << hpel;
1408  return n;
1409  }
1410  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1411  refdist = FFMIN(v->refdist, 3);
1412  else
1413  refdist = dir ? v->brfd : v->frfd;
1414  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1415 
1416  n = (n * scaleopp >> 8) << hpel;
1417  return n;
1418 }
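/* Editorial note (not part of the original source): the scalefor*() helpers
 * above rescale MV predictors taken from the other field, using 8.8
 * fixed-point factors from the ff_vc1_field_mvpred_scales /
 * ff_vc1_b_field_mvpred_scales tables.  The simplest path, as in
 * scaleforsame() and scaleforopp(), is
 *
 *     n >>= hpel;                       // hpel = 1 - quarter_sample
 *     n   = (n * scale >> 8) << hpel;
 *
 * so for a hypothetical table entry scale = 205 and predictor n = 20 the
 * result is (20 * 205) >> 8 = 16, i.e. the candidate is shrunk to roughly
 * 0.8 of its value.  The _x/_y variants refine this with the scalezone and
 * zone1offset table entries once the vector leaves the first zone. */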
1419 
1420 /** Predict and set motion vector
1421  */
1422 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1423  int mv1, int r_x, int r_y, uint8_t* is_intra,
1424  int pred_flag, int dir)
1425 {
1426  MpegEncContext *s = &v->s;
1427  int xy, wrap, off = 0;
1428  int16_t *A, *B, *C;
1429  int px, py;
1430  int sum;
1431  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1432  int opposite, a_f, b_f, c_f;
1433  int16_t field_predA[2];
1434  int16_t field_predB[2];
1435  int16_t field_predC[2];
1436  int a_valid, b_valid, c_valid;
1437  int hybridmv_thresh, y_bias = 0;
1438 
1439  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1440  ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV)))
1441  mixedmv_pic = 1;
1442  else
1443  mixedmv_pic = 0;
1444  /* scale MV difference to be quad-pel */
1445  dmv_x <<= 1 - s->quarter_sample;
1446  dmv_y <<= 1 - s->quarter_sample;
1447 
1448  wrap = s->b8_stride;
1449  xy = s->block_index[n];
1450 
1451  if (s->mb_intra) {
1452  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1453  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1454  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1455  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1456  if (mv1) { /* duplicate motion data for 1-MV block */
1457  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1458  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1459  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1460  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1461  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1462  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1463  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1464  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1465  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1466  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1467  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1468  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1469  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1470  }
1471  return;
1472  }
1473 
1474  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1475  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1476  if (mv1) {
1477  if (v->field_mode && mixedmv_pic)
1478  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1479  else
1480  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1481  } else {
1482  //in 4-MV mode different blocks have different B predictor position
1483  switch (n) {
1484  case 0:
1485  off = (s->mb_x > 0) ? -1 : 1;
1486  break;
1487  case 1:
1488  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1489  break;
1490  case 2:
1491  off = 1;
1492  break;
1493  case 3:
1494  off = -1;
1495  }
1496  }
1497  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1498 
1499  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1500  b_valid = a_valid && (s->mb_width > 1);
1501  c_valid = s->mb_x || (n == 1 || n == 3);
1502  if (v->field_mode) {
1503  a_valid = a_valid && !is_intra[xy - wrap];
1504  b_valid = b_valid && !is_intra[xy - wrap + off];
1505  c_valid = c_valid && !is_intra[xy - 1];
1506  }
1507 
1508  if (a_valid) {
1509  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1510  num_oppfield += a_f;
1511  num_samefield += 1 - a_f;
1512  field_predA[0] = A[0];
1513  field_predA[1] = A[1];
1514  } else {
1515  field_predA[0] = field_predA[1] = 0;
1516  a_f = 0;
1517  }
1518  if (b_valid) {
1519  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1520  num_oppfield += b_f;
1521  num_samefield += 1 - b_f;
1522  field_predB[0] = B[0];
1523  field_predB[1] = B[1];
1524  } else {
1525  field_predB[0] = field_predB[1] = 0;
1526  b_f = 0;
1527  }
1528  if (c_valid) {
1529  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1530  num_oppfield += c_f;
1531  num_samefield += 1 - c_f;
1532  field_predC[0] = C[0];
1533  field_predC[1] = C[1];
1534  } else {
1535  field_predC[0] = field_predC[1] = 0;
1536  c_f = 0;
1537  }
1538 
1539  if (v->field_mode) {
1540  if (!v->numref)
1541  // REFFIELD determines if the last field or the second-last field is
1542  // to be used as reference
1543  opposite = 1 - v->reffield;
1544  else {
1545  if (num_samefield <= num_oppfield)
1546  opposite = 1 - pred_flag;
1547  else
1548  opposite = pred_flag;
1549  }
1550  } else
1551  opposite = 0;
1552  if (opposite) {
1553  if (a_valid && !a_f) {
1554  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1555  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1556  }
1557  if (b_valid && !b_f) {
1558  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1559  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1560  }
1561  if (c_valid && !c_f) {
1562  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1563  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1564  }
1565  v->mv_f[dir][xy + v->blocks_off] = 1;
1566  v->ref_field_type[dir] = !v->cur_field_type;
1567  } else {
1568  if (a_valid && a_f) {
1569  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1570  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1571  }
1572  if (b_valid && b_f) {
1573  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1574  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1575  }
1576  if (c_valid && c_f) {
1577  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1578  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1579  }
1580  v->mv_f[dir][xy + v->blocks_off] = 0;
1581  v->ref_field_type[dir] = v->cur_field_type;
1582  }
1583 
1584  if (a_valid) {
1585  px = field_predA[0];
1586  py = field_predA[1];
1587  } else if (c_valid) {
1588  px = field_predC[0];
1589  py = field_predC[1];
1590  } else if (b_valid) {
1591  px = field_predB[0];
1592  py = field_predB[1];
1593  } else {
1594  px = 0;
1595  py = 0;
1596  }
1597 
1598  if (num_samefield + num_oppfield > 1) {
1599  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1600  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1601  }
1602 
1603  /* Pullback MV as specified in 8.3.5.3.4 */
1604  if (!v->field_mode) {
1605  int qx, qy, X, Y;
1606  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1607  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1608  X = (s->mb_width << 6) - 4;
1609  Y = (s->mb_height << 6) - 4;
1610  if (mv1) {
1611  if (qx + px < -60) px = -60 - qx;
1612  if (qy + py < -60) py = -60 - qy;
1613  } else {
1614  if (qx + px < -28) px = -28 - qx;
1615  if (qy + py < -28) py = -28 - qy;
1616  }
1617  if (qx + px > X) px = X - qx;
1618  if (qy + py > Y) py = Y - qy;
1619  }
1620 
1621  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1622  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1623  hybridmv_thresh = 32;
1624  if (a_valid && c_valid) {
1625  if (is_intra[xy - wrap])
1626  sum = FFABS(px) + FFABS(py);
1627  else
1628  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1629  if (sum > hybridmv_thresh) {
1630  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1631  px = field_predA[0];
1632  py = field_predA[1];
1633  } else {
1634  px = field_predC[0];
1635  py = field_predC[1];
1636  }
1637  } else {
1638  if (is_intra[xy - 1])
1639  sum = FFABS(px) + FFABS(py);
1640  else
1641  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1642  if (sum > hybridmv_thresh) {
1643  if (get_bits1(&s->gb)) {
1644  px = field_predA[0];
1645  py = field_predA[1];
1646  } else {
1647  px = field_predC[0];
1648  py = field_predC[1];
1649  }
1650  }
1651  }
1652  }
1653  }
1654 
1655  if (v->field_mode && v->numref)
1656  r_y >>= 1;
1657  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1658  y_bias = 1;
1659  /* store MV using signed modulus of MV range defined in 4.11 */
1660  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1661  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1662  if (mv1) { /* duplicate motion data for 1-MV block */
1663  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1664  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1665  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1666  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1667  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1668  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1669  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1670  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1671  }
1672 }
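/* Editorial note (not part of the original source): the final store in
 * vc1_pred_mv(),
 *
 *     s->mv[dir][n][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
 *
 * is the signed-modulus wrap referred to by the "defined in 4.11" comment:
 * with r_x a power of two, the masked sum is folded back into the legal
 * range [-r_x, r_x - 1].  Worked example with hypothetical values r_x = 64
 * and px + dmv_x = 70: (70 + 64) & 127 = 6, and 6 - 64 = -58, i.e. 70 wraps
 * around to -58. */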
1673 
1674 /** Predict and set motion vector for interlaced frame picture MBs
1675  */
1676 static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
1677  int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
1678 {
1679  MpegEncContext *s = &v->s;
1680  int xy, wrap, off = 0;
1681  int A[2], B[2], C[2];
1682  int px = 0, py = 0;
1683  int a_valid = 0, b_valid = 0, c_valid = 0;
1684  int field_a, field_b, field_c; // 0: same, 1: opposite
1685  int total_valid, num_samefield, num_oppfield;
1686  int pos_c, pos_b, n_adj;
1687 
1688  wrap = s->b8_stride;
1689  xy = s->block_index[n];
1690 
1691  if (s->mb_intra) {
1692  s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
1693  s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
1694  s->current_picture.motion_val[1][xy][0] = 0;
1695  s->current_picture.motion_val[1][xy][1] = 0;
1696  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1697  s->current_picture.motion_val[0][xy + 1][0] = 0;
1698  s->current_picture.motion_val[0][xy + 1][1] = 0;
1699  s->current_picture.motion_val[0][xy + wrap][0] = 0;
1700  s->current_picture.motion_val[0][xy + wrap][1] = 0;
1701  s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
1702  s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
1703  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1704  s->current_picture.motion_val[1][xy + 1][0] = 0;
1705  s->current_picture.motion_val[1][xy + 1][1] = 0;
1706  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1707  s->current_picture.motion_val[1][xy + wrap][1] = 0;
1708  s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
1709  s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
1710  }
1711  return;
1712  }
1713 
1714  off = ((n == 0) || (n == 1)) ? 1 : -1;
1715  /* predict A */
1716  if (s->mb_x || (n == 1) || (n == 3)) {
1717  if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
1718  || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
1719  A[0] = s->current_picture.motion_val[dir][xy - 1][0];
1720  A[1] = s->current_picture.motion_val[dir][xy - 1][1];
1721  a_valid = 1;
1722  } else { // current block has frame mv and cand. has field MV (so average)
1723  A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
1724  + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
1725  A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
1726  + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
1727  a_valid = 1;
1728  }
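 /* Note: when the current block uses a frame MV but the left neighbour is
  * field coded, its two field MVs (stored one b8 row apart, hence the
  * off * wrap term) are averaged with rounding into a single frame-MV
  * candidate for A. */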
1729  if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
1730  a_valid = 0;
1731  A[0] = A[1] = 0;
1732  }
1733  } else
1734  A[0] = A[1] = 0;
1735  /* Predict B and C */
1736  B[0] = B[1] = C[0] = C[1] = 0;
1737  if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
1738  if (!s->first_slice_line) {
1739  if (!v->is_intra[s->mb_x - s->mb_stride]) {
1740  b_valid = 1;
1741  n_adj = n | 2;
1742  pos_b = s->block_index[n_adj] - 2 * wrap;
1743  if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
1744  n_adj = (n & 2) | (n & 1);
1745  }
1746  B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
1747  B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
1748  if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
1749  B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
1750  B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
1751  }
1752  }
1753  if (s->mb_width > 1) {
1754  if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
1755  c_valid = 1;
1756  n_adj = 2;
1757  pos_c = s->block_index[2] - 2 * wrap + 2;
1758  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1759  n_adj = n & 2;
1760  }
1761  C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
1762  C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
1763  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1764  C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
1765  C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
1766  }
1767  if (s->mb_x == s->mb_width - 1) {
1768  if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
1769  c_valid = 1;
1770  n_adj = 3;
1771  pos_c = s->block_index[3] - 2 * wrap - 2;
1772  if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
1773  n_adj = n | 1;
1774  }
1775  C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
1776  C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
1777  if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
1778  C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
1779  C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
1780  }
1781  } else
1782  c_valid = 0;
1783  }
1784  }
1785  }
1786  }
1787  } else {
1788  pos_b = s->block_index[1];
1789  b_valid = 1;
1790  B[0] = s->current_picture.motion_val[dir][pos_b][0];
1791  B[1] = s->current_picture.motion_val[dir][pos_b][1];
1792  pos_c = s->block_index[0];
1793  c_valid = 1;
1794  C[0] = s->current_picture.motion_val[dir][pos_c][0];
1795  C[1] = s->current_picture.motion_val[dir][pos_c][1];
1796  }
1797 
1798  total_valid = a_valid + b_valid + c_valid;
1799  // check if predictor A is out of bounds
1800  if (!s->mb_x && !(n == 1 || n == 3)) {
1801  A[0] = A[1] = 0;
1802  }
1803  // check if predictor B is out of bounds
1804  if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
1805  B[0] = B[1] = C[0] = C[1] = 0;
1806  }
1807  if (!v->blk_mv_type[xy]) {
1808  if (s->mb_width == 1) {
1809  px = B[0];
1810  py = B[1];
1811  } else {
1812  if (total_valid >= 2) {
1813  px = mid_pred(A[0], B[0], C[0]);
1814  py = mid_pred(A[1], B[1], C[1]);
1815  } else if (total_valid) {
1816  if (a_valid) { px = A[0]; py = A[1]; }
1817  else if (b_valid) { px = B[0]; py = B[1]; }
1818  else { px = C[0]; py = C[1]; }
1819  }
1820  }
1821  } else {
1822  if (a_valid)
1823  field_a = (A[1] & 4) ? 1 : 0;
1824  else
1825  field_a = 0;
1826  if (b_valid)
1827  field_b = (B[1] & 4) ? 1 : 0;
1828  else
1829  field_b = 0;
1830  if (c_valid)
1831  field_c = (C[1] & 4) ? 1 : 0;
1832  else
1833  field_c = 0;
1834 
1835  num_oppfield = field_a + field_b + field_c;
1836  num_samefield = total_valid - num_oppfield;
1837  if (total_valid == 3) {
1838  if ((num_samefield == 3) || (num_oppfield == 3)) {
1839  px = mid_pred(A[0], B[0], C[0]);
1840  py = mid_pred(A[1], B[1], C[1]);
1841  } else if (num_samefield >= num_oppfield) {
1842  /* take one MV from the same-field set depending on priority;
1843  the check for B may not be necessary */
1844  px = !field_a ? A[0] : B[0];
1845  py = !field_a ? A[1] : B[1];
1846  } else {
1847  px = field_a ? A[0] : B[0];
1848  py = field_a ? A[1] : B[1];
1849  }
1850  } else if (total_valid == 2) {
1851  if (num_samefield >= num_oppfield) {
1852  if (!field_a && a_valid) {
1853  px = A[0];
1854  py = A[1];
1855  } else if (!field_b && b_valid) {
1856  px = B[0];
1857  py = B[1];
1858  } else /*if (c_valid)*/ {
1859  av_assert1(c_valid);
1860  px = C[0];
1861  py = C[1];
1862  } /*else px = py = 0;*/
1863  } else {
1864  if (field_a && a_valid) {
1865  px = A[0];
1866  py = A[1];
1867  } else /*if (field_b && b_valid)*/ {
1868  av_assert1(field_b && b_valid);
1869  px = B[0];
1870  py = B[1];
1871  } /*else if (c_valid) {
1872  px = C[0];
1873  py = C[1];
1874  }*/
1875  }
1876  } else if (total_valid == 1) {
1877  px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
1878  py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
1879  }
1880  }
1881 
1882  /* store MV using signed modulus of MV range defined in 4.11 */
1883  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1884  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
1885  if (mvn == 1) { /* duplicate motion data for 1-MV block */
1886  s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
1887  s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
1888  s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
1889  s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
1890  s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
1891  s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
1892  } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
1893  s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
1894  s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
1895  s->mv[dir][n + 1][0] = s->mv[dir][n][0];
1896  s->mv[dir][n + 1][1] = s->mv[dir][n][1];
1897  }
1898 }
1899 
1900 /** Motion compensation for direct or interpolated blocks in B-frames
1901  */
1902 static void vc1_interp_mc(VC1Context *v)
1903 {
1904  MpegEncContext *s = &v->s;
1905  H264ChromaContext *h264chroma = &v->h264chroma;
1906  uint8_t *srcY, *srcU, *srcV;
1907  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
1908  int off, off_uv;
1909  int v_edge_pos = s->v_edge_pos >> v->field_mode;
1910  int use_ic = v->next_use_ic;
1911 
1912  if (!v->field_mode && !v->s.next_picture.f.data[0])
1913  return;
1914 
1915  mx = s->mv[1][0][0];
1916  my = s->mv[1][0][1];
1917  uvmx = (mx + ((mx & 3) == 3)) >> 1;
1918  uvmy = (my + ((my & 3) == 3)) >> 1;
1919  if (v->field_mode) {
1920  if (v->cur_field_type != v->ref_field_type[1])
1921  my = my - 2 + 4 * v->cur_field_type;
1922  uvmy = uvmy - 2 + 4 * v->cur_field_type;
1923  }
1924  if (v->fastuvmc) {
1925  uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
1926  uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
1927  }
1928  srcY = s->next_picture.f.data[0];
1929  srcU = s->next_picture.f.data[1];
1930  srcV = s->next_picture.f.data[2];
1931 
1932  src_x = s->mb_x * 16 + (mx >> 2);
1933  src_y = s->mb_y * 16 + (my >> 2);
1934  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
1935  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
1936 
1937  if (v->profile != PROFILE_ADVANCED) {
1938  src_x = av_clip( src_x, -16, s->mb_width * 16);
1939  src_y = av_clip( src_y, -16, s->mb_height * 16);
1940  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
1941  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
1942  } else {
1943  src_x = av_clip( src_x, -17, s->avctx->coded_width);
1944  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
1945  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1946  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1947  }
1948 
1949  srcY += src_y * s->linesize + src_x;
1950  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1951  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1952 
1953  if (v->field_mode && v->ref_field_type[1]) {
1954  srcY += s->current_picture_ptr->f.linesize[0];
1955  srcU += s->current_picture_ptr->f.linesize[1];
1956  srcV += s->current_picture_ptr->f.linesize[2];
1957  }
1958 
1959  /* for grayscale we should not try to read from unknown area */
1960  if (s->flags & CODEC_FLAG_GRAY) {
1961  srcU = s->edge_emu_buffer + 18 * s->linesize;
1962  srcV = s->edge_emu_buffer + 18 * s->linesize;
1963  }
1964 
1965  if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
1966  || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
1967  || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
1968  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
1969 
1970  srcY -= s->mspel * (1 + s->linesize);
1971  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, s->linesize, srcY, s->linesize,
1972  17 + s->mspel * 2, 17 + s->mspel * 2,
1973  src_x - s->mspel, src_y - s->mspel,
1974  s->h_edge_pos, v_edge_pos);
1975  srcY = s->edge_emu_buffer;
1976  s->vdsp.emulated_edge_mc(uvbuf, s->uvlinesize, srcU, s->uvlinesize,
1977  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1978  s->h_edge_pos >> 1, v_edge_pos >> 1);
1979  s->vdsp.emulated_edge_mc(uvbuf + 16, s->uvlinesize, srcV, s->uvlinesize,
1980  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
1981  s->h_edge_pos >> 1, v_edge_pos >> 1);
1982  srcU = uvbuf;
1983  srcV = uvbuf + 16;
1984  /* if we deal with range reduction we need to scale source blocks */
1985  if (v->rangeredfrm) {
1986  int i, j;
1987  uint8_t *src, *src2;
1988 
1989  src = srcY;
1990  for (j = 0; j < 17 + s->mspel * 2; j++) {
1991  for (i = 0; i < 17 + s->mspel * 2; i++)
1992  src[i] = ((src[i] - 128) >> 1) + 128;
1993  src += s->linesize;
1994  }
1995  src = srcU;
1996  src2 = srcV;
1997  for (j = 0; j < 9; j++) {
1998  for (i = 0; i < 9; i++) {
1999  src[i] = ((src[i] - 128) >> 1) + 128;
2000  src2[i] = ((src2[i] - 128) >> 1) + 128;
2001  }
2002  src += s->uvlinesize;
2003  src2 += s->uvlinesize;
2004  }
2005  }
2006 
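 /* Intensity compensation, roughly: the reference samples are remapped
  * through the per-field lookup tables built for the next picture
  * (next_luty / next_lutuv); f selects the top- or bottom-field table,
  * either from the reference field type or from the line parity. */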
2007  if (use_ic) {
2008  uint8_t (*luty )[256] = v->next_luty;
2009  uint8_t (*lutuv)[256] = v->next_lutuv;
2010  int i, j;
2011  uint8_t *src, *src2;
2012 
2013  src = srcY;
2014  for (j = 0; j < 17 + s->mspel * 2; j++) {
2015  int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
2016  for (i = 0; i < 17 + s->mspel * 2; i++)
2017  src[i] = luty[f][src[i]];
2018  src += s->linesize;
2019  }
2020  src = srcU;
2021  src2 = srcV;
2022  for (j = 0; j < 9; j++) {
2023  int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
2024  for (i = 0; i < 9; i++) {
2025  src[i] = lutuv[f][src[i]];
2026  src2[i] = lutuv[f][src2[i]];
2027  }
2028  src += s->uvlinesize;
2029  src2 += s->uvlinesize;
2030  }
2031  }
2032  srcY += s->mspel * (1 + s->linesize);
2033  }
2034 
2035  off = 0;
2036  off_uv = 0;
2037 
2038  if (s->mspel) {
2039  dxy = ((my & 3) << 2) | (mx & 3);
2040  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
2041  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
2042  srcY += s->linesize * 8;
2043  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
2044  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
2045  } else { // hpel mc
2046  dxy = (my & 2) | ((mx & 2) >> 1);
2047 
2048  if (!v->rnd)
2049  s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
2050  else
2051  s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
2052  }
2053 
2054  if (s->flags & CODEC_FLAG_GRAY) return;
2055  /* Chroma MC always uses qpel bilinear */
2056  uvmx = (uvmx & 3) << 1;
2057  uvmy = (uvmy & 3) << 1;
2058  if (!v->rnd) {
2059  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2060  h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2061  } else {
2062  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
2063  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
2064  }
2065 }
2066 
2067 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2068 {
2069  int n = bfrac;
2070 
2071 #if B_FRACTION_DEN==256
2072  if (inv)
2073  n -= 256;
2074  if (!qs)
2075  return 2 * ((value * n + 255) >> 9);
2076  return (value * n + 128) >> 8;
2077 #else
2078  if (inv)
2079  n -= B_FRACTION_DEN;
2080  if (!qs)
2081  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2082  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2083 #endif
2084 }
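/* Roughly, scale_mv() scales a co-located MV from the next reference by the
 * B-frame's temporal position: with B_FRACTION_DEN == 256 the forward
 * component is about value * bfrac / 256 and the backward component about
 * value * (bfrac - 256) / 256, while the !qs branch keeps half-pel vectors on
 * the half-pel grid.  Illustrative numbers (assumed): value = 40, bfrac = 128,
 * qs = 1 yields +20 forward and -20 backward. */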
2085 
2086 /** Reconstruct motion vector for B-frame and do motion compensation
2087  */
2088 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2089  int direct, int mode)
2090 {
2091  if (direct) {
2092  vc1_mc_1mv(v, 0);
2093  vc1_interp_mc(v);
2094  return;
2095  }
2096  if (mode == BMV_TYPE_INTERPOLATED) {
2097  vc1_mc_1mv(v, 0);
2098  vc1_interp_mc(v);
2099  return;
2100  }
2101 
2102  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2103 }
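/* For direct and interpolated MBs the forward prediction is fetched first
 * (vc1_mc_1mv(v, 0)) and vc1_interp_mc() then averages the backward
 * (next-picture) prediction into it, so dest[] roughly ends up holding
 * (forward + backward + rounding) >> 1. */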
2104 
2105 static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
2106  int direct, int mvtype)
2107 {
2108  MpegEncContext *s = &v->s;
2109  int xy, wrap, off = 0;
2110  int16_t *A, *B, *C;
2111  int px, py;
2112  int sum;
2113  int r_x, r_y;
2114  const uint8_t *is_intra = v->mb_type[0];
2115 
2116  r_x = v->range_x;
2117  r_y = v->range_y;
2118  /* scale MV difference to be quad-pel */
2119  dmv_x[0] <<= 1 - s->quarter_sample;
2120  dmv_y[0] <<= 1 - s->quarter_sample;
2121  dmv_x[1] <<= 1 - s->quarter_sample;
2122  dmv_y[1] <<= 1 - s->quarter_sample;
2123 
2124  wrap = s->b8_stride;
2125  xy = s->block_index[0];
2126 
2127  if (s->mb_intra) {
2128  s->current_picture.motion_val[0][xy + v->blocks_off][0] =
2129  s->current_picture.motion_val[0][xy + v->blocks_off][1] =
2130  s->current_picture.motion_val[1][xy + v->blocks_off][0] =
2131  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
2132  return;
2133  }
2134  if (!v->field_mode) {
2135  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
2136  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
2137  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
2138  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);
2139 
2140  /* Pullback predicted motion vectors as specified in 8.4.5.4 */
2141  s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2142  s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2143  s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
2144  s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
2145  }
2146  if (direct) {
2147  s->current_picture.motion_val[0][xy + v->blocks_off][0] = s->mv[0][0][0];
2148  s->current_picture.motion_val[0][xy + v->blocks_off][1] = s->mv[0][0][1];
2149  s->current_picture.motion_val[1][xy + v->blocks_off][0] = s->mv[1][0][0];
2150  s->current_picture.motion_val[1][xy + v->blocks_off][1] = s->mv[1][0][1];
2151  return;
2152  }
2153 
2154  if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2155  C = s->current_picture.motion_val[0][xy - 2];
2156  A = s->current_picture.motion_val[0][xy - wrap * 2];
2157  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2158  B = s->current_picture.motion_val[0][xy - wrap * 2 + off];
2159 
2160  if (!s->mb_x) C[0] = C[1] = 0;
2161  if (!s->first_slice_line) { // predictor A is not out of bounds
2162  if (s->mb_width == 1) {
2163  px = A[0];
2164  py = A[1];
2165  } else {
2166  px = mid_pred(A[0], B[0], C[0]);
2167  py = mid_pred(A[1], B[1], C[1]);
2168  }
2169  } else if (s->mb_x) { // predictor C is not out of bounds
2170  px = C[0];
2171  py = C[1];
2172  } else {
2173  px = py = 0;
2174  }
2175  /* Pullback MV as specified in 8.3.5.3.4 */
2176  {
2177  int qx, qy, X, Y;
2178  if (v->profile < PROFILE_ADVANCED) {
2179  qx = (s->mb_x << 5);
2180  qy = (s->mb_y << 5);
2181  X = (s->mb_width << 5) - 4;
2182  Y = (s->mb_height << 5) - 4;
2183  if (qx + px < -28) px = -28 - qx;
2184  if (qy + py < -28) py = -28 - qy;
2185  if (qx + px > X) px = X - qx;
2186  if (qy + py > Y) py = Y - qy;
2187  } else {
2188  qx = (s->mb_x << 6);
2189  qy = (s->mb_y << 6);
2190  X = (s->mb_width << 6) - 4;
2191  Y = (s->mb_height << 6) - 4;
2192  if (qx + px < -60) px = -60 - qx;
2193  if (qy + py < -60) py = -60 - qy;
2194  if (qx + px > X) px = X - qx;
2195  if (qy + py > Y) py = Y - qy;
2196  }
2197  }
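 /* Illustration: at mb_x = 0, qx = 0, so a predictor px below -28 (-60 for
  * the advanced profile) is clamped to that bound; at the right edge px is
  * clamped so that qx + px <= X.  The net effect is that the prediction
  * cannot point more than roughly one macroblock outside the coded area. */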
2198  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2199  if (0 && !s->first_slice_line && s->mb_x) {
2200  if (is_intra[xy - wrap])
2201  sum = FFABS(px) + FFABS(py);
2202  else
2203  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2204  if (sum > 32) {
2205  if (get_bits1(&s->gb)) {
2206  px = A[0];
2207  py = A[1];
2208  } else {
2209  px = C[0];
2210  py = C[1];
2211  }
2212  } else {
2213  if (is_intra[xy - 2])
2214  sum = FFABS(px) + FFABS(py);
2215  else
2216  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2217  if (sum > 32) {
2218  if (get_bits1(&s->gb)) {
2219  px = A[0];
2220  py = A[1];
2221  } else {
2222  px = C[0];
2223  py = C[1];
2224  }
2225  }
2226  }
2227  }
2228  /* store MV using signed modulus of MV range defined in 4.11 */
2229  s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
2230  s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
2231  }
2232  if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
2233  C = s->current_picture.motion_val[1][xy - 2];
2234  A = s->current_picture.motion_val[1][xy - wrap * 2];
2235  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
2236  B = s->current_picture.motion_val[1][xy - wrap * 2 + off];
2237 
2238  if (!s->mb_x)
2239  C[0] = C[1] = 0;
2240  if (!s->first_slice_line) { // predictor A is not out of bounds
2241  if (s->mb_width == 1) {
2242  px = A[0];
2243  py = A[1];
2244  } else {
2245  px = mid_pred(A[0], B[0], C[0]);
2246  py = mid_pred(A[1], B[1], C[1]);
2247  }
2248  } else if (s->mb_x) { // predictor C is not out of bounds
2249  px = C[0];
2250  py = C[1];
2251  } else {
2252  px = py = 0;
2253  }
2254  /* Pullback MV as specified in 8.3.5.3.4 */
2255  {
2256  int qx, qy, X, Y;
2257  if (v->profile < PROFILE_ADVANCED) {
2258  qx = (s->mb_x << 5);
2259  qy = (s->mb_y << 5);
2260  X = (s->mb_width << 5) - 4;
2261  Y = (s->mb_height << 5) - 4;
2262  if (qx + px < -28) px = -28 - qx;
2263  if (qy + py < -28) py = -28 - qy;
2264  if (qx + px > X) px = X - qx;
2265  if (qy + py > Y) py = Y - qy;
2266  } else {
2267  qx = (s->mb_x << 6);
2268  qy = (s->mb_y << 6);
2269  X = (s->mb_width << 6) - 4;
2270  Y = (s->mb_height << 6) - 4;
2271  if (qx + px < -60) px = -60 - qx;
2272  if (qy + py < -60) py = -60 - qy;
2273  if (qx + px > X) px = X - qx;
2274  if (qy + py > Y) py = Y - qy;
2275  }
2276  }
2277  /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
2278  if (0 && !s->first_slice_line && s->mb_x) {
2279  if (is_intra[xy - wrap])
2280  sum = FFABS(px) + FFABS(py);
2281  else
2282  sum = FFABS(px - A[0]) + FFABS(py - A[1]);
2283  if (sum > 32) {
2284  if (get_bits1(&s->gb)) {
2285  px = A[0];
2286  py = A[1];
2287  } else {
2288  px = C[0];
2289  py = C[1];
2290  }
2291  } else {
2292  if (is_intra[xy - 2])
2293  sum = FFABS(px) + FFABS(py);
2294  else
2295  sum = FFABS(px - C[0]) + FFABS(py - C[1]);
2296  if (sum > 32) {
2297  if (get_bits1(&s->gb)) {
2298  px = A[0];
2299  py = A[1];
2300  } else {
2301  px = C[0];
2302  py = C[1];
2303  }
2304  }
2305  }
2306  }
2307  /* store MV using signed modulus of MV range defined in 4.11 */
2308 
2309  s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
2310  s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
2311  }
2312  s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
2313  s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
2314  s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
2315  s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
2316 }
2317 
2318 static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
2319 {
2320  int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
2321  MpegEncContext *s = &v->s;
2322  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2323 
2324  if (v->bmvtype == BMV_TYPE_DIRECT) {
2325  int total_opp, k, f;
2326  if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
2327  s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2328  v->bfraction, 0, s->quarter_sample);
2329  s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2330  v->bfraction, 0, s->quarter_sample);
2331  s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
2332  v->bfraction, 1, s->quarter_sample);
2333  s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
2334  v->bfraction, 1, s->quarter_sample);
2335 
2336  total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
2337  + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
2338  + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
2339  + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
2340  f = (total_opp > 2) ? 1 : 0;
2341  } else {
2342  s->mv[0][0][0] = s->mv[0][0][1] = 0;
2343  s->mv[1][0][0] = s->mv[1][0][1] = 0;
2344  f = 0;
2345  }
2346  v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
2347  for (k = 0; k < 4; k++) {
2348  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
2349  s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
2350  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
2351  s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
2352  v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
2353  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
2354  }
2355  return;
2356  }
2357  if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
2358  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2359  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2360  return;
2361  }
2362  if (dir) { // backward
2363  vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
2364  if (n == 3 || mv1) {
2365  vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
2366  }
2367  } else { // forward
2368  vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
2369  if (n == 3 || mv1) {
2370  vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
2371  }
2372  }
2373 }
2374 
2375 /** Get predicted DC value for I-frames only
2376  * prediction dir: left=0, top=1
2377  * @param s MpegEncContext
2378  * @param overlap flag indicating that overlap filtering is used
2379  * @param pq integer part of picture quantizer
2380  * @param[in] n block index in the current MB
2381  * @param dc_val_ptr Pointer to DC predictor
2382  * @param dir_ptr Prediction direction for use in AC prediction
2383  */
2384 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2385  int16_t **dc_val_ptr, int *dir_ptr)
2386 {
2387  int a, b, c, wrap, pred, scale;
2388  int16_t *dc_val;
2389  static const uint16_t dcpred[32] = {
2390  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2391  114, 102, 93, 85, 79, 73, 68, 64,
2392  60, 57, 54, 51, 49, 47, 45, 43,
2393  41, 39, 38, 37, 35, 34, 33
2394  };
2395 
2396  /* find prediction - wmv3_dc_scale always used here in fact */
2397  if (n < 4) scale = s->y_dc_scale;
2398  else scale = s->c_dc_scale;
2399 
2400  wrap = s->block_wrap[n];
2401  dc_val = s->dc_val[0] + s->block_index[n];
2402 
2403  /* B A
2404  * C X
2405  */
2406  c = dc_val[ - 1];
2407  b = dc_val[ - 1 - wrap];
2408  a = dc_val[ - wrap];
2409 
2410  if (pq < 9 || !overlap) {
2411  /* Set outer values */
2412  if (s->first_slice_line && (n != 2 && n != 3))
2413  b = a = dcpred[scale];
2414  if (s->mb_x == 0 && (n != 1 && n != 3))
2415  b = c = dcpred[scale];
2416  } else {
2417  /* Set outer values */
2418  if (s->first_slice_line && (n != 2 && n != 3))
2419  b = a = 0;
2420  if (s->mb_x == 0 && (n != 1 && n != 3))
2421  b = c = 0;
2422  }
2423 
2424  if (abs(a - b) <= abs(b - c)) {
2425  pred = c;
2426  *dir_ptr = 1; // left
2427  } else {
2428  pred = a;
2429  *dir_ptr = 0; // top
2430  }
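 /* Gradient rule: a small |a - b| means the row above is smooth, so the
  * dominant gradient is vertical and the left neighbour C is the better
  * predictor; otherwise predict from the top neighbour A.  E.g. (values
  * assumed) a = 100, b = 98, c = 60: |a - b| = 2 <= |b - c| = 38, pred = c. */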
2431 
2432  /* update predictor */
2433  *dc_val_ptr = &dc_val[0];
2434  return pred;
2435 }
2436 
2437 
2438 /** Get predicted DC value
2439  * prediction dir: left=0, top=1
2440  * @param s MpegEncContext
2441  * @param overlap flag indicating that overlap filtering is used
2442  * @param pq integer part of picture quantizer
2443  * @param[in] n block index in the current MB
2444  * @param a_avail flag indicating top block availability
2445  * @param c_avail flag indicating left block availability
2446  * @param dc_val_ptr Pointer to DC predictor
2447  * @param dir_ptr Prediction direction for use in AC prediction
2448  */
2449 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2450  int a_avail, int c_avail,
2451  int16_t **dc_val_ptr, int *dir_ptr)
2452 {
2453  int a, b, c, wrap, pred;
2454  int16_t *dc_val;
2455  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2456  int q1, q2 = 0;
2457  int dqscale_index;
2458 
2459  wrap = s->block_wrap[n];
2460  dc_val = s->dc_val[0] + s->block_index[n];
2461 
2462  /* B A
2463  * C X
2464  */
2465  c = dc_val[ - 1];
2466  b = dc_val[ - 1 - wrap];
2467  a = dc_val[ - wrap];
2468  /* scale predictors if needed */
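 /* The rescale below approximates x * y_dc_scale[q2] / y_dc_scale[q1] in
  * fixed point: ff_vc1_dqscale[s - 1] is roughly (1 << 18) / s, and the
  * + 0x20000 term rounds to nearest before the >> 18. */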
2469  q1 = s->current_picture.qscale_table[mb_pos];
2470  dqscale_index = s->y_dc_scale_table[q1] - 1;
2471  if (dqscale_index < 0)
2472  return 0;
2473  if (c_avail && (n != 1 && n != 3)) {
2474  q2 = s->current_picture.qscale_table[mb_pos - 1];
2475  if (q2 && q2 != q1)
2476  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2477  }
2478  if (a_avail && (n != 2 && n != 3)) {
2479  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2480  if (q2 && q2 != q1)
2481  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2482  }
2483  if (a_avail && c_avail && (n != 3)) {
2484  int off = mb_pos;
2485  if (n != 1)
2486  off--;
2487  if (n != 2)
2488  off -= s->mb_stride;
2489  q2 = s->current_picture.qscale_table[off];
2490  if (q2 && q2 != q1)
2491  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2492  }
2493 
2494  if (a_avail && c_avail) {
2495  if (abs(a - b) <= abs(b - c)) {
2496  pred = c;
2497  *dir_ptr = 1; // left
2498  } else {
2499  pred = a;
2500  *dir_ptr = 0; // top
2501  }
2502  } else if (a_avail) {
2503  pred = a;
2504  *dir_ptr = 0; // top
2505  } else if (c_avail) {
2506  pred = c;
2507  *dir_ptr = 1; // left
2508  } else {
2509  pred = 0;
2510  *dir_ptr = 1; // left
2511  }
2512 
2513  /* update predictor */
2514  *dc_val_ptr = &dc_val[0];
2515  return pred;
2516 }
2517 
2518 /** @} */ // Block group
2519 
2520 /**
2521  * @name VC1 Macroblock-level functions in Simple/Main Profiles
2522  * @see 7.1.4, p91 and 8.1.1.7, p(1)04
2523  * @{
2524  */
2525 
2526 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2527  uint8_t **coded_block_ptr)
2528 {
2529  int xy, wrap, pred, a, b, c;
2530 
2531  xy = s->block_index[n];
2532  wrap = s->b8_stride;
2533 
2534  /* B C
2535  * A X
2536  */
2537  a = s->coded_block[xy - 1 ];
2538  b = s->coded_block[xy - 1 - wrap];
2539  c = s->coded_block[xy - wrap];
2540 
2541  if (b == c) {
2542  pred = a;
2543  } else {
2544  pred = c;
2545  }
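 /* In words: if the top (c) and top-left (b) coded flags agree, predict from
  * the left neighbour (a); if they disagree, the top neighbour (c) wins. */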
2546 
2547  /* store value */
2548  *coded_block_ptr = &s->coded_block[xy];
2549 
2550  return pred;
2551 }
2552 
2553 /**
2554  * Decode one AC coefficient
2555  * @param v The VC1 context
2556  * @param last Last coefficient
2557  * @param skip How many zero coefficients to skip
2558  * @param value Decoded AC coefficient value
2559  * @param codingset set of VLC tables used to decode the data
2560  * @see 8.1.3.4
2561  */
2562 static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
2563  int *value, int codingset)
2564 {
2565  GetBitContext *gb = &v->s.gb;
2566  int index, escape, run = 0, level = 0, lst = 0;
2567 
2568  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2569  if (index != ff_vc1_ac_sizes[codingset] - 1) {
2570  run = vc1_index_decode_table[codingset][index][0];
2571  level = vc1_index_decode_table[codingset][index][1];
2572  lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
2573  if (get_bits1(gb))
2574  level = -level;
2575  } else {
2576  escape = decode210(gb);
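 /* decode210() distinguishes the three escape modes:
  * 0 - re-decode run/level and add a level delta,
  * 1 - re-decode run/level and add a run delta (+1),
  * 2 - fully escaped: explicit last flag, then run and level are read with
  *     the esc3_run_length / esc3_level_length widths set up below. */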
2577  if (escape != 2) {
2578  index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
2579  run = vc1_index_decode_table[codingset][index][0];
2580  level = vc1_index_decode_table[codingset][index][1];
2581  lst = index >= vc1_last_decode_table[codingset];
2582  if (escape == 0) {
2583  if (lst)
2584  level += vc1_last_delta_level_table[codingset][run];
2585  else
2586  level += vc1_delta_level_table[codingset][run];
2587  } else {
2588  if (lst)
2589  run += vc1_last_delta_run_table[codingset][level] + 1;
2590  else
2591  run += vc1_delta_run_table[codingset][level] + 1;
2592  }
2593  if (get_bits1(gb))
2594  level = -level;
2595  } else {
2596  int sign;
2597  lst = get_bits1(gb);
2598  if (v->s.esc3_level_length == 0) {
2599  if (v->pq < 8 || v->dquantfrm) { // table 59
2600  v->s.esc3_level_length = get_bits(gb, 3);
2601  if (!v->s.esc3_level_length)
2602  v->s.esc3_level_length = get_bits(gb, 2) + 8;
2603  } else { // table 60
2604  v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
2605  }
2606  v->s.esc3_run_length = 3 + get_bits(gb, 2);
2607  }
2608  run = get_bits(gb, v->s.esc3_run_length);
2609  sign = get_bits1(gb);
2610  level = get_bits(gb, v->s.esc3_level_length);
2611  if (sign)
2612  level = -level;
2613  }
2614  }
2615 
2616  *last = lst;
2617  *skip = run;
2618  *value = level;
2619 }
2620 
2621 /** Decode intra block in intra frames - should be faster than decode_intra_block
2622  * @param v VC1Context
2623  * @param block block to decode
2624  * @param[in] n subblock index
2625  * @param coded whether AC coefficients are present
2626  * @param codingset set of VLC tables used to decode the data
2627  */
2628 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
2629  int coded, int codingset)
2630 {
2631  GetBitContext *gb = &v->s.gb;
2632  MpegEncContext *s = &v->s;
2633  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2634  int i;
2635  int16_t *dc_val;
2636  int16_t *ac_val, *ac_val2;
2637  int dcdiff;
2638 
2639  /* Get DC differential */
2640  if (n < 4) {
2641  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2642  } else {
2643  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2644  }
2645  if (dcdiff < 0) {
2646  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2647  return -1;
2648  }
2649  if (dcdiff) {
2650  if (dcdiff == 119 /* ESC index value */) {
2651  /* TODO: Optimize */
2652  if (v->pq == 1) dcdiff = get_bits(gb, 10);
2653  else if (v->pq == 2) dcdiff = get_bits(gb, 9);
2654  else dcdiff = get_bits(gb, 8);
2655  } else {
2656  if (v->pq == 1)
2657  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2658  else if (v->pq == 2)
2659  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2660  }
2661  if (get_bits1(gb))
2662  dcdiff = -dcdiff;
2663  }
2664 
2665  /* Prediction */
2666  dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
2667  *dc_val = dcdiff;
2668 
2669  /* Store the quantized DC coeff, used for prediction */
2670  if (n < 4) {
2671  block[0] = dcdiff * s->y_dc_scale;
2672  } else {
2673  block[0] = dcdiff * s->c_dc_scale;
2674  }
2675  /* Skip ? */
2676  if (!coded) {
2677  goto not_coded;
2678  }
2679 
2680  // AC Decoding
2681  i = 1;
2682 
2683  {
2684  int last = 0, skip, value;
2685  const uint8_t *zz_table;
2686  int scale;
2687  int k;
2688 
2689  scale = v->pq * 2 + v->halfpq;
2690 
2691  if (v->s.ac_pred) {
2692  if (!dc_pred_dir)
2693  zz_table = v->zz_8x8[2];
2694  else
2695  zz_table = v->zz_8x8[3];
2696  } else
2697  zz_table = v->zz_8x8[1];
2698 
2699  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2700  ac_val2 = ac_val;
2701  if (dc_pred_dir) // left
2702  ac_val -= 16;
2703  else // top
2704  ac_val -= 16 * s->block_wrap[n];
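 /* ac_val layout, roughly (16 int16_t per block): entries 1..7 hold the
  * coefficients reused for left AC prediction (the block's first column),
  * entries 9..15 those reused for top prediction (the first row).  Stepping
  * back by 16 selects the left neighbour's store, by 16 * block_wrap the
  * block above. */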
2705 
2706  while (!last) {
2707  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2708  i += skip;
2709  if (i > 63)
2710  break;
2711  block[zz_table[i++]] = value;
2712  }
2713 
2714  /* apply AC prediction if needed */
2715  if (s->ac_pred) {
2716  if (dc_pred_dir) { // left
2717  for (k = 1; k < 8; k++)
2718  block[k << v->left_blk_sh] += ac_val[k];
2719  } else { // top
2720  for (k = 1; k < 8; k++)
2721  block[k << v->top_blk_sh] += ac_val[k + 8];
2722  }
2723  }
2724  /* save AC coeffs for further prediction */
2725  for (k = 1; k < 8; k++) {
2726  ac_val2[k] = block[k << v->left_blk_sh];
2727  ac_val2[k + 8] = block[k << v->top_blk_sh];
2728  }
2729 
2730  /* scale AC coeffs */
2731  for (k = 1; k < 64; k++)
2732  if (block[k]) {
2733  block[k] *= scale;
2734  if (!v->pquantizer)
2735  block[k] += (block[k] < 0) ? -v->pq : v->pq;
2736  }
2737 
2738  if (s->ac_pred) i = 63;
2739  }
2740 
2741 not_coded:
2742  if (!coded) {
2743  int k, scale;
2744  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2745  ac_val2 = ac_val;
2746 
2747  i = 0;
2748  scale = v->pq * 2 + v->halfpq;
2749  memset(ac_val2, 0, 16 * 2);
2750  if (dc_pred_dir) { // left
2751  ac_val -= 16;
2752  if (s->ac_pred)
2753  memcpy(ac_val2, ac_val, 8 * 2);
2754  } else { // top
2755  ac_val -= 16 * s->block_wrap[n];
2756  if (s->ac_pred)
2757  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2758  }
2759 
2760  /* apply AC prediction if needed */
2761  if (s->ac_pred) {
2762  if (dc_pred_dir) { //left
2763  for (k = 1; k < 8; k++) {
2764  block[k << v->left_blk_sh] = ac_val[k] * scale;
2765  if (!v->pquantizer && block[k << v->left_blk_sh])
2766  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
2767  }
2768  } else { // top
2769  for (k = 1; k < 8; k++) {
2770  block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
2771  if (!v->pquantizer && block[k << v->top_blk_sh])
2772  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
2773  }
2774  }
2775  i = 63;
2776  }
2777  }
2778  s->block_last_index[n] = i;
2779 
2780  return 0;
2781 }
2782 
2783 /** Decode intra block in intra frames - should be faster than decode_intra_block
2784  * @param v VC1Context
2785  * @param block block to decode
2786  * @param[in] n subblock number
2787  * @param coded whether AC coefficients are present
2788  * @param codingset set of VLC tables used to decode the data
2789  * @param mquant quantizer value for this macroblock
2790  */
2791 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2792  int coded, int codingset, int mquant)
2793 {
2794  GetBitContext *gb = &v->s.gb;
2795  MpegEncContext *s = &v->s;
2796  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2797  int i;
2798  int16_t *dc_val = NULL;
2799  int16_t *ac_val, *ac_val2;
2800  int dcdiff;
2801  int a_avail = v->a_avail, c_avail = v->c_avail;
2802  int use_pred = s->ac_pred;
2803  int scale;
2804  int q1, q2 = 0;
2805  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2806 
2807  /* Get DC differential */
2808  if (n < 4) {
2809  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2810  } else {
2811  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
2812  }
2813  if (dcdiff < 0) {
2814  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2815  return -1;
2816  }
2817  if (dcdiff) {
2818  if (dcdiff == 119 /* ESC index value */) {
2819  /* TODO: Optimize */
2820  if (mquant == 1) dcdiff = get_bits(gb, 10);
2821  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2822  else dcdiff = get_bits(gb, 8);
2823  } else {
2824  if (mquant == 1)
2825  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2826  else if (mquant == 2)
2827  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2828  }
2829  if (get_bits1(gb))
2830  dcdiff = -dcdiff;
2831  }
2832 
2833  /* Prediction */
2834  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2835  *dc_val = dcdiff;
2836 
2837  /* Store the quantized DC coeff, used for prediction */
2838  if (n < 4) {
2839  block[0] = dcdiff * s->y_dc_scale;
2840  } else {
2841  block[0] = dcdiff * s->c_dc_scale;
2842  }
2843 
2844  //AC Decoding
2845  i = 1;
2846 
2847  /* check if AC is needed at all */
2848  if (!a_avail && !c_avail)
2849  use_pred = 0;
2850  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2851  ac_val2 = ac_val;
2852 
2853  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2854 
2855  if (dc_pred_dir) // left
2856  ac_val -= 16;
2857  else // top
2858  ac_val -= 16 * s->block_wrap[n];
2859 
2860  q1 = s->current_picture.qscale_table[mb_pos];
2861  if ( dc_pred_dir && c_avail && mb_pos)
2862  q2 = s->current_picture.qscale_table[mb_pos - 1];
2863  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2864  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2865  if ( dc_pred_dir && n == 1)
2866  q2 = q1;
2867  if (!dc_pred_dir && n == 2)
2868  q2 = q1;
2869  if (n == 3)
2870  q2 = q1;
2871 
2872  if (coded) {
2873  int last = 0, skip, value;
2874  const uint8_t *zz_table;
2875  int k;
2876 
2877  if (v->s.ac_pred) {
2878  if (!use_pred && v->fcm == ILACE_FRAME) {
2879  zz_table = v->zzi_8x8;
2880  } else {
2881  if (!dc_pred_dir) // top
2882  zz_table = v->zz_8x8[2];
2883  else // left
2884  zz_table = v->zz_8x8[3];
2885  }
2886  } else {
2887  if (v->fcm != ILACE_FRAME)
2888  zz_table = v->zz_8x8[1];
2889  else
2890  zz_table = v->zzi_8x8;
2891  }
2892 
2893  while (!last) {
2894  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2895  i += skip;
2896  if (i > 63)
2897  break;
2898  block[zz_table[i++]] = value;
2899  }
2900 
2901  /* apply AC prediction if needed */
2902  if (use_pred) {
2903  /* scale predictors if needed*/
2904  if (q2 && q1 != q2) {
2905  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2906  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2907 
2908  if (q1 < 1)
2909  return AVERROR_INVALIDDATA;
2910  if (dc_pred_dir) { // left
2911  for (k = 1; k < 8; k++)
2912  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2913  } else { // top
2914  for (k = 1; k < 8; k++)
2915  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2916  }
2917  } else {
2918  if (dc_pred_dir) { //left
2919  for (k = 1; k < 8; k++)
2920  block[k << v->left_blk_sh] += ac_val[k];
2921  } else { //top
2922  for (k = 1; k < 8; k++)
2923  block[k << v->top_blk_sh] += ac_val[k + 8];
2924  }
2925  }
2926  }
2927  /* save AC coeffs for further prediction */
2928  for (k = 1; k < 8; k++) {
2929  ac_val2[k ] = block[k << v->left_blk_sh];
2930  ac_val2[k + 8] = block[k << v->top_blk_sh];
2931  }
2932 
2933  /* scale AC coeffs */
2934  for (k = 1; k < 64; k++)
2935  if (block[k]) {
2936  block[k] *= scale;
2937  if (!v->pquantizer)
2938  block[k] += (block[k] < 0) ? -mquant : mquant;
2939  }
2940 
2941  if (use_pred) i = 63;
2942  } else { // no AC coeffs
2943  int k;
2944 
2945  memset(ac_val2, 0, 16 * 2);
2946  if (dc_pred_dir) { // left
2947  if (use_pred) {
2948  memcpy(ac_val2, ac_val, 8 * 2);
2949  if (q2 && q1 != q2) {
2950  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2951  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2952  if (q1 < 1)
2953  return AVERROR_INVALIDDATA;
2954  for (k = 1; k < 8; k++)
2955  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2956  }
2957  }
2958  } else { // top
2959  if (use_pred) {
2960  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2961  if (q2 && q1 != q2) {
2962  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2963  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2964  if (q1 < 1)
2965  return AVERROR_INVALIDDATA;
2966  for (k = 1; k < 8; k++)
2967  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2968  }
2969  }
2970  }
2971 
2972  /* apply AC prediction if needed */
2973  if (use_pred) {
2974  if (dc_pred_dir) { // left
2975  for (k = 1; k < 8; k++) {
2976  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2977  if (!v->pquantizer && block[k << v->left_blk_sh])
2978  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2979  }
2980  } else { // top
2981  for (k = 1; k < 8; k++) {
2982  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2983  if (!v->pquantizer && block[k << v->top_blk_sh])
2984  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2985  }
2986  }
2987  i = 63;
2988  }
2989  }
2990  s->block_last_index[n] = i;
2991 
2992  return 0;
2993 }
2994 
2995 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2996  * @param v VC1Context
2997  * @param block block to decode
2998  * @param[in] n subblock index
2999  * @param coded whether AC coefficients are present
3000  * @param mquant block quantizer
3001  * @param codingset set of VLC tables used to decode the data
3002  */
3003 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3004  int coded, int mquant, int codingset)
3005 {
3006  GetBitContext *gb = &v->s.gb;
3007  MpegEncContext *s = &v->s;
3008  int dc_pred_dir = 0; /* Direction of the DC prediction used */
3009  int i;
3010  int16_t *dc_val = NULL;
3011  int16_t *ac_val, *ac_val2;
3012  int dcdiff;
3013  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3014  int a_avail = v->a_avail, c_avail = v->c_avail;
3015  int use_pred = s->ac_pred;
3016  int scale;
3017  int q1, q2 = 0;
3018 
3019  s->dsp.clear_block(block);
3020 
3021  /* XXX: Guard against dumb values of mquant */
3022  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3023 
3024  /* Set DC scale - y and c use the same */
3025  s->y_dc_scale = s->y_dc_scale_table[mquant];
3026  s->c_dc_scale = s->c_dc_scale_table[mquant];
3027 
3028  /* Get DC differential */
3029  if (n < 4) {
3030  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3031  } else {
3032  dcdiff = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
3033  }
3034  if (dcdiff < 0) {
3035  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3036  return -1;
3037  }
3038  if (dcdiff) {
3039  if (dcdiff == 119 /* ESC index value */) {
3040  /* TODO: Optimize */
3041  if (mquant == 1) dcdiff = get_bits(gb, 10);
3042  else if (mquant == 2) dcdiff = get_bits(gb, 9);
3043  else dcdiff = get_bits(gb, 8);
3044  } else {
3045  if (mquant == 1)
3046  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3047  else if (mquant == 2)
3048  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3049  }
3050  if (get_bits1(gb))
3051  dcdiff = -dcdiff;
3052  }
3053 
3054  /* Prediction */
3055  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3056  *dc_val = dcdiff;
3057 
3058  /* Store the quantized DC coeff, used for prediction */
3059 
3060  if (n < 4) {
3061  block[0] = dcdiff * s->y_dc_scale;
3062  } else {
3063  block[0] = dcdiff * s->c_dc_scale;
3064  }
3065 
3066  //AC Decoding
3067  i = 1;
3068 
3069  /* check if AC is needed at all and adjust direction if needed */
3070  if (!a_avail) dc_pred_dir = 1;
3071  if (!c_avail) dc_pred_dir = 0;
3072  if (!a_avail && !c_avail) use_pred = 0;
3073  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3074  ac_val2 = ac_val;
3075 
3076  scale = mquant * 2 + v->halfpq;
3077 
3078  if (dc_pred_dir) //left
3079  ac_val -= 16;
3080  else //top
3081  ac_val -= 16 * s->block_wrap[n];
3082 
3083  q1 = s->current_picture.qscale_table[mb_pos];
3084  if (dc_pred_dir && c_avail && mb_pos)
3085  q2 = s->current_picture.qscale_table[mb_pos - 1];
3086  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3087  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3088  if ( dc_pred_dir && n == 1)
3089  q2 = q1;
3090  if (!dc_pred_dir && n == 2)
3091  q2 = q1;
3092  if (n == 3) q2 = q1;
3093 
3094  if (coded) {
3095  int last = 0, skip, value;
3096  int k;
3097 
3098  while (!last) {
3099  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3100  i += skip;
3101  if (i > 63)
3102  break;
3103  if (v->fcm == PROGRESSIVE)
3104  block[v->zz_8x8[0][i++]] = value;
3105  else {
3106  if (use_pred && (v->fcm == ILACE_FRAME)) {
3107  if (!dc_pred_dir) // top
3108  block[v->zz_8x8[2][i++]] = value;
3109  else // left
3110  block[v->zz_8x8[3][i++]] = value;
3111  } else {
3112  block[v->zzi_8x8[i++]] = value;
3113  }
3114  }
3115  }
3116 
3117  /* apply AC prediction if needed */
3118  if (use_pred) {
3119  /* scale predictors if needed*/
3120  if (q2 && q1 != q2) {
3121  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3122  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3123 
3124  if (q1 < 1)
3125  return AVERROR_INVALIDDATA;
3126  if (dc_pred_dir) { // left
3127  for (k = 1; k < 8; k++)
3128  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3129  } else { //top
3130  for (k = 1; k < 8; k++)
3131  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3132  }
3133  } else {
3134  if (dc_pred_dir) { // left
3135  for (k = 1; k < 8; k++)
3136  block[k << v->left_blk_sh] += ac_val[k];
3137  } else { // top
3138  for (k = 1; k < 8; k++)
3139  block[k << v->top_blk_sh] += ac_val[k + 8];
3140  }
3141  }
3142  }
3143  /* save AC coeffs for further prediction */
3144  for (k = 1; k < 8; k++) {
3145  ac_val2[k ] = block[k << v->left_blk_sh];
3146  ac_val2[k + 8] = block[k << v->top_blk_sh];
3147  }
3148 
3149  /* scale AC coeffs */
3150  for (k = 1; k < 64; k++)
3151  if (block[k]) {
3152  block[k] *= scale;
3153  if (!v->pquantizer)
3154  block[k] += (block[k] < 0) ? -mquant : mquant;
3155  }
3156 
3157  if (use_pred) i = 63;
3158  } else { // no AC coeffs
3159  int k;
3160 
3161  memset(ac_val2, 0, 16 * 2);
3162  if (dc_pred_dir) { // left
3163  if (use_pred) {
3164  memcpy(ac_val2, ac_val, 8 * 2);
3165  if (q2 && q1 != q2) {
3166  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3167  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3168  if (q1 < 1)
3169  return AVERROR_INVALIDDATA;
3170  for (k = 1; k < 8; k++)
3171  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3172  }
3173  }
3174  } else { // top
3175  if (use_pred) {
3176  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3177  if (q2 && q1 != q2) {
3178  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3179  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3180  if (q1 < 1)
3181  return AVERROR_INVALIDDATA;
3182  for (k = 1; k < 8; k++)
3183  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3184  }
3185  }
3186  }
3187 
3188  /* apply AC prediction if needed */
3189  if (use_pred) {
3190  if (dc_pred_dir) { // left
3191  for (k = 1; k < 8; k++) {
3192  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3193  if (!v->pquantizer && block[k << v->left_blk_sh])
3194  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3195  }
3196  } else { // top
3197  for (k = 1; k < 8; k++) {
3198  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3199  if (!v->pquantizer && block[k << v->top_blk_sh])
3200  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3201  }
3202  }
3203  i = 63;
3204  }
3205  }
3206  s->block_last_index[n] = i;
3207 
3208  return 0;
3209 }
3210 
3211 /** Decode P block
3212  */
3213 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3214  int mquant, int ttmb, int first_block,
3215  uint8_t *dst, int linesize, int skip_block,
3216  int *ttmb_out)
3217 {
3218  MpegEncContext *s = &v->s;
3219  GetBitContext *gb = &s->gb;
3220  int i, j;
3221  int subblkpat = 0;
3222  int scale, off, idx, last, skip, value;
3223  int ttblk = ttmb & 7;
3224  int pat = 0;
3225 
3226  s->dsp.clear_block(block);
3227 
3228  if (ttmb == -1) {
3229  ttblk = ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 1);
3230  }
3231  if (ttblk == TT_4X4) {
3232  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3233  }
3234  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3235  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3236  || (!v->res_rtm_flag && !first_block))) {
3237  subblkpat = decode012(gb);
3238  if (subblkpat)
3239  subblkpat ^= 3; // swap decoded pattern bits
3240  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3241  ttblk = TT_8X4;
3242  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3243  ttblk = TT_4X8;
3244  }
3245  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3246 
3247  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3248  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3249  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3250  ttblk = TT_8X4;
3251  }
3252  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3253  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3254  ttblk = TT_4X8;
3255  }
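 /* From here on only the generic TT_8X8 / TT_8X4 / TT_4X8 / TT_4X4 values
  * remain; the TOP/BOTTOM and LEFT/RIGHT variants were folded into subblkpat,
  * e.g. TT_8X4_TOP becomes TT_8X4 with subblkpat = 1, i.e. only the top 8x4
  * half carries coefficients. */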
3256  switch (ttblk) {
3257  case TT_8X8:
3258  pat = 0xF;
3259  i = 0;
3260  last = 0;
3261  while (!last) {
3262  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3263  i += skip;
3264  if (i > 63)
3265  break;
3266  if (!v->fcm)
3267  idx = v->zz_8x8[0][i++];
3268  else
3269  idx = v->zzi_8x8[i++];
3270  block[idx] = value * scale;
3271  if (!v->pquantizer)
3272  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3273  }
3274  if (!skip_block) {
3275  if (i == 1)
3276  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3277  else {
3278  v->vc1dsp.vc1_inv_trans_8x8(block);
3279  s->dsp.add_pixels_clamped(block, dst, linesize);
3280  }
3281  }
3282  break;
3283  case TT_4X4:
3284  pat = ~subblkpat & 0xF;
3285  for (j = 0; j < 4; j++) {
3286  last = subblkpat & (1 << (3 - j));
3287  i = 0;
3288  off = (j & 1) * 4 + (j & 2) * 16;
3289  while (!last) {
3290  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3291  i += skip;
3292  if (i > 15)
3293  break;
3294  if (!v->fcm)
3295  idx = ff_vc1_simple_progressive_4x4_zz[i++];
3296  else
3297  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3298  block[idx + off] = value * scale;
3299  if (!v->pquantizer)
3300  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3301  }
3302  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3303  if (i == 1)
3304  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3305  else
3306  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3307  }
3308  }
3309  break;
3310  case TT_8X4:
3311  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3312  for (j = 0; j < 2; j++) {
3313  last = subblkpat & (1 << (1 - j));
3314  i = 0;
3315  off = j * 32;
3316  while (!last) {
3317  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3318  i += skip;
3319  if (i > 31)
3320  break;
3321  if (!v->fcm)
3322  idx = v->zz_8x4[i++] + off;
3323  else
3324  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3325  block[idx] = value * scale;
3326  if (!v->pquantizer)
3327  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3328  }
3329  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3330  if (i == 1)
3331  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3332  else
3333  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3334  }
3335  }
3336  break;
3337  case TT_4X8:
3338  pat = ~(subblkpat * 5) & 0xF;
3339  for (j = 0; j < 2; j++) {
3340  last = subblkpat & (1 << (1 - j));
3341  i = 0;
3342  off = j * 4;
3343  while (!last) {
3344  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3345  i += skip;
3346  if (i > 31)
3347  break;
3348  if (!v->fcm)
3349  idx = v->zz_4x8[i++] + off;
3350  else
3351  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3352  block[idx] = value * scale;
3353  if (!v->pquantizer)
3354  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3355  }
3356  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3357  if (i == 1)
3358  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3359  else
3360  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3361  }
3362  }
3363  break;
3364  }
3365  if (ttmb_out)
3366  *ttmb_out |= ttblk << (n * 4);
3367  return pat;
3368 }
3369 
3370 /** @} */ // Macroblock group
3371 
3372 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3373 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3374 
3375 static av_always_inline void vc1_apply_p_v_loop_filter(VC1Context *v, int block_num)
3376 {
3377  MpegEncContext *s = &v->s;
3378  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3379  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3380  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3381  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3382  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3383  uint8_t *dst;
3384 
3385  if (block_num > 3) {
3386  dst = s->dest[block_num - 3];
3387  } else {
3388  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3389  }
3390  if (s->mb_y != s->end_mb_y || block_num < 2) {
3391  int16_t (*mv)[2];
3392  int mv_stride;
3393 
3394  if (block_num > 3) {
3395  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3396  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3397  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3398  mv_stride = s->mb_stride;
3399  } else {
3400  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3401  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3402  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3403  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3404  mv_stride = s->b8_stride;
3405  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3406  }
3407 
3408  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3409  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3410  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3411  } else {
3412  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3413  if (idx == 3) {
3414  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3415  } else if (idx) {
3416  if (idx == 1)
3417  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3418  else
3419  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3420  }
3421  }
3422  }
3423 
3424  dst -= 4 * linesize;
3425  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3426  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3427  idx = (block_cbp | (block_cbp >> 2)) & 3;
3428  if (idx == 3) {
3429  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3430  } else if (idx) {
3431  if (idx == 1)
3432  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3433  else
3434  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3435  }
3436  }
3437 }
3438 
3439 static av_always_inline void vc1_apply_p_h_loop_filter(VC1Context *v, int block_num)
3440 {
3441  MpegEncContext *s = &v->s;
3442  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3443  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3444  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3445  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3446  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3447  uint8_t *dst;
3448 
3449  if (block_num > 3) {
3450  dst = s->dest[block_num - 3] - 8 * linesize;
3451  } else {
3452  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3453  }
3454 
3455  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3456  int16_t (*mv)[2];
3457 
3458  if (block_num > 3) {
3459  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3460  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3461  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3462  } else {
3463  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3464  : (mb_cbp >> ((block_num + 1) * 4));
3465  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3466  : (mb_is_intra >> ((block_num + 1) * 4));
3467  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3468  }
3469  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3470  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3471  } else {
3472  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3473  if (idx == 5) {
3474  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3475  } else if (idx) {
3476  if (idx == 1)
3477  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3478  else
3479  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3480  }
3481  }
3482  }
3483 
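      /* Likewise filter the vertical edge in the middle of the block (4 pixels
       * to the left) when it was coded with 4x8 or 4x4 sub-blocks. */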
3484  dst -= 4;
3485  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3486  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3487  idx = (block_cbp | (block_cbp >> 1)) & 5;
3488  if (idx == 5) {
3489  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3490  } else if (idx) {
3491  if (idx == 1)
3492  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3493  else
3494  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3495  }
3496  }
3497 }
3498 
3499 static void vc1_apply_p_loop_filter(VC1Context *v)
3500 {
3501  MpegEncContext *s = &v->s;
3502  int i;
3503 
3504  for (i = 0; i < 6; i++) {
3505  vc1_apply_p_v_loop_filter(v, i);
3506  }
3507 
3508  /* V always precedes H, therefore we run H one MB before V;
3509  * at the end of a row, we catch up to complete the row */
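      /* Concretely: while at MB (mb_x, mb_y), the loop above V-filters the MB one
       * row up (its state is read at index mb_x - mb_stride, i.e. from the saved
       * previous row), the loop below H-filters the MB one row up and one column
       * left, and at the last column mb_x is bumped once more so that column gets
       * H-filtered as well. */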
3510  if (s->mb_x) {
3511  for (i = 0; i < 6; i++) {
3512  vc1_apply_p_h_loop_filter(v, i);
3513  }
3514  if (s->mb_x == s->mb_width - 1) {
3515  s->mb_x++;
3516  ff_update_block_index(s);
3517  for (i = 0; i < 6; i++) {
3518  vc1_apply_p_h_loop_filter(v, i);
3519  }
3520  }
3521  }
3522 }
3523 
3524 /** Decode one P-frame MB
3525  */
3526 static int vc1_decode_p_mb(VC1Context *v)
3527 {
3528  MpegEncContext *s = &v->s;
3529  GetBitContext *gb = &s->gb;
3530  int i, j;
3531  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3532  int cbp; /* cbp decoding stuff */
3533  int mqdiff, mquant; /* MB quantization */
3534  int ttmb = v->ttfrm; /* MB Transform type */
3535 
3536  int mb_has_coeffs = 1; /* last_flag */
3537  int dmv_x, dmv_y; /* Differential MV components */
3538  int index, index1; /* LUT indexes */
3539  int val, sign; /* temp values */
3540  int first_block = 1;
3541  int dst_idx, off;
3542  int skipped, fourmv;
3543  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3544 
3545  mquant = v->pq; /* lossy initialization */
3546 
3547  if (v->mv_type_is_raw)
3548  fourmv = get_bits1(gb);
3549  else
3550  fourmv = v->mv_type_mb_plane[mb_pos];
3551  if (v->skip_is_raw)
3552  skipped = get_bits1(gb);
3553  else
3554  skipped = v->s.mbskip_table[mb_pos];
3555 
3556  if (!fourmv) { /* 1MV mode */
3557  if (!skipped) {
3558  GET_MVDATA(dmv_x, dmv_y);
3559 
3560  if (s->mb_intra) {
3561  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3562  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3563  }
3564  s->current_picture.mb_type[mb_pos] = s->mb_intra ? MB_TYPE_INTRA : MB_TYPE_16x16;
3565  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3566 
3567  /* FIXME Set DC val for inter block ? */
3568  if (s->mb_intra && !mb_has_coeffs) {
3569  GET_MQUANT();
3570  s->ac_pred = get_bits1(gb);
3571  cbp = 0;
3572  } else if (mb_has_coeffs) {
3573  if (s->mb_intra)
3574  s->ac_pred = get_bits1(gb);
3575  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3576  GET_MQUANT();
3577  } else {
3578  mquant = v->pq;
3579  cbp = 0;
3580  }
3581  s->current_picture.qscale_table[mb_pos] = mquant;
3582 
3583  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3584  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3585  VC1_TTMB_VLC_BITS, 2);
3586  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3587  dst_idx = 0;
3588  for (i = 0; i < 6; i++) {
3589  s->dc_val[0][s->block_index[i]] = 0;
3590  dst_idx += i >> 2;
3591  val = ((cbp >> (5 - i)) & 1);
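      /* cbp bit (5 - i) is the coded flag of block i; off places the 8x8 block
       * inside the destination MB: chroma blocks (i >= 4) have their own dest
       * plane, luma blocks 0-3 sit at (i & 1) * 8 pixels across and
       * ((i & 2) >> 1) * 8 lines down. */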
3592  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3593  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3594  if (s->mb_intra) {
3595  /* check if prediction blocks A and C are available */
3596  v->a_avail = v->c_avail = 0;
3597  if (i == 2 || i == 3 || !s->first_slice_line)
3598  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3599  if (i == 1 || i == 3 || s->mb_x)
3600  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3601 
3602  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3603  (i & 4) ? v->codingset2 : v->codingset);
3604  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3605  continue;
3606  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3607  if (v->rangeredfrm)
3608  for (j = 0; j < 64; j++)
3609  s->block[i][j] <<= 1;
3610  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3611  if (v->pq >= 9 && v->overlap) {
3612  if (v->c_avail)
3613  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3614  if (v->a_avail)
3615  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3616  }
3617  block_cbp |= 0xF << (i << 2);
3618  block_intra |= 1 << i;
3619  } else if (val) {
3620  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3621  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3622  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3623  block_cbp |= pat << (i << 2);
3624  if (!v->ttmbf && ttmb < 8)
3625  ttmb = -1;
3626  first_block = 0;
3627  }
3628  }
3629  } else { // skipped
3630  s->mb_intra = 0;
3631  for (i = 0; i < 6; i++) {
3632  v->mb_type[0][s->block_index[i]] = 0;
3633  s->dc_val[0][s->block_index[i]] = 0;
3634  }
3635  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3636  s->current_picture.qscale_table[mb_pos] = 0;
3637  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3638  vc1_mc_1mv(v, 0);
3639  }
3640  } else { // 4MV mode
3641  if (!skipped /* unskipped MB */) {
3642  int intra_count = 0, coded_inter = 0;
3643  int is_intra[6], is_coded[6];
3644  /* Get CBPCY */
3645  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3646  for (i = 0; i < 6; i++) {
3647  val = ((cbp >> (5 - i)) & 1);
3648  s->dc_val[0][s->block_index[i]] = 0;
3649  s->mb_intra = 0;
3650  if (i < 4) {
3651  dmv_x = dmv_y = 0;
3652  s->mb_intra = 0;
3653  mb_has_coeffs = 0;
3654  if (val) {
3655  GET_MVDATA(dmv_x, dmv_y);
3656  }
3657  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3658  if (!s->mb_intra)
3659  vc1_mc_4mv_luma(v, i, 0, 0);
3660  intra_count += s->mb_intra;
3661  is_intra[i] = s->mb_intra;
3662  is_coded[i] = mb_has_coeffs;
3663  }
3664  if (i & 4) {
3665  is_intra[i] = (intra_count >= 3);
3666  is_coded[i] = val;
3667  }
3668  if (i == 4)
3669  vc1_mc_4mv_chroma(v, 0);
3670  v->mb_type[0][s->block_index[i]] = is_intra[i];
3671  if (!coded_inter)
3672  coded_inter = !is_intra[i] & is_coded[i];
3673  }
3674  // if there are no coded blocks then don't do anything more
3675  dst_idx = 0;
3676  if (!intra_count && !coded_inter)
3677  goto end;
3678  GET_MQUANT();
3679  s->current_picture.qscale_table[mb_pos] = mquant;
3680  /* test if block is intra and has pred */
3681  {
3682  int intrapred = 0;
3683  for (i = 0; i < 6; i++)
3684  if (is_intra[i]) {
3685  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3686  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3687  intrapred = 1;
3688  break;
3689  }
3690  }
3691  if (intrapred)
3692  s->ac_pred = get_bits1(gb);
3693  else
3694  s->ac_pred = 0;
3695  }
3696  if (!v->ttmbf && coded_inter)
3697  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3698  for (i = 0; i < 6; i++) {
3699  dst_idx += i >> 2;
3700  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3701  s->mb_intra = is_intra[i];
3702  if (is_intra[i]) {
3703  /* check if prediction blocks A and C are available */
3704  v->a_avail = v->c_avail = 0;
3705  if (i == 2 || i == 3 || !s->first_slice_line)
3706  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3707  if (i == 1 || i == 3 || s->mb_x)
3708  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3709 
3710  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3711  (i & 4) ? v->codingset2 : v->codingset);
3712  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3713  continue;
3714  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3715  if (v->rangeredfrm)
3716  for (j = 0; j < 64; j++)
3717  s->block[i][j] <<= 1;
3718  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3719  (i & 4) ? s->uvlinesize : s->linesize);
3720  if (v->pq >= 9 && v->overlap) {
3721  if (v->c_avail)
3722  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3723  if (v->a_avail)
3724  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3725  }
3726  block_cbp |= 0xF << (i << 2);
3727  block_intra |= 1 << i;
3728  } else if (is_coded[i]) {
3729  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3730  first_block, s->dest[dst_idx] + off,
3731  (i & 4) ? s->uvlinesize : s->linesize,
3732  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3733  &block_tt);
3734  block_cbp |= pat << (i << 2);
3735  if (!v->ttmbf && ttmb < 8)
3736  ttmb = -1;
3737  first_block = 0;
3738  }
3739  }
3740  } else { // skipped MB
3741  s->mb_intra = 0;
3742  s->current_picture.qscale_table[mb_pos] = 0;
3743  for (i = 0; i < 6; i++) {
3744  v->mb_type[0][s->block_index[i]] = 0;
3745  s->dc_val[0][s->block_index[i]] = 0;
3746  }
3747  for (i = 0; i < 4; i++) {
3748  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3749  vc1_mc_4mv_luma(v, i, 0, 0);
3750  }
3751  vc1_mc_4mv_chroma(v, 0);
3752  s->current_picture.qscale_table[mb_pos] = 0;
3753  }
3754  }
3755 end:
3756  v->cbp[s->mb_x] = block_cbp;
3757  v->ttblk[s->mb_x] = block_tt;
3758  v->is_intra[s->mb_x] = block_intra;
3759 
3760  return 0;
3761 }
3762 
3763 /* Decode one macroblock in an interlaced frame p picture */
3764 
3765 static int vc1_decode_p_mb_intfr(VC1Context *v)
3766 {
3767  MpegEncContext *s = &v->s;
3768  GetBitContext *gb = &s->gb;
3769  int i;
3770  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3771  int cbp = 0; /* cbp decoding stuff */
3772  int mqdiff, mquant; /* MB quantization */
3773  int ttmb = v->ttfrm; /* MB Transform type */
3774 
3775  int mb_has_coeffs = 1; /* last_flag */
3776  int dmv_x, dmv_y; /* Differential MV components */
3777  int val; /* temp value */
3778  int first_block = 1;
3779  int dst_idx, off;
3780  int skipped, fourmv = 0, twomv = 0;
3781  int block_cbp = 0, pat, block_tt = 0;
3782  int idx_mbmode = 0, mvbp;
3783  int stride_y, fieldtx;
3784 
3785  mquant = v->pq; /* Lossy initialization */
3786 
3787  if (v->skip_is_raw)
3788  skipped = get_bits1(gb);
3789  else
3790  skipped = v->s.mbskip_table[mb_pos];
3791  if (!skipped) {
3792  if (v->fourmvswitch)
3793  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3794  else
3795  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3796  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3797  /* store the motion vector type in a flag (useful later) */
3798  case MV_PMODE_INTFR_4MV:
3799  fourmv = 1;
3800  v->blk_mv_type[s->block_index[0]] = 0;
3801  v->blk_mv_type[s->block_index[1]] = 0;
3802  v->blk_mv_type[s->block_index[2]] = 0;
3803  v->blk_mv_type[s->block_index[3]] = 0;
3804  break;
3805  case MV_PMODE_INTFR_4MV_FIELD:
3806  fourmv = 1;
3807  v->blk_mv_type[s->block_index[0]] = 1;
3808  v->blk_mv_type[s->block_index[1]] = 1;
3809  v->blk_mv_type[s->block_index[2]] = 1;
3810  v->blk_mv_type[s->block_index[3]] = 1;
3811  break;
3812  case MV_PMODE_INTFR_2MV_FIELD:
3813  twomv = 1;
3814  v->blk_mv_type[s->block_index[0]] = 1;
3815  v->blk_mv_type[s->block_index[1]] = 1;
3816  v->blk_mv_type[s->block_index[2]] = 1;
3817  v->blk_mv_type[s->block_index[3]] = 1;
3818  break;
3819  case MV_PMODE_INTFR_1MV:
3820  v->blk_mv_type[s->block_index[0]] = 0;
3821  v->blk_mv_type[s->block_index[1]] = 0;
3822  v->blk_mv_type[s->block_index[2]] = 0;
3823  v->blk_mv_type[s->block_index[3]] = 0;
3824  break;
3825  }
3826  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3827  for (i = 0; i < 4; i++) {
3828  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3829  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3830  }
3831  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3832  s->mb_intra = v->is_intra[s->mb_x] = 1;
3833  for (i = 0; i < 6; i++)
3834  v->mb_type[0][s->block_index[i]] = 1;
3835  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3836  mb_has_coeffs = get_bits1(gb);
3837  if (mb_has_coeffs)
3838  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3839  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3840  GET_MQUANT();
3841  s->current_picture.qscale_table[mb_pos] = mquant;
3842  /* Set DC scale - y and c use the same (not sure if necessary here) */
3843  s->y_dc_scale = s->y_dc_scale_table[mquant];
3844  s->c_dc_scale = s->c_dc_scale_table[mquant];
3845  dst_idx = 0;
3846  for (i = 0; i < 6; i++) {
3847  s->dc_val[0][s->block_index[i]] = 0;
3848  dst_idx += i >> 2;
3849  val = ((cbp >> (5 - i)) & 1);
3850  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3851  v->a_avail = v->c_avail = 0;
3852  if (i == 2 || i == 3 || !s->first_slice_line)
3853  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3854  if (i == 1 || i == 3 || s->mb_x)
3855  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3856 
3857  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3858  (i & 4) ? v->codingset2 : v->codingset);
3859  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3860  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3861  if (i < 4) {
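      /* With FIELDTX the luma lines of this MB are field-interleaved: the
       * effective stride is doubled and blocks 2/3 start one frame line below
       * blocks 0/1 instead of eight lines below. */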
3862  stride_y = s->linesize << fieldtx;
3863  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3864  } else {
3865  stride_y = s->uvlinesize;
3866  off = 0;
3867  }
3868  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3869  //TODO: loop filter
3870  }
3871 
3872  } else { // inter MB
3873  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3874  if (mb_has_coeffs)
3875  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3876  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3877  v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
3878  } else {
3879  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3880  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3881  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
3882  }
3883  }
3884  s->mb_intra = v->is_intra[s->mb_x] = 0;
3885  for (i = 0; i < 6; i++)
3886  v->mb_type[0][s->block_index[i]] = 0;
3887  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3888  /* for all motion vector read MVDATA and motion compensate each block */
3889  dst_idx = 0;
3890  if (fourmv) {
3891  mvbp = v->fourmvbp;
3892  for (i = 0; i < 6; i++) {
3893  if (i < 4) {
3894  dmv_x = dmv_y = 0;
3895  val = ((mvbp >> (3 - i)) & 1);
3896  if (val) {
3897  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3898  }
3899  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3900  vc1_mc_4mv_luma(v, i, 0, 0);
3901  } else if (i == 4) {
3902  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3903  }
3904  }
3905  } else if (twomv) {
3906  mvbp = v->twomvbp;
3907  dmv_x = dmv_y = 0;
3908  if (mvbp & 2) {
3909  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3910  }
3911  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3912  vc1_mc_4mv_luma(v, 0, 0, 0);
3913  vc1_mc_4mv_luma(v, 1, 0, 0);
3914  dmv_x = dmv_y = 0;
3915  if (mvbp & 1) {
3916  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3917  }
3918  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3919  vc1_mc_4mv_luma(v, 2, 0, 0);
3920  vc1_mc_4mv_luma(v, 3, 0, 0);
3921  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3922  } else {
3923  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3924  dmv_x = dmv_y = 0;
3925  if (mvbp) {
3926  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3927  }
3928  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3929  vc1_mc_1mv(v, 0);
3930  }
3931  if (cbp)
3932  GET_MQUANT(); // p. 227
3933  s->current_picture.qscale_table[mb_pos] = mquant;
3934  if (!v->ttmbf && cbp)
3935  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
3936  for (i = 0; i < 6; i++) {
3937  s->dc_val[0][s->block_index[i]] = 0;
3938  dst_idx += i >> 2;
3939  val = ((cbp >> (5 - i)) & 1);
3940  if (!fieldtx)
3941  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3942  else
3943  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3944  if (val) {
3945  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3946  first_block, s->dest[dst_idx] + off,
3947  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3948  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3949  block_cbp |= pat << (i << 2);
3950  if (!v->ttmbf && ttmb < 8)
3951  ttmb = -1;
3952  first_block = 0;
3953  }
3954  }
3955  }
3956  } else { // skipped
3957  s->mb_intra = v->is_intra[s->mb_x] = 0;
3958  for (i = 0; i < 6; i++) {
3959  v->mb_type[0][s->block_index[i]] = 0;
3960  s->dc_val[0][s->block_index[i]] = 0;
3961  }
3962  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3963  s->current_picture.qscale_table[mb_pos] = 0;
3964  v->blk_mv_type[s->block_index[0]] = 0;
3965  v->blk_mv_type[s->block_index[1]] = 0;
3966  v->blk_mv_type[s->block_index[2]] = 0;
3967  v->blk_mv_type[s->block_index[3]] = 0;
3968  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3969  vc1_mc_1mv(v, 0);
3970  }
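      /* End of an MB row: copy this row's intra flags into the base buffer so the
       * next row sees them as its top neighbours (v->is_intra appears to alias
       * one stride into v->is_intra_base). */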
3971  if (s->mb_x == s->mb_width - 1)
3972  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3973  return 0;
3974 }
3975 
3976 static int vc1_decode_p_mb_intfi(VC1Context *v)
3977 {
3978  MpegEncContext *s = &v->s;
3979  GetBitContext *gb = &s->gb;
3980  int i;
3981  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3982  int cbp = 0; /* cbp decoding stuff */
3983  int mqdiff, mquant; /* MB quantization */
3984  int ttmb = v->ttfrm; /* MB Transform type */
3985 
3986  int mb_has_coeffs = 1; /* last_flag */
3987  int dmv_x, dmv_y; /* Differential MV components */
3988  int val; /* temp values */
3989  int first_block = 1;
3990  int dst_idx, off;
3991  int pred_flag = 0;
3992  int block_cbp = 0, pat, block_tt = 0;
3993  int idx_mbmode = 0;
3994 
3995  mquant = v->pq; /* Lossy initialization */
3996 
3997  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
3998  if (idx_mbmode <= 1) { // intra MB
3999  s->mb_intra = v->is_intra[s->mb_x] = 1;
4000  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4001  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4002  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4003  GET_MQUANT();
4004  s->current_picture.qscale_table[mb_pos] = mquant;
4005  /* Set DC scale - y and c use the same (not sure if necessary here) */
4006  s->y_dc_scale = s->y_dc_scale_table[mquant];
4007  s->c_dc_scale = s->c_dc_scale_table[mquant];
4008  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4009  mb_has_coeffs = idx_mbmode & 1;
4010  if (mb_has_coeffs)
4011  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4012  dst_idx = 0;
4013  for (i = 0; i < 6; i++) {
4014  s->dc_val[0][s->block_index[i]] = 0;
4015  v->mb_type[0][s->block_index[i]] = 1;
4016  dst_idx += i >> 2;
4017  val = ((cbp >> (5 - i)) & 1);
4018  v->a_avail = v->c_avail = 0;
4019  if (i == 2 || i == 3 || !s->first_slice_line)
4020  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4021  if (i == 1 || i == 3 || s->mb_x)
4022  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4023 
4024  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4025  (i & 4) ? v->codingset2 : v->codingset);
4026  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4027  continue;
4028  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4029  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4030  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4031  // TODO: loop filter
4032  }
4033  } else {
4034  s->mb_intra = v->is_intra[s->mb_x] = 0;
4035  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4036  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4037  if (idx_mbmode <= 5) { // 1-MV
4038  dmv_x = dmv_y = pred_flag = 0;
4039  if (idx_mbmode & 1) {
4040  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4041  }
4042  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4043  vc1_mc_1mv(v, 0);
4044  mb_has_coeffs = !(idx_mbmode & 2);
4045  } else { // 4-MV
4046  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4047  for (i = 0; i < 6; i++) {
4048  if (i < 4) {
4049  dmv_x = dmv_y = pred_flag = 0;
4050  val = ((v->fourmvbp >> (3 - i)) & 1);
4051  if (val) {
4052  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4053  }
4054  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4055  vc1_mc_4mv_luma(v, i, 0, 0);
4056  } else if (i == 4)
4057  vc1_mc_4mv_chroma(v, 0);
4058  }
4059  mb_has_coeffs = idx_mbmode & 1;
4060  }
4061  if (mb_has_coeffs)
4062  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4063  if (cbp) {
4064  GET_MQUANT();
4065  }
4066  s->current_picture.qscale_table[mb_pos] = mquant;
4067  if (!v->ttmbf && cbp) {
4068  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4069  }
4070  dst_idx = 0;
4071  for (i = 0; i < 6; i++) {
4072  s->dc_val[0][s->block_index[i]] = 0;
4073  dst_idx += i >> 2;
4074  val = ((cbp >> (5 - i)) & 1);
4075  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4076  if (val) {
4077  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4078  first_block, s->dest[dst_idx] + off,
4079  (i & 4) ? s->uvlinesize : s->linesize,
4080  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4081  &block_tt);
4082  block_cbp |= pat << (i << 2);
4083  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4084  first_block = 0;
4085  }
4086  }
4087  }
4088  if (s->mb_x == s->mb_width - 1)
4089  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4090  return 0;
4091 }
4092 
4093 /** Decode one B-frame MB (in Main profile)
4094  */
4095 static void vc1_decode_b_mb(VC1Context *v)
4096 {
4097  MpegEncContext *s = &v->s;
4098  GetBitContext *gb = &s->gb;
4099  int i, j;
4100  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4101  int cbp = 0; /* cbp decoding stuff */
4102  int mqdiff, mquant; /* MB quantization */
4103  int ttmb = v->ttfrm; /* MB Transform type */
4104  int mb_has_coeffs = 0; /* last_flag */
4105  int index, index1; /* LUT indexes */
4106  int val, sign; /* temp values */
4107  int first_block = 1;
4108  int dst_idx, off;
4109  int skipped, direct;
4110  int dmv_x[2], dmv_y[2];
4111  int bmvtype = BMV_TYPE_BACKWARD;
4112 
4113  mquant = v->pq; /* lossy initialization */
4114  s->mb_intra = 0;
4115 
4116  if (v->dmb_is_raw)
4117  direct = get_bits1(gb);
4118  else
4119  direct = v->direct_mb_plane[mb_pos];
4120  if (v->skip_is_raw)
4121  skipped = get_bits1(gb);
4122  else
4123  skipped = v->s.mbskip_table[mb_pos];
4124 
4125  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4126  for (i = 0; i < 6; i++) {
4127  v->mb_type[0][s->block_index[i]] = 0;
4128  s->dc_val[0][s->block_index[i]] = 0;
4129  }
4130  s->current_picture.qscale_table[mb_pos] = 0;
4131 
4132  if (!direct) {
4133  if (!skipped) {
4134  GET_MVDATA(dmv_x[0], dmv_y[0]);
4135  dmv_x[1] = dmv_x[0];
4136  dmv_y[1] = dmv_y[0];
4137  }
4138  if (skipped || !s->mb_intra) {
4139  bmvtype = decode012(gb);
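      /* decode012() yields 0, 1 or 2: codes 0/1 select backward or forward
       * prediction (which of the two gets the shorter code depends on BFRACTION,
       * presumably favouring the reference the B frame is temporally closer to),
       * code 2 selects interpolated prediction. */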
4140  switch (bmvtype) {
4141  case 0:
4142  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4143  break;
4144  case 1:
4145  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4146  break;
4147  case 2:
4148  bmvtype = BMV_TYPE_INTERPOLATED;
4149  dmv_x[0] = dmv_y[0] = 0;
4150  }
4151  }
4152  }
4153  for (i = 0; i < 6; i++)
4154  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4155 
4156  if (skipped) {
4157  if (direct)
4158  bmvtype = BMV_TYPE_INTERPOLATED;
4159  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4160  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4161  return;
4162  }
4163  if (direct) {
4164  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4165  GET_MQUANT();
4166  s->mb_intra = 0;
4167  s->current_picture.qscale_table[mb_pos] = mquant;
4168  if (!v->ttmbf)
4169  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4170  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4171  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4172  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4173  } else {
4174  if (!mb_has_coeffs && !s->mb_intra) {
4175  /* no coded blocks - effectively skipped */
4176  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4177  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4178  return;
4179  }
4180  if (s->mb_intra && !mb_has_coeffs) {
4181  GET_MQUANT();
4182  s->current_picture.qscale_table[mb_pos] = mquant;
4183  s->ac_pred = get_bits1(gb);
4184  cbp = 0;
4185  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4186  } else {
4187  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4188  GET_MVDATA(dmv_x[0], dmv_y[0]);
4189  if (!mb_has_coeffs) {
4190  /* interpolated skipped block */
4191  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4192  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4193  return;
4194  }
4195  }
4196  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4197  if (!s->mb_intra) {
4198  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4199  }
4200  if (s->mb_intra)
4201  s->ac_pred = get_bits1(gb);
4202  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4203  GET_MQUANT();
4204  s->current_picture.qscale_table[mb_pos] = mquant;
4205  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
4206  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4207  }
4208  }
4209  dst_idx = 0;
4210  for (i = 0; i < 6; i++) {
4211  s->dc_val[0][s->block_index[i]] = 0;
4212  dst_idx += i >> 2;
4213  val = ((cbp >> (5 - i)) & 1);
4214  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4215  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4216  if (s->mb_intra) {
4217  /* check if prediction blocks A and C are available */
4218  v->a_avail = v->c_avail = 0;
4219  if (i == 2 || i == 3 || !s->first_slice_line)
4220  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4221  if (i == 1 || i == 3 || s->mb_x)
4222  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4223 
4224  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4225  (i & 4) ? v->codingset2 : v->codingset);
4226  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4227  continue;
4228  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4229  if (v->rangeredfrm)
4230  for (j = 0; j < 64; j++)
4231  s->block[i][j] <<= 1;
4232  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4233  } else if (val) {
4234  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4235  first_block, s->dest[dst_idx] + off,
4236  (i & 4) ? s->uvlinesize : s->linesize,
4237  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4238  if (!v->ttmbf && ttmb < 8)
4239  ttmb = -1;
4240  first_block = 0;
4241  }
4242  }
4243 }
4244 
4245 /** Decode one B-frame MB (in interlaced field B picture)
4246  */
4247 static void vc1_decode_b_mb_intfi(VC1Context *v)
4248 {
4249  MpegEncContext *s = &v->s;
4250  GetBitContext *gb = &s->gb;
4251  int i, j;
4252  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4253  int cbp = 0; /* cbp decoding stuff */
4254  int mqdiff, mquant; /* MB quantization */
4255  int ttmb = v->ttfrm; /* MB Transform type */
4256  int mb_has_coeffs = 0; /* last_flag */
4257  int val; /* temp value */
4258  int first_block = 1;
4259  int dst_idx, off;
4260  int fwd;
4261  int dmv_x[2], dmv_y[2], pred_flag[2];
4262  int bmvtype = BMV_TYPE_BACKWARD;
4263  int idx_mbmode;
4264 
4265  mquant = v->pq; /* Lossy initialization */
4266  s->mb_intra = 0;
4267 
4268  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4269  if (idx_mbmode <= 1) { // intra MB
4270  s->mb_intra = v->is_intra[s->mb_x] = 1;
4271  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4272  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4273  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4274  GET_MQUANT();
4275  s->current_picture.qscale_table[mb_pos] = mquant;
4276  /* Set DC scale - y and c use the same (not sure if necessary here) */
4277  s->y_dc_scale = s->y_dc_scale_table[mquant];
4278  s->c_dc_scale = s->c_dc_scale_table[mquant];
4279  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4280  mb_has_coeffs = idx_mbmode & 1;
4281  if (mb_has_coeffs)
4282  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4283  dst_idx = 0;
4284  for (i = 0; i < 6; i++) {
4285  s->dc_val[0][s->block_index[i]] = 0;
4286  dst_idx += i >> 2;
4287  val = ((cbp >> (5 - i)) & 1);
4288  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4289  v->a_avail = v->c_avail = 0;
4290  if (i == 2 || i == 3 || !s->first_slice_line)
4291  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4292  if (i == 1 || i == 3 || s->mb_x)
4293  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4294 
4295  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4296  (i & 4) ? v->codingset2 : v->codingset);
4297  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4298  continue;
4299  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4300  if (v->rangeredfrm)
4301  for (j = 0; j < 64; j++)
4302  s->block[i][j] <<= 1;
4303  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4304  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4305  // TODO: yet to perform loop filter
4306  }
4307  } else {
4308  s->mb_intra = v->is_intra[s->mb_x] = 0;
4309  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4310  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4311  if (v->fmb_is_raw)
4312  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4313  else
4314  fwd = v->forward_mb_plane[mb_pos];
4315  if (idx_mbmode <= 5) { // 1-MV
4316  int interpmvp = 0;
4317  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4318  pred_flag[0] = pred_flag[1] = 0;
4319  if (fwd)
4320  bmvtype = BMV_TYPE_FORWARD;
4321  else {
4322  bmvtype = decode012(gb);
4323  switch (bmvtype) {
4324  case 0:
4325  bmvtype = BMV_TYPE_BACKWARD;
4326  break;
4327  case 1:
4328  bmvtype = BMV_TYPE_DIRECT;
4329  break;
4330  case 2:
4331  bmvtype = BMV_TYPE_INTERPOLATED;
4332  interpmvp = get_bits1(gb);
4333  }
4334  }
4335  v->bmvtype = bmvtype;
4336  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4337  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4338  }
4339  if (interpmvp) {
4340  get_mvdata_interlaced(v, &dmv_x[1], &dmv_y[1], &pred_flag[1]);
4341  }
4342  if (bmvtype == BMV_TYPE_DIRECT) {
4343  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4344  dmv_x[1] = dmv_y[1] = pred_flag[0] = 0;
4345  }
4346  vc1_pred_b_mv_intfi(v, 0, dmv_x, dmv_y, 1, pred_flag);
4347  vc1_b_mc(v, dmv_x, dmv_y, (bmvtype == BMV_TYPE_DIRECT), bmvtype);
4348  mb_has_coeffs = !(idx_mbmode & 2);
4349  } else { // 4-MV
4350  if (fwd)
4351  bmvtype = BMV_TYPE_FORWARD;
4352  v->bmvtype = bmvtype;
4353  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4354  for (i = 0; i < 6; i++) {
4355  if (i < 4) {
4356  dmv_x[0] = dmv_y[0] = pred_flag[0] = 0;
4357  dmv_x[1] = dmv_y[1] = pred_flag[1] = 0;
4358  val = ((v->fourmvbp >> (3 - i)) & 1);
4359  if (val) {
4360  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD],
4361  &dmv_y[bmvtype == BMV_TYPE_BACKWARD],
4362  &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);
4363  }
4364  vc1_pred_b_mv_intfi(v, i, dmv_x, dmv_y, 0, pred_flag);
4365  vc1_mc_4mv_luma(v, i, bmvtype == BMV_TYPE_BACKWARD, 0);
4366  } else if (i == 4)
4367  vc1_mc_4mv_chroma(v, bmvtype == BMV_TYPE_BACKWARD);
4368  }
4369  mb_has_coeffs = idx_mbmode & 1;
4370  }
4371  if (mb_has_coeffs)
4372  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4373  if (cbp) {
4374  GET_MQUANT();
4375  }
4376  s->current_picture.qscale_table[mb_pos] = mquant;
4377  if (!v->ttmbf && cbp) {
4378  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4379  }
4380  dst_idx = 0;
4381  for (i = 0; i < 6; i++) {
4382  s->dc_val[0][s->block_index[i]] = 0;
4383  dst_idx += i >> 2;
4384  val = ((cbp >> (5 - i)) & 1);
4385  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4386  if (val) {
4387  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4388  first_block, s->dest[dst_idx] + off,
4389  (i & 4) ? s->uvlinesize : s->linesize,
4390  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4391  if (!v->ttmbf && ttmb < 8)
4392  ttmb = -1;
4393  first_block = 0;
4394  }
4395  }
4396  }
4397 }
4398 
4399 /** Decode one B-frame MB (in interlaced frame B picture)
4400  */
4401 static int vc1_decode_b_mb_intfr(VC1Context *v)
4402 {
4403  MpegEncContext *s = &v->s;
4404  GetBitContext *gb = &s->gb;
4405  int i, j;
4406  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4407  int cbp = 0; /* cbp decoding stuff */
4408  int mqdiff, mquant; /* MB quantization */
4409  int ttmb = v->ttfrm; /* MB Transform type */
4410  int mvsw = 0; /* motion vector switch */
4411  int mb_has_coeffs = 1; /* last_flag */
4412  int dmv_x, dmv_y; /* Differential MV components */
4413  int val; /* temp value */
4414  int first_block = 1;
4415  int dst_idx, off;
4416  int skipped, direct, twomv = 0;
4417  int block_cbp = 0, pat, block_tt = 0;
4418  int idx_mbmode = 0, mvbp;
4419  int stride_y, fieldtx;
4420  int bmvtype = BMV_TYPE_BACKWARD;
4421  int dir, dir2;
4422 
4423  mquant = v->pq; /* Lossy initialization */
4424  s->mb_intra = 0;
4425  if (v->skip_is_raw)
4426  skipped = get_bits1(gb);
4427  else
4428  skipped = v->s.mbskip_table[mb_pos];
4429 
4430  if (!skipped) {
4431  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
4432  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
4433  twomv = 1;
4434  v->blk_mv_type[s->block_index[0]] = 1;
4435  v->blk_mv_type[s->block_index[1]] = 1;
4436  v->blk_mv_type[s->block_index[2]] = 1;
4437  v->blk_mv_type[s->block_index[3]] = 1;
4438  } else {
4439  v->blk_mv_type[s->block_index[0]] = 0;
4440  v->blk_mv_type[s->block_index[1]] = 0;
4441  v->blk_mv_type[s->block_index[2]] = 0;
4442  v->blk_mv_type[s->block_index[3]] = 0;
4443  }
4444  }
4445 
4446  if (v->dmb_is_raw)
4447  direct = get_bits1(gb);
4448  else
4449  direct = v->direct_mb_plane[mb_pos];
4450 
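      /* Direct mode: both forward and backward MVs are derived by scaling the
       * co-located MV of the backward (next) reference according to BFRACTION. */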
4451  if (direct) {
4452  s->mv[0][0][0] = s->current_picture.motion_val[0][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 0, s->quarter_sample);
4453  s->mv[0][0][1] = s->current_picture.motion_val[0][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 0, s->quarter_sample);
4454  s->mv[1][0][0] = s->current_picture.motion_val[1][s->block_index[0]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][0], v->bfraction, 1, s->quarter_sample);
4455  s->mv[1][0][1] = s->current_picture.motion_val[1][s->block_index[0]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0]][1], v->bfraction, 1, s->quarter_sample);
4456 
4457  if (twomv) {
4458  s->mv[0][2][0] = s->current_picture.motion_val[0][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 0, s->quarter_sample);
4459  s->mv[0][2][1] = s->current_picture.motion_val[0][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 0, s->quarter_sample);
4460  s->mv[1][2][0] = s->current_picture.motion_val[1][s->block_index[2]][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][0], v->bfraction, 1, s->quarter_sample);
4461  s->mv[1][2][1] = s->current_picture.motion_val[1][s->block_index[2]][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[2]][1], v->bfraction, 1, s->quarter_sample);
4462 
4463  for (i = 1; i < 4; i += 2) {
4464  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][i-1][0];
4465  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][i-1][1];
4466  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][i-1][0];
4467  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][i-1][1];
4468  }
4469  } else {
4470  for (i = 1; i < 4; i++) {
4471  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = s->mv[0][0][0];
4472  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = s->mv[0][0][1];
4473  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = s->mv[1][0][0];
4474  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = s->mv[1][0][1];
4475  }
4476  }
4477  }
4478 
4479  if (ff_vc1_mbmode_intfrp[0][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
4480  for (i = 0; i < 4; i++) {
4481  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0] = 0;
4482  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1] = 0;
4483  s->mv[1][i][0] = s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
4484  s->mv[1][i][1] = s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
4485  }
4486  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4487  s->mb_intra = v->is_intra[s->mb_x] = 1;
4488  for (i = 0; i < 6; i++)
4489  v->mb_type[0][s->block_index[i]] = 1;
4490  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
4491  mb_has_coeffs = get_bits1(gb);
4492  if (mb_has_coeffs)
4493  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4494  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4495  GET_MQUANT();
4496  s->current_picture.qscale_table[mb_pos] = mquant;
4497  /* Set DC scale - y and c use the same (not sure if necessary here) */
4498  s->y_dc_scale = s->y_dc_scale_table[mquant];
4499  s->c_dc_scale = s->c_dc_scale_table[mquant];
4500  dst_idx = 0;
4501  for (i = 0; i < 6; i++) {
4502  s->dc_val[0][s->block_index[i]] = 0;
4503  dst_idx += i >> 2;
4504  val = ((cbp >> (5 - i)) & 1);
4505  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4506  v->a_avail = v->c_avail = 0;
4507  if (i == 2 || i == 3 || !s->first_slice_line)
4508  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4509  if (i == 1 || i == 3 || s->mb_x)
4510  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4511 
4512  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4513  (i & 4) ? v->codingset2 : v->codingset);
4514  if (i > 3 && (s->flags & CODEC_FLAG_GRAY))
4515  continue;
4516  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4517  if (i < 4) {
4518  stride_y = s->linesize << fieldtx;
4519  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
4520  } else {
4521  stride_y = s->uvlinesize;
4522  off = 0;
4523  }
4524  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
4525  }
4526  } else {
4527  s->mb_intra = v->is_intra[s->mb_x] = 0;
4528  if (!direct) {
4529  if (skipped || !s->mb_intra) {
4530  bmvtype = decode012(gb);
4531  switch (bmvtype) {
4532  case 0:
4533  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4534  break;
4535  case 1:
4536  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4537  break;
4538  case 2:
4539  bmvtype = BMV_TYPE_INTERPOLATED;
4540  }
4541  }
4542 
4543  if (twomv && bmvtype != BMV_TYPE_INTERPOLATED)
4544  mvsw = get_bits1(gb);
4545  }
4546 
4547  if (!skipped) { // inter MB
4548  mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
4549  if (mb_has_coeffs)
4550  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4551  if (!direct) {
4552  if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
4553  v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
4554  } else if (bmvtype == BMV_TYPE_INTERPOLATED || twomv) {
4555  v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
4556  }
4557  }
4558 
4559  for (i = 0; i < 6; i++)
4560  v->mb_type[0][s->block_index[i]] = 0;
4561  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[0][idx_mbmode][1];
4562  /* for all motion vector read MVDATA and motion compensate each block */
4563  dst_idx = 0;
4564  if (direct) {
4565  if (twomv) {
4566  for (i = 0; i < 4; i++) {
4567  vc1_mc_4mv_luma(v, i, 0, 0);
4568  vc1_mc_4mv_luma(v, i, 1, 1);
4569  }
4570  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4571  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4572  } else {
4573  vc1_mc_1mv(v, 0);
4574  vc1_interp_mc(v);
4575  }
4576  } else if (twomv && bmvtype == BMV_TYPE_INTERPOLATED) {
4577  mvbp = v->fourmvbp;
4578  for (i = 0; i < 4; i++) {
4579  dir = i==1 || i==3;
4580  dmv_x = dmv_y = 0;
4581  val = ((mvbp >> (3 - i)) & 1);
4582  if (val)
4583  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4584  j = i > 1 ? 2 : 0;
4585  vc1_pred_mv_intfr(v, j, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4586  vc1_mc_4mv_luma(v, j, dir, dir);
4587  vc1_mc_4mv_luma(v, j+1, dir, dir);
4588  }
4589 
4590  vc1_mc_4mv_chroma4(v, 0, 0, 0);
4591  vc1_mc_4mv_chroma4(v, 1, 1, 1);
4592  } else if (bmvtype == BMV_TYPE_INTERPOLATED) {
4593  mvbp = v->twomvbp;
4594  dmv_x = dmv_y = 0;
4595  if (mvbp & 2)
4596  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4597 
4598  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4599  vc1_mc_1mv(v, 0);
4600 
4601  dmv_x = dmv_y = 0;
4602  if (mvbp & 1)
4603  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4604 
4605  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4606  vc1_interp_mc(v);
4607  } else if (twomv) {
4608  dir = bmvtype == BMV_TYPE_BACKWARD;
4609  dir2 = dir;
4610  if (mvsw)
4611  dir2 = !dir;
4612  mvbp = v->twomvbp;
4613  dmv_x = dmv_y = 0;
4614  if (mvbp & 2)
4615  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4616  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir);
4617 
4618  dmv_x = dmv_y = 0;
4619  if (mvbp & 1)
4620  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4621  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], dir2);
4622 
4623  if (mvsw) {
4624  for (i = 0; i < 2; i++) {
4625  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4626  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4627  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4628  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4629  }
4630  } else {
4631  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4632  vc1_pred_mv_intfr(v, 2, 0, 0, 2, v->range_x, v->range_y, v->mb_type[0], !dir);
4633  }
4634 
4635  vc1_mc_4mv_luma(v, 0, dir, 0);
4636  vc1_mc_4mv_luma(v, 1, dir, 0);
4637  vc1_mc_4mv_luma(v, 2, dir2, 0);
4638  vc1_mc_4mv_luma(v, 3, dir2, 0);
4639  vc1_mc_4mv_chroma4(v, dir, dir2, 0);
4640  } else {
4641  dir = bmvtype == BMV_TYPE_BACKWARD;
4642 
4643  mvbp = ff_vc1_mbmode_intfrp[0][idx_mbmode][2];
4644  dmv_x = dmv_y = 0;
4645  if (mvbp)
4646  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
4647 
4648  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4649  v->blk_mv_type[s->block_index[0]] = 1;
4650  v->blk_mv_type[s->block_index[1]] = 1;
4651  v->blk_mv_type[s->block_index[2]] = 1;
4652  v->blk_mv_type[s->block_index[3]] = 1;
4653  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4654  for (i = 0; i < 2; i++) {
4655  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4656  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4657  }
4658  vc1_mc_1mv(v, dir);
4659  }
4660 
4661  if (cbp)
4662  GET_MQUANT(); // p. 227
4663  s->current_picture.qscale_table[mb_pos] = mquant;
4664  if (!v->ttmbf && cbp)
4665  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table, VC1_TTMB_VLC_BITS, 2);
4666  for (i = 0; i < 6; i++) {
4667  s->dc_val[0][s->block_index[i]] = 0;
4668  dst_idx += i >> 2;
4669  val = ((cbp >> (5 - i)) & 1);
4670  if (!fieldtx)
4671  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4672  else
4673  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
4674  if (val) {
4675  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4676  first_block, s->dest[dst_idx] + off,
4677  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
4678  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
4679  block_cbp |= pat << (i << 2);
4680  if (!v->ttmbf && ttmb < 8)
4681  ttmb = -1;
4682  first_block = 0;
4683  }
4684  }
4685 
4686  } else { // skipped
4687  dir = 0;
4688  for (i = 0; i < 6; i++) {
4689  v->mb_type[0][s->block_index[i]] = 0;
4690  s->dc_val[0][s->block_index[i]] = 0;
4691  }
4692  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
4693  s->current_picture.qscale_table[mb_pos] = 0;
4694  v->blk_mv_type[s->block_index[0]] = 0;
4695  v->blk_mv_type[s->block_index[1]] = 0;
4696  v->blk_mv_type[s->block_index[2]] = 0;
4697  v->blk_mv_type[s->block_index[3]] = 0;
4698 
4699  if (!direct) {
4700  if (bmvtype == BMV_TYPE_INTERPOLATED) {
4701  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
4702  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 1);
4703  } else {
4704  dir = bmvtype == BMV_TYPE_BACKWARD;
4705  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], dir);
4706  if (mvsw) {
4707  int dir2 = dir;
4708  if (mvsw)
4709  dir2 = !dir;
4710  for (i = 0; i < 2; i++) {
4711  s->mv[dir][i+2][0] = s->mv[dir][i][0] = s->current_picture.motion_val[dir][s->block_index[i+2]][0] = s->current_picture.motion_val[dir][s->block_index[i]][0];
4712  s->mv[dir][i+2][1] = s->mv[dir][i][1] = s->current_picture.motion_val[dir][s->block_index[i+2]][1] = s->current_picture.motion_val[dir][s->block_index[i]][1];
4713  s->mv[dir2][i+2][0] = s->mv[dir2][i][0] = s->current_picture.motion_val[dir2][s->block_index[i]][0] = s->current_picture.motion_val[dir2][s->block_index[i+2]][0];
4714  s->mv[dir2][i+2][1] = s->mv[dir2][i][1] = s->current_picture.motion_val[dir2][s->block_index[i]][1] = s->current_picture.motion_val[dir2][s->block_index[i+2]][1];
4715  }
4716  } else {
4717  v->blk_mv_type[s->block_index[0]] = 1;
4718  v->blk_mv_type[s->block_index[1]] = 1;
4719  v->blk_mv_type[s->block_index[2]] = 1;
4720  v->blk_mv_type[s->block_index[3]] = 1;
4721  vc1_pred_mv_intfr(v, 0, 0, 0, 2, v->range_x, v->range_y, 0, !dir);
4722  for (i = 0; i < 2; i++) {
4723  s->mv[!dir][i+2][0] = s->mv[!dir][i][0] = s->current_picture.motion_val[!dir][s->block_index[i+2]][0] = s->current_picture.motion_val[!dir][s->block_index[i]][0];
4724  s->mv[!dir][i+2][1] = s->mv[!dir][i][1] = s->current_picture.motion_val[!dir][s->block_index[i+2]][1] = s->current_picture.motion_val[!dir][s->block_index[i]][1];
4725  }
4726  }
4727  }
4728  }
4729 
4730  vc1_mc_1mv(v, dir);
4731  if (direct || bmvtype == BMV_TYPE_INTERPOLATED) {
4732  vc1_interp_mc(v);
4733  }
4734  }
4735  }
4736  if (s->mb_x == s->mb_width - 1)
4737  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4738  v->cbp[s->mb_x] = block_cbp;
4739  v->ttblk[s->mb_x] = block_tt;
4740  return 0;
4741 }
4742 
4743 /** Decode blocks of I-frame
4744  */
4745 static void vc1_decode_i_blocks(VC1Context *v)
4746 {
4747  int k, j;
4748  MpegEncContext *s = &v->s;
4749  int cbp, val;
4750  uint8_t *coded_val;
4751  int mb_pos;
4752 
4753  /* select codingmode used for VLC tables selection */
4754  switch (v->y_ac_table_index) {
4755  case 0:
4756  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4757  break;
4758  case 1:
4759  v->codingset = CS_HIGH_MOT_INTRA;
4760  break;
4761  case 2:
4762  v->codingset = CS_MID_RATE_INTRA;
4763  break;
4764  }
4765 
4766  switch (v->c_ac_table_index) {
4767  case 0:
4768  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4769  break;
4770  case 1:
4771  v->codingset2 = CS_HIGH_MOT_INTER;
4772  break;
4773  case 2:
4774  v->codingset2 = CS_MID_RATE_INTER;
4775  break;
4776  }
4777 
4778  /* Set DC scale - y and c use the same */
4779  s->y_dc_scale = s->y_dc_scale_table[v->pq];
4780  s->c_dc_scale = s->c_dc_scale_table[v->pq];
4781 
4782  //do frame decode
4783  s->mb_x = s->mb_y = 0;
4784  s->mb_intra = 1;
4785  s->first_slice_line = 1;
4786  for (s->mb_y = 0; s->mb_y < s->end_mb_y; s->mb_y++) {
4787  s->mb_x = 0;
4788  init_block_index(v);
4789  for (; s->mb_x < v->end_mb_x; s->mb_x++) {
4790  uint8_t *dst[6];
4791  ff_update_block_index(s);
4792  dst[0] = s->dest[0];
4793  dst[1] = dst[0] + 8;
4794  dst[2] = s->dest[0] + s->linesize * 8;
4795  dst[3] = dst[2] + 8;
4796  dst[4] = s->dest[1];
4797  dst[5] = s->dest[2];
4798  s->dsp.clear_blocks(s->block[0]);
4799  mb_pos = s->mb_x + s->mb_y * s->mb_width;
4800  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
4801  s->current_picture.qscale_table[mb_pos] = v->pq;
4802  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4803  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4804 
4805  // do actual MB decoding and displaying
4806  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4807  v->s.ac_pred = get_bits1(&v->s.gb);
4808 
4809  for (k = 0; k < 6; k++) {
4810  val = ((cbp >> (5 - k)) & 1);
4811 
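      /* For luma blocks the coded flag is itself coded predictively (as in
       * MSMPEG-4): the transmitted bit is XORed with the prediction from the
       * neighbouring blocks. */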
4812  if (k < 4) {
4813  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4814  val = val ^ pred;
4815  *coded_val = val;
4816  }
4817  cbp |= val << (5 - k);
4818 
4819  vc1_decode_i_block(v, s->block[k], k, val, (k < 4) ? v->codingset : v->codingset2);
4820 
4821  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4822  continue;
4823  v->vc1dsp.vc1_inv_trans_8x8(s->block[k]);
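      /* Range reduction doubles the reconstructed amplitude. In the overlap path
       * the blocks are still signed (put_signed_pixels_clamped adds the 128
       * bias), so a plain << 1 suffices; in the unsigned path
       * (y - 64) << 1 == 2*y - 128, the same scaling applied around mid-level. */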
4824  if (v->pq >= 9 && v->overlap) {
4825  if (v->rangeredfrm)
4826  for (j = 0; j < 64; j++)
4827  s->block[k][j] <<= 1;
4828  s->dsp.put_signed_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4829  } else {
4830  if (v->rangeredfrm)
4831  for (j = 0; j < 64; j++)
4832  s->block[k][j] = (s->block[k][j] - 64) << 1;
4833  s->dsp.put_pixels_clamped(s->block[k], dst[k], k & 4 ? s->uvlinesize : s->linesize);
4834  }
4835  }
4836 
4837  if (v->pq >= 9 && v->overlap) {
4838  if (s->mb_x) {
4839  v->vc1dsp.vc1_h_overlap(s->dest[0], s->linesize);
4840  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4841  if (!(s->flags & CODEC_FLAG_GRAY)) {
4842  v->vc1dsp.vc1_h_overlap(s->dest[1], s->uvlinesize);
4843  v->vc1dsp.vc1_h_overlap(s->dest[2], s->uvlinesize);
4844  }
4845  }
4846  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8, s->linesize);
4847  v->vc1dsp.vc1_h_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4848  if (!s->first_slice_line) {
4849  v->vc1dsp.vc1_v_overlap(s->dest[0], s->linesize);
4850  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8, s->linesize);
4851  if (!(s->flags & CODEC_FLAG_GRAY)) {
4852  v->vc1dsp.vc1_v_overlap(s->dest[1], s->uvlinesize);
4853  v->vc1dsp.vc1_v_overlap(s->dest[2], s->uvlinesize);
4854  }
4855  }
4856  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize, s->linesize);
4857  v->vc1dsp.vc1_v_overlap(s->dest[0] + 8 * s->linesize + 8, s->linesize);
4858  }
4859  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
4860 
4861  if (get_bits_count(&s->gb) > v->bits) {
4862  ff_er_add_slice(&s->er, 0, 0, s->mb_x, s->mb_y, ER_MB_ERROR);
4863  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4864  get_bits_count(&s->gb), v->bits);
4865  return;
4866  }
4867  }
4868  if (!v->s.loop_filter)
4869  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4870  else if (s->mb_y)
4871  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
4872 
4873  s->first_slice_line = 0;
4874  }
4875  if (v->s.loop_filter)
4876  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
4877 
4878  /* This is intentionally mb_height and not end_mb_y - unlike in advanced
4879  * profile, these only differ when decoding MSS2 rectangles. */
4880  ff_er_add_slice(&s->er, 0, 0, s->mb_width - 1, s->mb_height - 1, ER_MB_END);
4881 }
4882 
4883 /** Decode blocks of I-frame for advanced profile
4884  */
4885 static void vc1_decode_i_blocks_adv(VC1Context *v)
4886 {
4887  int k;
4888  MpegEncContext *s = &v->s;
4889  int cbp, val;
4890  uint8_t *coded_val;
4891  int mb_pos;
4892  int mquant = v->pq;
4893  int mqdiff;
4894  GetBitContext *gb = &s->gb;
4895 
4896  /* select codingmode used for VLC tables selection */
4897  switch (v->y_ac_table_index) {
4898  case 0:
4899  v->codingset = (v->pqindex <= 8) ? CS_HIGH_RATE_INTRA : CS_LOW_MOT_INTRA;
4900  break;
4901  case 1:
4902  v->codingset = CS_HIGH_MOT_INTRA;
4903  break;
4904  case 2:
4905  v->codingset = CS_MID_RATE_INTRA;
4906  break;
4907  }
4908 
4909  switch (v->c_ac_table_index) {
4910  case 0:
4911  v->codingset2 = (v->pqindex <= 8) ? CS_HIGH_RATE_INTER : CS_LOW_MOT_INTER;
4912  break;
4913  case 1:
4914  v->codingset2 = CS_HIGH_MOT_INTER;
4915  break;
4916  case 2:
4917  v->codingset2 = CS_MID_RATE_INTER;
4918  break;
4919  }
4920 
4921  // do frame decode
4922  s->mb_x = s->mb_y = 0;
4923  s->mb_intra = 1;
4924  s->first_slice_line = 1;
4925  s->mb_y = s->start_mb_y;
4926  if (s->start_mb_y) {
4927  s->mb_x = 0;
4928  init_block_index(v);
4929  memset(&s->coded_block[s->block_index[0] - s->b8_stride], 0,
4930  (1 + s->b8_stride) * sizeof(*s->coded_block));
4931  }
4932  for (; s->mb_y < s->end_mb_y; s->mb_y++) {
4933  s->mb_x = 0;
4934  init_block_index(v);
4935  for (;s->mb_x < s->mb_width; s->mb_x++) {
4936  int16_t (*block)[64] = v->block[v->cur_blk_idx];
4937  ff_update_block_index(s);
4938  s->dsp.clear_blocks(block[0]);
4939  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4940  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4941  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4942  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4943 
4944  // do actual MB decoding and displaying
4945  if (v->fieldtx_is_raw)
4946  v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
4947  cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
4948  if (v->acpred_is_raw)
4949  v->s.ac_pred = get_bits1(&v->s.gb);
4950  else
4951  v->s.ac_pred = v->acpred_plane[mb_pos];
4952 
4953  if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
4954  v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
4955 
4956  GET_MQUANT();
4957 
4958  s->current_picture.qscale_table[mb_pos] = mquant;
4959  /* Set DC scale - y and c use the same */
4960  s->y_dc_scale = s->y_dc_scale_table[mquant];
4961  s->c_dc_scale = s->c_dc_scale_table[mquant];
4962 
4963  for (k = 0; k < 6; k++) {
4964  val = ((cbp >> (5 - k)) & 1);
4965 
4966  if (k < 4) {
4967  int pred = vc1_coded_block_pred(&v->s, k, &coded_val);
4968  val = val ^ pred;
4969  *coded_val = val;
4970  }
4971  cbp |= val << (5 - k);
4972 
4973  v->a_avail = !s->first_slice_line || (k == 2 || k == 3);
4974  v->c_avail = !!s->mb_x || (k == 1 || k == 3);
4975 
4976  vc1_decode_i_block_adv(v, block[k], k, val,
4977  (k < 4) ? v->codingset : v->codingset2, mquant);
4978 
4979  if (k > 3 && (s->flags & CODEC_FLAG_GRAY))
4980  continue;
4981  v->vc1dsp.vc1_inv_trans_8x8(block[k]);
4982  }
4983 
4984  vc1_smooth_overlap_filter_iblk(v);
4985  vc1_put_signed_blocks_clamped(v);
4986  if (v->s.loop_filter) vc1_loop_filter_iblk_delayed(v, v->pq);
4987 
4988  if (get_bits_count(&s->gb) > v->bits) {
4989  // TODO: may need modification to handle slice coding
4990  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
4991  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i\n",
4992  get_bits_count(&s->gb), v->bits);
4993  return;
4994  }
4995  }
4996  if (!v->s.loop_filter)
4997  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
4998  else if (s->mb_y)
4999  ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
5000  s->first_slice_line = 0;
5001  }
5002 
5003  /* raw bottom MB row */
5004  s->mb_x = 0;
5005  init_block_index(v);
5006 
5007  for (;s->mb_x < s->mb_width; s->mb_x++) {
5008  ff_update_block_index(s);
5009  vc1_put_signed_blocks_clamped(v);
5010  if (v->s.loop_filter)
5011  vc1_loop_filter_iblk_delayed(v, v->pq);
5012  }
5013  if (v->s.loop_filter)
5014  ff_mpeg_draw_horiz_band(s, (s->end_mb_y-1)*16, 16);
5015  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5016  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5017 }
5018 
5019 static void vc1_decode_p_blocks(VC1Context *v)
5020 {
5021  MpegEncContext *s = &v->s;
5022  int apply_loop_filter;
5023 
5024  /* select codingmode used for VLC tables selection */
5025  switch (v->c_ac_table_index) {
5026  case 0:
5028  break;
5029  case 1:
5031  break;
5032  case 2:
5034  break;
5035  }
5036 
5037  switch (v->c_ac_table_index) {
5038  case 0:
5040  break;
5041  case 1:
5043  break;
5044  case 2:
5046  break;
5047  }
5048 
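 /* The in-loop deblocking filter is applied only to progressive frames and
  * only if the caller has not asked for it to be skipped. */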
5049  apply_loop_filter = s->loop_filter && !(s->avctx->skip_loop_filter >= AVDISCARD_NONKEY) &&
5050  v->fcm == PROGRESSIVE;
5051  s->first_slice_line = 1;
5052  memset(v->cbp_base, 0, sizeof(v->cbp_base[0])*2*s->mb_stride);
5053  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5054  s->mb_x = 0;
5055  init_block_index(v);
5056  for (; s->mb_x < s->mb_width; s->mb_x++) {
5058 
5059  if (v->fcm == ILACE_FIELD)
5061  else if (v->fcm == ILACE_FRAME)
5063  else vc1_decode_p_mb(v);
5064  if (s->mb_y != s->start_mb_y && apply_loop_filter)
5066  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5067  // TODO: may need modification to handle slice coding
5068  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5069  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5070  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5071  return;
5072  }
5073  }
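 /* Shift the per-MB state of the row just decoded (CBP, transform type,
  * intra flags, luma MVs) into the "previous row" half of the buffers so
  * the next row can use it as its top neighbour. */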
5074  memmove(v->cbp_base, v->cbp, sizeof(v->cbp_base[0]) * s->mb_stride);
5075  memmove(v->ttblk_base, v->ttblk, sizeof(v->ttblk_base[0]) * s->mb_stride);
5076  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
5077  memmove(v->luma_mv_base, v->luma_mv, sizeof(v->luma_mv_base[0]) * s->mb_stride);
5078  if (s->mb_y != s->start_mb_y) ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5079  s->first_slice_line = 0;
5080  }
5081  if (apply_loop_filter) {
5082  s->mb_x = 0;
5083  init_block_index(v);
5084  for (; s->mb_x < s->mb_width; s->mb_x++) {
5087  }
5088  }
5089  if (s->end_mb_y >= s->start_mb_y)
5090  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5091  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5092  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5093 }
5094 
5096 {
5097  MpegEncContext *s = &v->s;
5098 
5099  /* select the coding mode used for VLC table selection */
5100  switch (v->c_ac_table_index) {
5101  case 0:
5103  break;
5104  case 1:
5106  break;
5107  case 2:
5109  break;
5110  }
5111 
5112  switch (v->c_ac_table_index) {
5113  case 0:
5115  break;
5116  case 1:
5118  break;
5119  case 2:
5121  break;
5122  }
5123 
5124  s->first_slice_line = 1;
5125  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5126  s->mb_x = 0;
5127  init_block_index(v);
5128  for (; s->mb_x < s->mb_width; s->mb_x++) {
5130 
5131  if (v->fcm == ILACE_FIELD)
5133  else if (v->fcm == ILACE_FRAME)
5135  else
5136  vc1_decode_b_mb(v);
5137  if (get_bits_count(&s->gb) > v->bits || get_bits_count(&s->gb) < 0) {
5138  // TODO: may need modification to handle slice coding
5139  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
5140  av_log(s->avctx, AV_LOG_ERROR, "Bits overconsumption: %i > %i at %ix%i\n",
5141  get_bits_count(&s->gb), v->bits, s->mb_x, s->mb_y);
5142  return;
5143  }
5144  if (v->s.loop_filter) vc1_loop_filter_iblk(v, v->pq);
5145  }
5146  if (!v->s.loop_filter)
5147  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5148  else if (s->mb_y)
5149  ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
5150  s->first_slice_line = 0;
5151  }
5152  if (v->s.loop_filter)
5153  ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
5154  ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
5155  (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
5156 }
5157 
5159 {
5160  MpegEncContext *s = &v->s;
5161 
5162  if (!v->s.last_picture.f.data[0])
5163  return;
5164 
5165  ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
5166  s->first_slice_line = 1;
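 /* A skipped P-frame carries no data: copy the reference picture into the
  * current one macroblock row by macroblock row and output it directly. */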
5167  for (s->mb_y = s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
5168  s->mb_x = 0;
5169  init_block_index(v);
5171  memcpy(s->dest[0], s->last_picture.f.data[0] + s->mb_y * 16 * s->linesize, s->linesize * 16);
5172  memcpy(s->dest[1], s->last_picture.f.data[1] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5173  memcpy(s->dest[2], s->last_picture.f.data[2] + s->mb_y * 8 * s->uvlinesize, s->uvlinesize * 8);
5174  ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
5175  s->first_slice_line = 0;
5176  }
5178 }
5179 
5181 {
5182 
5183  v->s.esc3_level_length = 0;
5184  if (v->x8_type) {
5185  ff_intrax8_decode_picture(&v->x8, 2*v->pq + v->halfpq, v->pq * !v->pquantizer);
5186  } else {
5187  v->cur_blk_idx = 0;
5188  v->left_blk_idx = -1;
5189  v->topleft_blk_idx = 1;
5190  v->top_blk_idx = 2;
5191  switch (v->s.pict_type) {
5192  case AV_PICTURE_TYPE_I:
5193  if (v->profile == PROFILE_ADVANCED)
5195  else
5197  break;
5198  case AV_PICTURE_TYPE_P:
5199  if (v->p_frame_skipped)
5201  else
5203  break;
5204  case AV_PICTURE_TYPE_B:
5205  if (v->bi_type) {
5206  if (v->profile == PROFILE_ADVANCED)
5208  else
5210  } else
5212  break;
5213  }
5214  }
5215 }
5216 
5217 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
5218 
5219 typedef struct {
5220  /**
5221  * Transform coefficients for both sprites in 16.16 fixed point format,
5222  * in the order they appear in the bitstream:
5223  * x scale
5224  * rotation 1 (unused)
5225  * x offset
5226  * rotation 2 (unused)
5227  * y scale
5228  * y offset
5229  * alpha
5230  */
5231  int coefs[2][7];
5232 
5233  int effect_type, effect_flag;
5234  int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
5235  int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
5236 } SpriteData;
5237 
5238 static inline int get_fp_val(GetBitContext* gb)
5239 {
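 /* The 30-bit field is biased by 1 << 29; removing the bias and shifting
  * left by one gives a signed 16.16 fixed-point value, e.g. a raw value of
  * (1 << 29) + (1 << 15) decodes to 1 << 16, i.e. 1.0. */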
5240  return (get_bits_long(gb, 30) - (1 << 29)) << 1;
5241 }
5242 
5243 static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
5244 {
5245  c[1] = c[3] = 0;
5246 
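 /* The 2-bit mode selects which coefficients are coded explicitly:
  * 0 = offsets only (unit scale), 1 = one common scale factor,
  * 2 = independent x/y scales, 3 = the full set including the (unused)
  * rotation terms. The y offset and the optional alpha always follow. */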
5247  switch (get_bits(gb, 2)) {
5248  case 0:
5249  c[0] = 1 << 16;
5250  c[2] = get_fp_val(gb);
5251  c[4] = 1 << 16;
5252  break;
5253  case 1:
5254  c[0] = c[4] = get_fp_val(gb);
5255  c[2] = get_fp_val(gb);
5256  break;
5257  case 2:
5258  c[0] = get_fp_val(gb);
5259  c[2] = get_fp_val(gb);
5260  c[4] = get_fp_val(gb);
5261  break;
5262  case 3:
5263  c[0] = get_fp_val(gb);
5264  c[1] = get_fp_val(gb);
5265  c[2] = get_fp_val(gb);
5266  c[3] = get_fp_val(gb);
5267  c[4] = get_fp_val(gb);
5268  break;
5269  }
5270  c[5] = get_fp_val(gb);
5271  if (get_bits1(gb))
5272  c[6] = get_fp_val(gb);
5273  else
5274  c[6] = 1 << 16;
5275 }
5276 
5277 static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
5278 {
5279  AVCodecContext *avctx = v->s.avctx;
5280  int sprite, i;
5281 
5282  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5283  vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
5284  if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
5285  avpriv_request_sample(avctx, "Non-zero rotation coefficients");
5286  av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
5287  for (i = 0; i < 7; i++)
5288  av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
5289  sd->coefs[sprite][i] / (1<<16),
5290  (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1 << 16));
5291  av_log(avctx, AV_LOG_DEBUG, "\n");
5292  }
5293 
5294  skip_bits(gb, 2);
5295  if (sd->effect_type = get_bits_long(gb, 30)) {
5296  switch (sd->effect_pcount1 = get_bits(gb, 4)) {
5297  case 7:
5298  vc1_sprite_parse_transform(gb, sd->effect_params1);
5299  break;
5300  case 14:
5301  vc1_sprite_parse_transform(gb, sd->effect_params1);
5302  vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
5303  break;
5304  default:
5305  for (i = 0; i < sd->effect_pcount1; i++)
5306  sd->effect_params1[i] = get_fp_val(gb);
5307  }
5308  if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
5309  // effect 13 is simple alpha blending and matches the opacity above
5310  av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
5311  for (i = 0; i < sd->effect_pcount1; i++)
5312  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5313  sd->effect_params1[i] / (1 << 16),
5314  (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1 << 16));
5315  av_log(avctx, AV_LOG_DEBUG, "\n");
5316  }
5317 
5318  sd->effect_pcount2 = get_bits(gb, 16);
5319  if (sd->effect_pcount2 > 10) {
5320  av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
5321  return;
5322  } else if (sd->effect_pcount2) {
5323  i = -1;
5324  av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
5325  while (++i < sd->effect_pcount2) {
5326  sd->effect_params2[i] = get_fp_val(gb);
5327  av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
5328  sd->effect_params2[i] / (1 << 16),
5329  (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1 << 16));
5330  }
5331  av_log(avctx, AV_LOG_DEBUG, "\n");
5332  }
5333  }
5334  if (sd->effect_flag = get_bits1(gb))
5335  av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
5336 
5337  if (get_bits_count(gb) >= gb->size_in_bits +
5338  (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE ? 64 : 0))
5339  av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
5340  if (get_bits_count(gb) < gb->size_in_bits - 8)
5341  av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
5342 }
5343 
5344 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
5345 {
5346  int i, plane, row, sprite;
5347  int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
5348  uint8_t* src_h[2][2];
5349  int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
5350  int ysub[2];
5351  MpegEncContext *s = &v->s;
5352 
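 /* Clamp the 16.16 offsets and per-pixel advances so that sampling the
  * output never reads past the sprite dimensions. */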
5353  for (i = 0; i < 2; i++) {
5354  xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
5355  xadv[i] = sd->coefs[i][0];
5356  if (xadv[i] != 1<<16 || (v->sprite_width << 16) - (v->output_width << 16) - xoff[i])
5357  xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
5358 
5359  yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
5360  yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height << 16) - yoff[i]) / v->output_height);
5361  }
5362  alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
5363 
5364  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
5365  int width = v->output_width>>!!plane;
5366 
5367  for (row = 0; row < v->output_height>>!!plane; row++) {
5368  uint8_t *dst = v->sprite_output_frame->data[plane] +
5369  v->sprite_output_frame->linesize[plane] * row;
5370 
5371  for (sprite = 0; sprite <= v->two_sprites; sprite++) {
5372  uint8_t *iplane = s->current_picture.f.data[plane];
5373  int iline = s->current_picture.f.linesize[plane];
5374  int ycoord = yoff[sprite] + yadv[sprite] * row;
5375  int yline = ycoord >> 16;
5376  int next_line;
5377  ysub[sprite] = ycoord & 0xFFFF;
5378  if (sprite) {
5379  iplane = s->last_picture.f.data[plane];
5380  iline = s->last_picture.f.linesize[plane];
5381  }
5382  next_line = FFMIN(yline + 1, (v->sprite_height >> !!plane) - 1) * iline;
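 /* If the sprite needs no horizontal scaling or sub-pixel shift, the source
  * rows can be used directly; otherwise they are resampled into sr_rows[],
  * with sr_cache[] remembering which source line each buffer holds so a row
  * is only filtered once. */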
5383  if (!(xoff[sprite] & 0xFFFF) && xadv[sprite] == 1 << 16) {
5384  src_h[sprite][0] = iplane + (xoff[sprite] >> 16) + yline * iline;
5385  if (ysub[sprite])
5386  src_h[sprite][1] = iplane + (xoff[sprite] >> 16) + next_line;
5387  } else {
5388  if (sr_cache[sprite][0] != yline) {
5389  if (sr_cache[sprite][1] == yline) {
5390  FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
5391  FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
5392  } else {
5393  v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane + yline * iline, xoff[sprite], xadv[sprite], width);
5394  sr_cache[sprite][0] = yline;
5395  }
5396  }
5397  if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
5398  v->vc1dsp.sprite_h(v->sr_rows[sprite][1],
5399  iplane + next_line, xoff[sprite],
5400  xadv[sprite], width);
5401  sr_cache[sprite][1] = yline + 1;
5402  }
5403  src_h[sprite][0] = v->sr_rows[sprite][0];
5404  src_h[sprite][1] = v->sr_rows[sprite][1];
5405  }
5406  }
5407 
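 /* Vertical stage: with a single sprite, interpolate between the two cached
  * rows when there is a sub-pixel offset; with two sprites, blend them with
  * the 16.16 alpha from the second transform, picking the DSP routine that
  * matches which of the two needs vertical interpolation. */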
5408  if (!v->two_sprites) {
5409  if (ysub[0]) {
5410  v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
5411  } else {
5412  memcpy(dst, src_h[0][0], width);
5413  }
5414  } else {
5415  if (ysub[0] && ysub[1]) {
5416  v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
5417  src_h[1][0], src_h[1][1], ysub[1], alpha, width);
5418  } else if (ysub[0]) {
5419  v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
5420  src_h[1][0], alpha, width);
5421  } else if (ysub[1]) {
5422  v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
5423  src_h[0][0], (1<<16)-1-alpha, width);
5424  } else {
5425  v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
5426  }
5427  }
5428  }
5429 
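 /* The offsets are in luma resolution; halve them after the luma plane so
  * the half-resolution chroma planes are sampled at the right position. */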
5430  if (!plane) {
5431  for (i = 0; i < 2; i++) {
5432  xoff[i] >>= 1;
5433  yoff[i] >>= 1;
5434  }
5435  }
5436 
5437  }
5438 }
5439 
5440 
5441 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
5442 {
5443  int ret;
5444  MpegEncContext *s = &v->s;
5445  AVCodecContext *avctx = s->avctx;
5446  SpriteData sd;
5447 
5448  vc1_parse_sprites(v, gb, &sd);
5449 
5450  if (!s->current_picture.f.data[0]) {
5451  av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
5452  return -1;
5453  }
5454 
5455  if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
5456  av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
5457  v->two_sprites = 0;
5458  }
5459 
5461  if ((ret = ff_get_buffer(avctx, v->sprite_output_frame, 0)) < 0)
5462  return ret;
5463 
5464  vc1_draw_sprites(v, &sd);
5465 
5466  return 0;
5467 }
5468 
5469 static void vc1_sprite_flush(AVCodecContext *avctx)
5470 {
5471  VC1Context *v = avctx->priv_data;
5472  MpegEncContext *s = &v->s;
5473  AVFrame *f = &s->current_picture.f;
5474  int plane, i;
5475 
5476  /* Windows Media Image codecs have a convergence interval of two keyframes.
5477  Since we cannot enforce it, clear the missing sprite to black. This is
5478  wrong, but it looks better than doing nothing. */
5479 
5480  if (f->data[0])
5481  for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
5482  for (i = 0; i < v->sprite_height>>!!plane; i++)
5483  memset(f->data[plane] + i * f->linesize[plane],
5484  plane ? 128 : 0, f->linesize[plane]);
5485 }
5486 
5487 #endif
5488 
5490 {
5491  MpegEncContext *s = &v->s;
5492  int i;
5493 
5494  /* Allocate mb bitplanes */
5499  v->acpred_plane = av_malloc (s->mb_stride * s->mb_height);
5501 
5502  v->n_allocated_blks = s->mb_width + 2;
5503  v->block = av_malloc(sizeof(*v->block) * v->n_allocated_blks);
5504  v->cbp_base = av_malloc(sizeof(v->cbp_base[0]) * 2 * s->mb_stride);
5505  v->cbp = v->cbp_base + s->mb_stride;
5506  v->ttblk_base = av_malloc(sizeof(v->ttblk_base[0]) * 2 * s->mb_stride);
5507  v->ttblk = v->ttblk_base + s->mb_stride;
5508  v->is_intra_base = av_mallocz(sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
5509  v->is_intra = v->is_intra_base + s->mb_stride;
5510  v->luma_mv_base = av_malloc(sizeof(v->luma_mv_base[0]) * 2 * s->mb_stride);
5511  v->luma_mv = v->luma_mv_base + s->mb_stride;
5512 
5513  /* allocate block type info in such a way that it can be used with s->block_index[] */
5514  v->mb_type_base = av_malloc(s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5515  v->mb_type[0] = v->mb_type_base + s->b8_stride + 1;
5516  v->mb_type[1] = v->mb_type_base + s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride + 1;
5517  v->mb_type[2] = v->mb_type[1] + s->mb_stride * (s->mb_height + 1);
5518 
5519  /* allocate memory to store block level MV info */
5520  v->blk_mv_type_base = av_mallocz( s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5521  v->blk_mv_type = v->blk_mv_type_base + s->b8_stride + 1;
5522  v->mv_f_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5523  v->mv_f[0] = v->mv_f_base + s->b8_stride + 1;
5524  v->mv_f[1] = v->mv_f[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5525  v->mv_f_next_base = av_mallocz(2 * (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2));
5526  v->mv_f_next[0] = v->mv_f_next_base + s->b8_stride + 1;
5527  v->mv_f_next[1] = v->mv_f_next[0] + (s->b8_stride * (s->mb_height * 2 + 1) + s->mb_stride * (s->mb_height + 1) * 2);
5528 
5529  /* Init coded blocks info */
5530  if (v->profile == PROFILE_ADVANCED) {
5531 // if (alloc_bitplane(&v->over_flags_plane, s->mb_width, s->mb_height) < 0)
5532 // return -1;
5533 // if (alloc_bitplane(&v->ac_pred_plane, s->mb_width, s->mb_height) < 0)
5534 // return -1;
5535  }
5536 
5537  ff_intrax8_common_init(&v->x8,s);
5538 
5540  for (i = 0; i < 4; i++)
5541  if (!(v->sr_rows[i >> 1][i & 1] = av_malloc(v->output_width)))
5542  return AVERROR(ENOMEM);
5543  }
5544 
5545  if (!v->mv_type_mb_plane || !v->direct_mb_plane || !v->acpred_plane || !v->over_flags_plane ||
5546  !v->block || !v->cbp_base || !v->ttblk_base || !v->is_intra_base || !v->luma_mv_base ||
5547  !v->mb_type_base) {
5550  av_freep(&v->acpred_plane);
5552  av_freep(&v->block);
5553  av_freep(&v->cbp_base);
5554  av_freep(&v->ttblk_base);
5555  av_freep(&v->is_intra_base);
5556  av_freep(&v->luma_mv_base);
5557  av_freep(&v->mb_type_base);
5558  return AVERROR(ENOMEM);
5559  }
5560 
5561  return 0;
5562 }
5563 
5565 {
5566  int i;
5567  for (i = 0; i < 64; i++) {
5568 #define transpose(x) ((x >> 3) | ((x & 7) << 3))
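 /* transpose() swaps the 3-bit row and column parts of a 0..63 scan
  * position, i.e. it transposes the 8x8 scan order. */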
5569  v->zz_8x8[0][i] = transpose(ff_wmv1_scantable[0][i]);
5570  v->zz_8x8[1][i] = transpose(ff_wmv1_scantable[1][i]);
5571  v->zz_8x8[2][i] = transpose(ff_wmv1_scantable[2][i]);
5572  v->zz_8x8[3][i] = transpose(ff_wmv1_scantable[3][i]);
5574  }
5575  v->left_blk_sh = 0;
5576  v->top_blk_sh = 3;
5577 }
5578 
5579 /** Initialize a VC1/WMV3 decoder
5580  * @todo Handle VC-1 IDUs (transport level?)
5581  * @todo Decipher remaining bits in extradata
5582  */
5584 {
5585  VC1Context *v = avctx->priv_data;
5586  MpegEncContext *s = &v->s;
5587  GetBitContext gb;
5588  int ret;
5589 
5590  /* save the container output size for WMImage */
5591  v->output_width = avctx->width;
5592  v->output_height = avctx->height;
5593 
5594  if (!avctx->extradata_size || !avctx->extradata)
5595  return -1;
5596  if (!(avctx->flags & CODEC_FLAG_GRAY))
5597  avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
5598  else
5599  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
5600  avctx->hwaccel = ff_find_hwaccel(avctx->codec->id, avctx->pix_fmt);
5601  v->s.avctx = avctx;
5602  avctx->flags |= CODEC_FLAG_EMU_EDGE;
5603  v->s.flags |= CODEC_FLAG_EMU_EDGE;
5604 
5605  if ((ret = ff_vc1_init_common(v)) < 0)
5606  return ret;
5607  // ensure static VLC tables are initialized
5608  if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
5609  return ret;
5610  if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0)
5611  return ret;
5612  // Hack to ensure the above functions will be called
5613  // again once we know all necessary settings.
5614  // That this is necessary might indicate a bug.
5615  ff_vc1_decode_end(avctx);
5616 
5618  ff_vc1dsp_init(&v->vc1dsp);
5619 
5620  if (avctx->codec_id == AV_CODEC_ID_WMV3 || avctx->codec_id == AV_CODEC_ID_WMV3IMAGE) {
5621  int count = 0;
5622 
5623  // WMV3 appears to store a sequence header in the extradata;
5624  // an advanced-profile sequence header may precede the first frame.
5625  // The last byte of the extradata is a version number: 1 for the
5626  // samples we can decode.
5627 
5628  init_get_bits(&gb, avctx->extradata, avctx->extradata_size*8);
5629 
5630  if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0)
5631  return ret;
5632 
5633  count = avctx->extradata_size*8 - get_bits_count(&gb);
5634  if (count > 0) {
5635  av_log(avctx, AV_LOG_INFO, "Extra data: %i bits left, value: %X\n",
5636  count, get_bits(&gb, count));
5637  } else if (count < 0) {
5638  av_log(avctx, AV_LOG_INFO, "Read %i bits in overflow\n", -count);
5639  }
5640  } else { // VC1/WVC1/WVP2
5641  const uint8_t *start = avctx->extradata;
5642  uint8_t *end = avctx->extradata + avctx->extradata_size;
5643  const uint8_t *next;
5644  int size, buf2_size;
5645  uint8_t *buf2 = NULL;
5646  int seq_initialized = 0, ep_initialized = 0;
5647 
5648  if (avctx->extradata_size < 16) {
5649  av_log(avctx, AV_LOG_ERROR, "Extradata size too small: %i\n", avctx->extradata_size);
5650  return -1;
5651  }
5652 
5654  start = find_next_marker(start, end); // in WVC1 extradata the first byte is its size, but it can be 0 in mkv
5655  next = start;
5656  for (; next < end; start = next) {
5657  next = find_next_marker(start + 4, end);
5658  size = next - start - 4;
5659  if (size <= 0)
5660  continue;
5661  buf2_size = vc1_unescape_buffer(start + 4, size, buf2);
5662  init_get_bits(&gb, buf2, buf2_size * 8);
5663  switch (AV_RB32(start)) {
5664  case VC1_CODE_SEQHDR:
5665  if ((ret = ff_vc1_decode_sequence_header(avctx, v, &gb)) < 0) {
5666  av_free(buf2);
5667  return ret;
5668  }
5669  seq_initialized = 1;
5670  break;
5671  case VC1_CODE_ENTRYPOINT:
5672  if ((ret = ff_vc1_decode_entry_point(avctx, v, &gb)) < 0) {
5673  av_free(buf2);
5674  return ret;
5675  }
5676  ep_initialized = 1;
5677  break;
5678  }
5679  }
5680  av_free(buf2);
5681  if (!seq_initialized || !ep_initialized) {
5682  av_log(avctx, AV_LOG_ERROR, "Incomplete extradata\n");
5683  return -1;
5684  }
5685  v->res_sprite = (avctx->codec_id == AV_CODEC_ID_VC1IMAGE);
5686  }
5687 
5689  if (!v->sprite_output_frame)
5690  return AVERROR(ENOMEM);
5691 
5692  avctx->profile = v->profile;
5693  if (v->profile == PROFILE_ADVANCED)
5694  avctx->level = v->level;
5695 
5696  avctx->has_b_frames = !!avctx->max_b_frames;
5697 
5698  s->mb_width = (avctx->coded_width + 15) >> 4;
5699  s->mb_height = (avctx->coded_height + 15) >> 4;
5700 
5701  if (v->profile == PROFILE_ADVANCED || v->res_fasttx) {
5703  } else {
5704  memcpy(v->zz_8x8, ff_wmv1_scantable, 4*64);
5705  v->left_blk_sh = 3;
5706  v->top_blk_sh = 0;
5707  }
5708 
5709  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5710  v->sprite_width = avctx->coded_width;
5711  v->sprite_height = avctx->coded_height;
5712 
5713  avctx->coded_width = avctx->width = v->output_width;
5714  avctx->coded_height = avctx->height = v->output_height;
5715 
5716  // prevent 16.16 overflows
5717  if (v->sprite_width > 1 << 14 ||
5718  v->sprite_height > 1 << 14 ||
5719  v->output_width > 1 << 14 ||
5720  v->output_height > 1 << 14) return -1;
5721 
5722  if ((v->sprite_width&1) || (v->sprite_height&1)) {
5723  avpriv_request_sample(avctx, "odd sprites support");
5724  return AVERROR_PATCHWELCOME;
5725  }
5726  }
5727  return 0;
5728 }
5729 
5730 /** Close a VC1/WMV3 decoder
5731  * @warning Initial try at using MpegEncContext stuff
5732  */
5734 {
5735  VC1Context *v = avctx->priv_data;
5736  int i;
5737 
5739 
5740  for (i = 0; i < 4; i++)
5741  av_freep(&v->sr_rows[i >> 1][i & 1]);
5742  av_freep(&v->hrd_rate);
5743  av_freep(&v->hrd_buffer);
5744  ff_MPV_common_end(&v->s);
5748  av_freep(&v->fieldtx_plane);
5749  av_freep(&v->acpred_plane);
5751  av_freep(&v->mb_type_base);
5753  av_freep(&v->mv_f_base);
5754  av_freep(&v->mv_f_next_base);
5755  av_freep(&v->block);
5756  av_freep(&v->cbp_base);
5757  av_freep(&v->ttblk_base);
5758  av_freep(&v->is_intra_base); // FIXME use v->mb_type[]
5759  av_freep(&v->luma_mv_base);
5761  return 0;
5762 }
5763 
5764 
5765 /** Decode a VC1/WMV3 frame
5766  * @todo Handle VC-1 IDUs (transport level?)
5767  */
5768 static int vc1_decode_frame(AVCodecContext *avctx, void *data,
5769  int *got_frame, AVPacket *avpkt)
5770 {
5771  const uint8_t *buf = avpkt->data;
5772  int buf_size = avpkt->size, n_slices = 0, i, ret;
5773  VC1Context *v = avctx->priv_data;
5774  MpegEncContext *s = &v->s;
5775  AVFrame *pict = data;
5776  uint8_t *buf2 = NULL;
5777  const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
5778  int mb_height, n_slices1=-1;
5779  struct {
5780  uint8_t *buf;
5781  GetBitContext gb;
5782  int mby_start;
5783  } *slices = NULL, *tmp;
5784 
5785  v->second_field = 0;
5786 
5787  if(s->flags & CODEC_FLAG_LOW_DELAY)
5788  s->low_delay = 1;
5789 
5790  /* no supplementary picture */
5791  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == VC1_CODE_ENDOFSEQ)) {
5792  /* special case for last picture */
5793  if (s->low_delay == 0 && s->next_picture_ptr) {
5794  if ((ret = av_frame_ref(pict, &s->next_picture_ptr->f)) < 0)
5795  return ret;
5796  s->next_picture_ptr = NULL;
5797 
5798  *got_frame = 1;
5799  }
5800 
5801  return buf_size;
5802  }
5803 
5804  if (s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
5805  if (v->profile < PROFILE_ADVANCED)
5806  avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
5807  else
5808  avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
5809  }
5810 
5811  // for the advanced profile we may need to parse and unescape the data
5812  if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5813  int buf_size2 = 0;
5814  buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5815  if (!buf2)
5816  return AVERROR(ENOMEM);
5817 
5818  if (IS_MARKER(AV_RB32(buf))) { /* frame starts with marker and needs to be parsed */
5819  const uint8_t *start, *end, *next;
5820  int size;
5821 
5822  next = buf;
5823  for (start = buf, end = buf + buf_size; next < end; start = next) {
5824  next = find_next_marker(start + 4, end);
5825  size = next - start - 4;
5826  if (size <= 0) continue;
5827  switch (AV_RB32(start)) {
5828  case VC1_CODE_FRAME:
5829  if (avctx->hwaccel ||
5830  s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5831  buf_start = start;
5832  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5833  break;
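 /* Field and slice start codes are collected into slices[]: each entry gets
  * its own unescaped buffer and GetBitContext plus the macroblock row it
  * starts at, so the decode loop below can switch bitstreams per slice. */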
5834  case VC1_CODE_FIELD: {
5835  int buf_size3;
5836  if (avctx->hwaccel ||
5837  s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5838  buf_start_second_field = start;
5839  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5840  if (!tmp)
5841  goto err;
5842  slices = tmp;
5843  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5844  if (!slices[n_slices].buf)
5845  goto err;
5846  buf_size3 = vc1_unescape_buffer(start + 4, size,
5847  slices[n_slices].buf);
5848  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5849  buf_size3 << 3);
5850  /* assume the field marker is at the exact middle,
5851  and hope that is correct */
5852  slices[n_slices].mby_start = s->mb_height >> 1;
5853  n_slices1 = n_slices - 1; // index of the last slice of the first field
5854  n_slices++;
5855  break;
5856  }
5857  case VC1_CODE_ENTRYPOINT: /* it should appear before the frame data */
5858  buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
5859  init_get_bits(&s->gb, buf2, buf_size2 * 8);
5860  ff_vc1_decode_entry_point(avctx, v, &s->gb);
5861  break;
5862  case VC1_CODE_SLICE: {
5863  int buf_size3;
5864  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5865  if (!tmp)
5866  goto err;
5867  slices = tmp;
5868  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5869  if (!slices[n_slices].buf)
5870  goto err;
5871  buf_size3 = vc1_unescape_buffer(start + 4, size,
5872  slices[n_slices].buf);
5873  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5874  buf_size3 << 3);
5875  slices[n_slices].mby_start = get_bits(&slices[n_slices].gb, 9);
5876  n_slices++;
5877  break;
5878  }
5879  }
5880  }
5881  } else if (v->interlace && ((buf[0] & 0xC0) == 0xC0)) { /* WVC1 interlaced stores both fields separated by a marker */
5882  const uint8_t *divider;
5883  int buf_size3;
5884 
5885  divider = find_next_marker(buf, buf + buf_size);
5886  if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
5887  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
5888  goto err;
5889  } else { // found field marker, unescape second field
5890  if (avctx->hwaccel ||
5891  s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
5892  buf_start_second_field = divider;
5893  tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
5894  if (!tmp)
5895  goto err;
5896  slices = tmp;
5897  slices[n_slices].buf = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
5898  if (!slices[n_slices].buf)
5899  goto err;
5900  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
5901  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
5902  buf_size3 << 3);
5903  slices[n_slices].mby_start = s->mb_height >> 1;
5904  n_slices1 = n_slices - 1;
5905  n_slices++;
5906  }
5907  buf_size2 = vc1_unescape_buffer(buf, divider - buf, buf2);
5908  } else {
5909  buf_size2 = vc1_unescape_buffer(buf, buf_size, buf2);
5910  }
5911  init_get_bits(&s->gb, buf2, buf_size2*8);
5912  } else
5913  init_get_bits(&s->gb, buf, buf_size*8);
5914 
5915  if (v->res_sprite) {
5916  v->new_sprite = !get_bits1(&s->gb);
5917  v->two_sprites = get_bits1(&s->gb);
5918  /* res_sprite means a Windows Media Image stream; AV_CODEC_ID_*IMAGE means
5919  we're using the sprite compositor. These are intentionally kept separate
5920  so you can get the raw sprites by using the wmv3 decoder for WMVP or
5921  the vc1 decoder for WVP2. */
5922  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
5923  if (v->new_sprite) {
5924  // switch AVCodecContext parameters to those of the sprites
5925  avctx->width = avctx->coded_width = v->sprite_width;
5926  avctx->height = avctx->coded_height = v->sprite_height;
5927  } else {
5928  goto image;
5929  }
5930  }
5931  }
5932 
5933  if (s->context_initialized &&
5934  (s->width != avctx->coded_width ||
5935  s->height != avctx->coded_height)) {
5936  ff_vc1_decode_end(avctx);
5937  }
5938 
5939  if (!s->context_initialized) {
5940  if (ff_msmpeg4_decode_init(avctx) < 0)
5941  goto err;
5942  if (ff_vc1_decode_init_alloc_tables(v) < 0) {
5943  ff_MPV_common_end(s);
5944  goto err;
5945  }
5946 
5947  s->low_delay = !avctx->has_b_frames || v->res_sprite;
5948 
5949  if (v->profile == PROFILE_ADVANCED) {
5950  if(avctx->coded_width<=1 || avctx->coded_height<=1)
5951  goto err;
5952  s->h_edge_pos = avctx->coded_width;
5953  s->v_edge_pos = avctx->coded_height;
5954  }
5955  }
5956 
5957  /* We need to set current_picture_ptr before reading the header,
5958  * otherwise we cannot store anything in it. */
5959  if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
5960  int i = ff_find_unused_picture(s, 0);
5961  if (i < 0)
5962  goto err;
5963  s->current_picture_ptr = &s->picture[i];
5964  }
5965 
5966  // parse the frame header
5967  v->pic_header_flag = 0;
5968  v->first_pic_header_flag = 1;
5969  if (v->profile < PROFILE_ADVANCED) {
5970  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
5971  goto err;
5972  }
5973  } else {
5974  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
5975  goto err;
5976  }
5977  }
5978  v->first_pic_header_flag = 0;
5979 
5980  if (avctx->debug & FF_DEBUG_PICT_INFO)
5981  av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
5982 
5983  if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
5984  && s->pict_type != AV_PICTURE_TYPE_I) {
5985  av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
5986  goto err;
5987  }
5988 
5989  if ((s->mb_height >> v->field_mode) == 0) {
5990  av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
5991  goto err;
5992  }
5993 
5994  // process pulldown flags
5996  // Pulldown flags are only valid when 'broadcast' has been set,
5997  // so ticks_per_frame will be 2.
5998  if (v->rff) {
5999  // repeat field
6001  } else if (v->rptfrm) {
6002  // repeat frames
6003  s->current_picture_ptr->f.repeat_pict = v->rptfrm * 2;
6004  }
6005 
6006  // for skipping the frame
6009 
6010  /* skip B-frames if we don't have reference frames */
6011  if (s->last_picture_ptr == NULL && (s->pict_type == AV_PICTURE_TYPE_B || s->droppable)) {
6012  goto err;
6013  }
6014  if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
6015  (avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I) ||
6016  avctx->skip_frame >= AVDISCARD_ALL) {
6017  goto end;
6018  }
6019 
6020  if (s->next_p_frame_damaged) {
6021  if (s->pict_type == AV_PICTURE_TYPE_B)
6022  goto end;
6023  else
6024  s->next_p_frame_damaged = 0;
6025  }
6026 
6027  if (ff_MPV_frame_start(s, avctx) < 0) {
6028  goto err;
6029  }
6030 
6033 
6036 
6037  if ((CONFIG_VC1_VDPAU_DECODER)
6038  &&s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
6039  if (v->field_mode && buf_start_second_field) {
6040  ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
6041  ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
6042  } else {
6043  ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
6044  }
6045  } else if (avctx->hwaccel) {
6046  if (v->field_mode && buf_start_second_field) {
6047  // decode first field
6049  if (avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6050  goto err;
6051  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start) < 0)
6052  goto err;
6053  if (avctx->hwaccel->end_frame(avctx) < 0)
6054  goto err;
6055 
6056  // decode second field
6057  s->gb = slices[n_slices1 + 1].gb;
6059  v->second_field = 1;
6060  v->pic_header_flag = 0;
6061  if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
6062  av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed\n");
6063  goto err;
6064  }
6066 
6067  if (avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6068  goto err;
6069  if (avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field) < 0)
6070  goto err;
6071  if (avctx->hwaccel->end_frame(avctx) < 0)
6072  goto err;
6073  } else {
6075  if (avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6076  goto err;
6077  if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
6078  goto err;
6079  if (avctx->hwaccel->end_frame(avctx) < 0)
6080  goto err;
6081  }
6082  } else {
6083  int header_ret = 0;
6084 
6086 
6087  v->bits = buf_size * 8;
6088  v->end_mb_x = s->mb_width;
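 /* In field mode each field is decoded as a half-height picture: double the
  * line sizes so stepping one row skips the other field's lines, and halve
  * the macroblock height accordingly. */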
6089  if (v->field_mode) {
6090  s->current_picture.f.linesize[0] <<= 1;
6091  s->current_picture.f.linesize[1] <<= 1;
6092  s->current_picture.f.linesize[2] <<= 1;
6093  s->linesize <<= 1;
6094  s->uvlinesize <<= 1;
6095  }
6096  mb_height = s->mb_height >> v->field_mode;
6097 
6098  av_assert0 (mb_height > 0);
6099 
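 /* Decode the picture slice by slice; a slice whose start row lies beyond
  * the first field belongs to the second field, so the block and MB offsets
  * are moved into the lower half of the frame-sized arrays. */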
6100  for (i = 0; i <= n_slices; i++) {
6101  if (i > 0 && slices[i - 1].mby_start >= mb_height) {
6102  if (v->field_mode <= 0) {
6103  av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
6104  "picture boundary (%d >= %d)\n", i,
6105  slices[i - 1].mby_start, mb_height);
6106  continue;
6107  }
6108  v->second_field = 1;
6109  v->blocks_off = s->b8_stride * (s->mb_height&~1);
6110  v->mb_off = s->mb_stride * s->mb_height >> 1;
6111  } else {
6112  v->second_field = 0;
6113  v->blocks_off = 0;
6114  v->mb_off = 0;
6115  }
6116  if (i) {
6117  v->pic_header_flag = 0;
6118  if (v->field_mode && i == n_slices1 + 2) {
6119  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6120  av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
6121  if (avctx->err_recognition & AV_EF_EXPLODE)
6122  goto err;
6123  continue;
6124  }
6125  } else if (get_bits1(&s->gb)) {
6126  v->pic_header_flag = 1;
6127  if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
6128  av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
6129  if (avctx->err_recognition & AV_EF_EXPLODE)
6130  goto err;
6131  continue;
6132  }
6133  }
6134  }
6135  if (header_ret < 0)
6136  continue;
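 /* Work out the vertical extent of this pass: from the start row of the
  * corresponding slice up to the start row of the next one, or to the
  * bottom of the field/picture for the last slice. */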
6137  s->start_mb_y = (i == 0) ? 0 : FFMAX(0, slices[i-1].mby_start % mb_height);
6138  if (!v->field_mode || v->second_field)
6139  s->end_mb_y = (i == n_slices ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6140  else {
6141  if (i >= n_slices) {
6142  av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
6143  continue;
6144  }
6145  s->end_mb_y = (i <= n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
6146  }
6147  if (s->end_mb_y <= s->start_mb_y) {
6148  av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
6149  continue;
6150  }
6151  if (!v->p_frame_skipped && s->pict_type != AV_PICTURE_TYPE_I && !v->cbpcy_vlc) {
6152  av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
6153  continue;
6154  }
6156  if (i != n_slices)
6157  s->gb = slices[i].gb;
6158  }
6159  if (v->field_mode) {
6160  v->second_field = 0;
6161  s->current_picture.f.linesize[0] >>= 1;
6162  s->current_picture.f.linesize[1] >>= 1;
6163  s->current_picture.f.linesize[2] >>= 1;
6164  s->linesize >>= 1;
6165  s->uvlinesize >>= 1;
6167  FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
6168  FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
6169  }
6170  }
6171  av_dlog(s->avctx, "Consumed %i/%i bits\n",
6172  get_bits_count(&s->gb), s->gb.size_in_bits);
6173 // if (get_bits_count(&s->gb) > buf_size * 8)
6174 // return -1;
6176  goto err;
6177  if (!v->field_mode)
6178  ff_er_frame_end(&s->er);
6179  }
6180 
6181  ff_MPV_frame_end(s);
6182 
6183  if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
6184 image:
6185  avctx->width = avctx->coded_width = v->output_width;
6186  avctx->height = avctx->coded_height = v->output_height;
6187  if (avctx->skip_frame >= AVDISCARD_NONREF)
6188  goto end;
6189 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
6190  if (vc1_decode_sprites(v, &s->gb))
6191  goto err;
6192 #endif
6193  if ((ret = av_frame_ref(pict, v->sprite_output_frame)) < 0)
6194  goto err;
6195  *got_frame = 1;
6196  } else {
6197  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
6198  if ((ret = av_frame_ref(pict, &s->current_picture_ptr->f)) < 0)
6199  goto err;
6201  } else if (s->last_picture_ptr != NULL) {
6202  if ((ret = av_frame_ref(pict, &s->last_picture_ptr->f)) < 0)
6203  goto err;
6205  }
6206  if (s->last_picture_ptr || s->low_delay) {
6207  *got_frame = 1;
6208  }
6209  }
6210 
6211 end:
6212  av_free(buf2);
6213  for (i = 0; i < n_slices; i++)
6214  av_free(slices[i].buf);
6215  av_free(slices);
6216  return buf_size;
6217 
6218 err:
6219  av_free(buf2);
6220  for (i = 0; i < n_slices; i++)
6221  av_free(slices[i].buf);
6222  av_free(slices);
6223  return -1;
6224 }
6225 
6226 
6227 static const AVProfile profiles[] = {
6228  { FF_PROFILE_VC1_SIMPLE, "Simple" },
6229  { FF_PROFILE_VC1_MAIN, "Main" },
6230  { FF_PROFILE_VC1_COMPLEX, "Complex" },
6231  { FF_PROFILE_VC1_ADVANCED, "Advanced" },
6232  { FF_PROFILE_UNKNOWN },
6233 };
6234 
6236 #if CONFIG_DXVA2
6238 #endif
6239 #if CONFIG_VAAPI
6241 #endif
6242 #if CONFIG_VDPAU
6244 #endif
6247 };
6248 
6250  .name = "vc1",
6251  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1"),
6252  .type = AVMEDIA_TYPE_VIDEO,
6253  .id = AV_CODEC_ID_VC1,
6254  .priv_data_size = sizeof(VC1Context),
6255  .init = vc1_decode_init,
6258  .flush = ff_mpeg_flush,
6259  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6260  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6261  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6262 };
6263 
6264 #if CONFIG_WMV3_DECODER
6265 AVCodec ff_wmv3_decoder = {
6266  .name = "wmv3",
6267  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9"),
6268  .type = AVMEDIA_TYPE_VIDEO,
6269  .id = AV_CODEC_ID_WMV3,
6270  .priv_data_size = sizeof(VC1Context),
6271  .init = vc1_decode_init,
6274  .flush = ff_mpeg_flush,
6275  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
6276  .pix_fmts = vc1_hwaccel_pixfmt_list_420,
6277  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6278 };
6279 #endif
6280 
6281 #if CONFIG_WMV3_VDPAU_DECODER
6282 AVCodec ff_wmv3_vdpau_decoder = {
6283  .name = "wmv3_vdpau",
6284  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"),
6285  .type = AVMEDIA_TYPE_VIDEO,
6286  .id = AV_CODEC_ID_WMV3,
6287  .priv_data_size = sizeof(VC1Context),
6288  .init = vc1_decode_init,
6291  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6292  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE },
6293  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6294 };
6295 #endif
6296 
6297 #if CONFIG_VC1_VDPAU_DECODER
6298 AVCodec ff_vc1_vdpau_decoder = {
6299  .name = "vc1_vdpau",
6300  .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"),
6301  .type = AVMEDIA_TYPE_VIDEO,
6302  .id = AV_CODEC_ID_VC1,
6303  .priv_data_size = sizeof(VC1Context),
6304  .init = vc1_decode_init,
6307  .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
6308  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE },
6309  .profiles = NULL_IF_CONFIG_SMALL(profiles)
6310 };
6311 #endif
6312 
6313 #if CONFIG_WMV3IMAGE_DECODER
6314 AVCodec ff_wmv3image_decoder = {
6315  .name = "wmv3image",
6316  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
6317  .type = AVMEDIA_TYPE_VIDEO,
6318  .id = AV_CODEC_ID_WMV3IMAGE,
6319  .priv_data_size = sizeof(VC1Context),
6320  .init = vc1_decode_init,
6323  .capabilities = CODEC_CAP_DR1,
6324  .flush = vc1_sprite_flush,
6325  .pix_fmts = ff_pixfmt_list_420
6326 };
6327 #endif
6328 
6329 #if CONFIG_VC1IMAGE_DECODER
6330 AVCodec ff_vc1image_decoder = {
6331  .name = "vc1image",
6332  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
6333  .type = AVMEDIA_TYPE_VIDEO,
6334  .id = AV_CODEC_ID_VC1IMAGE,
6335  .priv_data_size = sizeof(VC1Context),
6336  .init = vc1_decode_init,
6339  .capabilities = CODEC_CAP_DR1,
6340  .flush = vc1_sprite_flush,
6341  .pix_fmts = ff_pixfmt_list_420
6342 };
6343 #endif