FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
vc1dec.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 decoder
27  */
28 
29 #include "internal.h"
30 #include "avcodec.h"
31 #include "error_resilience.h"
32 #include "mpegvideo.h"
33 #include "h263.h"
34 #include "h264chroma.h"
35 #include "vc1.h"
36 #include "vc1data.h"
37 #include "vc1acdata.h"
38 #include "msmpeg4data.h"
39 #include "unary.h"
40 #include "mathops.h"
41 #include "vdpau_internal.h"
42 #include "libavutil/avassert.h"
43 
44 #undef NDEBUG
45 #include <assert.h>
46 
/* VLC table depths (max bits consumed per lookup step).
 * NOTE(review): the VLC tables these sizes apply to are initialized outside
 * this chunk — confirm at the init_vlc()/get_vlc2() use sites. */
#define MB_INTRA_VLC_BITS 9
#define DC_VLC_BITS 9


// offset tables for interlaced picture MVDATA decoding
// offset_table1[k] = 1 << (k - 1) for k > 0 (powers of two);
// offset_table2[k] = (1 << k) - 1 (all-ones below bit k).
// NOTE(review): the index presumably comes from a decoded MV size/escape
// code in the interlaced MV parser — use sites are not visible here; verify.
static const int offset_table1[9] = { 0, 1, 2, 4, 8, 16, 32, 64, 128 };
static const int offset_table2[9] = { 0, 1, 3, 7, 15, 31, 63, 127, 255 };
54 
55 /***********************************************************************/
56 /**
57  * @name VC-1 Bitplane decoding
58  * @see 8.7, p56
59  * @{
60  */
61 
62 
64 {
65  MpegEncContext *s = &v->s;
67  if (v->field_mode && !(v->second_field ^ v->tff)) {
68  s->dest[0] += s->current_picture_ptr->f.linesize[0];
69  s->dest[1] += s->current_picture_ptr->f.linesize[1];
70  s->dest[2] += s->current_picture_ptr->f.linesize[2];
71  }
72 }
73 
74 /** @} */ //Bitplane group
75 
77 {
78  MpegEncContext *s = &v->s;
79  int topleft_mb_pos, top_mb_pos;
80  int stride_y, fieldtx = 0;
81  int v_dist;
82 
83  /* The put pixels loop is always one MB row behind the decoding loop,
84  * because we can only put pixels when overlap filtering is done, and
85  * for filtering of the bottom edge of a MB, we need the next MB row
86  * present as well.
87  * Within the row, the put pixels loop is also one MB col behind the
88  * decoding loop. The reason for this is again, because for filtering
89  * of the right MB edge, we need the next MB present. */
90  if (!s->first_slice_line) {
91  if (s->mb_x) {
92  topleft_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x - 1;
93  if (v->fcm == ILACE_FRAME)
94  fieldtx = v->fieldtx_plane[topleft_mb_pos];
95  stride_y = s->linesize << fieldtx;
96  v_dist = (16 - fieldtx) >> (fieldtx == 0);
98  s->dest[0] - 16 * s->linesize - 16,
99  stride_y);
101  s->dest[0] - 16 * s->linesize - 8,
102  stride_y);
104  s->dest[0] - v_dist * s->linesize - 16,
105  stride_y);
107  s->dest[0] - v_dist * s->linesize - 8,
108  stride_y);
110  s->dest[1] - 8 * s->uvlinesize - 8,
111  s->uvlinesize);
113  s->dest[2] - 8 * s->uvlinesize - 8,
114  s->uvlinesize);
115  }
116  if (s->mb_x == s->mb_width - 1) {
117  top_mb_pos = (s->mb_y - 1) * s->mb_stride + s->mb_x;
118  if (v->fcm == ILACE_FRAME)
119  fieldtx = v->fieldtx_plane[top_mb_pos];
120  stride_y = s->linesize << fieldtx;
121  v_dist = fieldtx ? 15 : 8;
123  s->dest[0] - 16 * s->linesize,
124  stride_y);
126  s->dest[0] - 16 * s->linesize + 8,
127  stride_y);
129  s->dest[0] - v_dist * s->linesize,
130  stride_y);
132  s->dest[0] - v_dist * s->linesize + 8,
133  stride_y);
135  s->dest[1] - 8 * s->uvlinesize,
136  s->uvlinesize);
138  s->dest[2] - 8 * s->uvlinesize,
139  s->uvlinesize);
140  }
141  }
142 
143 #define inc_blk_idx(idx) do { \
144  idx++; \
145  if (idx >= v->n_allocated_blks) \
146  idx = 0; \
147  } while (0)
148 
153 }
154 
155 static void vc1_loop_filter_iblk(VC1Context *v, int pq)
156 {
157  MpegEncContext *s = &v->s;
158  int j;
159  if (!s->first_slice_line) {
160  v->vc1dsp.vc1_v_loop_filter16(s->dest[0], s->linesize, pq);
161  if (s->mb_x)
162  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
163  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
164  for (j = 0; j < 2; j++) {
165  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1], s->uvlinesize, pq);
166  if (s->mb_x)
167  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
168  }
169  }
170  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] + 8 * s->linesize, s->linesize, pq);
171 
172  if (s->mb_y == s->end_mb_y - 1) {
173  if (s->mb_x) {
174  v->vc1dsp.vc1_h_loop_filter16(s->dest[0], s->linesize, pq);
175  v->vc1dsp.vc1_h_loop_filter8(s->dest[1], s->uvlinesize, pq);
176  v->vc1dsp.vc1_h_loop_filter8(s->dest[2], s->uvlinesize, pq);
177  }
178  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] + 8, s->linesize, pq);
179  }
180 }
181 
183 {
184  MpegEncContext *s = &v->s;
185  int j;
186 
187  /* The loopfilter runs 1 row and 1 column behind the overlap filter, which
188  * means it runs two rows/cols behind the decoding loop. */
189  if (!s->first_slice_line) {
190  if (s->mb_x) {
191  if (s->mb_y >= s->start_mb_y + 2) {
192  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
193 
194  if (s->mb_x >= 2)
195  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 16, s->linesize, pq);
196  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize - 8, s->linesize, pq);
197  for (j = 0; j < 2; j++) {
198  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
199  if (s->mb_x >= 2) {
200  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize - 8, s->uvlinesize, pq);
201  }
202  }
203  }
204  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize - 16, s->linesize, pq);
205  }
206 
207  if (s->mb_x == s->mb_width - 1) {
208  if (s->mb_y >= s->start_mb_y + 2) {
209  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
210 
211  if (s->mb_x)
212  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize, s->linesize, pq);
213  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 32 * s->linesize + 8, s->linesize, pq);
214  for (j = 0; j < 2; j++) {
215  v->vc1dsp.vc1_v_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
216  if (s->mb_x >= 2) {
217  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 16 * s->uvlinesize, s->uvlinesize, pq);
218  }
219  }
220  }
221  v->vc1dsp.vc1_v_loop_filter16(s->dest[0] - 8 * s->linesize, s->linesize, pq);
222  }
223 
224  if (s->mb_y == s->end_mb_y) {
225  if (s->mb_x) {
226  if (s->mb_x >= 2)
227  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 16, s->linesize, pq);
228  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize - 8, s->linesize, pq);
229  if (s->mb_x >= 2) {
230  for (j = 0; j < 2; j++) {
231  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize - 8, s->uvlinesize, pq);
232  }
233  }
234  }
235 
236  if (s->mb_x == s->mb_width - 1) {
237  if (s->mb_x)
238  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize, s->linesize, pq);
239  v->vc1dsp.vc1_h_loop_filter16(s->dest[0] - 16 * s->linesize + 8, s->linesize, pq);
240  if (s->mb_x) {
241  for (j = 0; j < 2; j++) {
242  v->vc1dsp.vc1_h_loop_filter8(s->dest[j + 1] - 8 * s->uvlinesize, s->uvlinesize, pq);
243  }
244  }
245  }
246  }
247  }
248 }
249 
251 {
252  MpegEncContext *s = &v->s;
253  int mb_pos;
254 
255  if (v->condover == CONDOVER_NONE)
256  return;
257 
258  mb_pos = s->mb_x + s->mb_y * s->mb_stride;
259 
260  /* Within a MB, the horizontal overlap always runs before the vertical.
261  * To accomplish that, we run the H on left and internal borders of the
262  * currently decoded MB. Then, we wait for the next overlap iteration
263  * to do H overlap on the right edge of this MB, before moving over and
264  * running the V overlap. Therefore, the V overlap makes us trail by one
265  * MB col and the H overlap filter makes us trail by one MB row. This
266  * is reflected in the time at which we run the put_pixels loop. */
267  if (v->condover == CONDOVER_ALL || v->pq >= 9 || v->over_flags_plane[mb_pos]) {
268  if (s->mb_x && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
269  v->over_flags_plane[mb_pos - 1])) {
271  v->block[v->cur_blk_idx][0]);
273  v->block[v->cur_blk_idx][2]);
274  if (!(s->flags & CODEC_FLAG_GRAY)) {
276  v->block[v->cur_blk_idx][4]);
278  v->block[v->cur_blk_idx][5]);
279  }
280  }
282  v->block[v->cur_blk_idx][1]);
284  v->block[v->cur_blk_idx][3]);
285 
286  if (s->mb_x == s->mb_width - 1) {
287  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
288  v->over_flags_plane[mb_pos - s->mb_stride])) {
290  v->block[v->cur_blk_idx][0]);
292  v->block[v->cur_blk_idx][1]);
293  if (!(s->flags & CODEC_FLAG_GRAY)) {
295  v->block[v->cur_blk_idx][4]);
297  v->block[v->cur_blk_idx][5]);
298  }
299  }
301  v->block[v->cur_blk_idx][2]);
303  v->block[v->cur_blk_idx][3]);
304  }
305  }
306  if (s->mb_x && (v->condover == CONDOVER_ALL || v->over_flags_plane[mb_pos - 1])) {
307  if (!s->first_slice_line && (v->condover == CONDOVER_ALL || v->pq >= 9 ||
308  v->over_flags_plane[mb_pos - s->mb_stride - 1])) {
310  v->block[v->left_blk_idx][0]);
312  v->block[v->left_blk_idx][1]);
313  if (!(s->flags & CODEC_FLAG_GRAY)) {
315  v->block[v->left_blk_idx][4]);
317  v->block[v->left_blk_idx][5]);
318  }
319  }
321  v->block[v->left_blk_idx][2]);
323  v->block[v->left_blk_idx][3]);
324  }
325 }
326 
327 /** Do motion compensation over 1 macroblock
328  * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
329  */
330 static void vc1_mc_1mv(VC1Context *v, int dir)
331 {
332  MpegEncContext *s = &v->s;
333  H264ChromaContext *h264chroma = &v->h264chroma;
334  uint8_t *srcY, *srcU, *srcV;
335  int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
336  int v_edge_pos = s->v_edge_pos >> v->field_mode;
337  int i;
338  uint8_t (*luty)[256], (*lutuv)[256];
339  int use_ic;
340 
341  if ((!v->field_mode ||
342  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
343  !v->s.last_picture.f.data[0])
344  return;
345 
346  mx = s->mv[dir][0][0];
347  my = s->mv[dir][0][1];
348 
349  // store motion vectors for further use in B frames
350  if (s->pict_type == AV_PICTURE_TYPE_P) {
351  for (i = 0; i < 4; i++) {
352  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
353  s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
354  }
355  }
356 
357  uvmx = (mx + ((mx & 3) == 3)) >> 1;
358  uvmy = (my + ((my & 3) == 3)) >> 1;
359  v->luma_mv[s->mb_x][0] = uvmx;
360  v->luma_mv[s->mb_x][1] = uvmy;
361 
362  if (v->field_mode &&
363  v->cur_field_type != v->ref_field_type[dir]) {
364  my = my - 2 + 4 * v->cur_field_type;
365  uvmy = uvmy - 2 + 4 * v->cur_field_type;
366  }
367 
368  // fastuvmc shall be ignored for interlaced frame picture
369  if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
370  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
371  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
372  }
373  if (!dir) {
374  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
375  srcY = s->current_picture.f.data[0];
376  srcU = s->current_picture.f.data[1];
377  srcV = s->current_picture.f.data[2];
378  luty = v->curr_luty;
379  lutuv = v->curr_lutuv;
380  use_ic = *v->curr_use_ic;
381  } else {
382  srcY = s->last_picture.f.data[0];
383  srcU = s->last_picture.f.data[1];
384  srcV = s->last_picture.f.data[2];
385  luty = v->last_luty;
386  lutuv = v->last_lutuv;
387  use_ic = v->last_use_ic;
388  }
389  } else {
390  srcY = s->next_picture.f.data[0];
391  srcU = s->next_picture.f.data[1];
392  srcV = s->next_picture.f.data[2];
393  luty = v->next_luty;
394  lutuv = v->next_lutuv;
395  use_ic = v->next_use_ic;
396  }
397 
398  if (!srcY || !srcU) {
399  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
400  return;
401  }
402 
403  src_x = s->mb_x * 16 + (mx >> 2);
404  src_y = s->mb_y * 16 + (my >> 2);
405  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
406  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
407 
408  if (v->profile != PROFILE_ADVANCED) {
409  src_x = av_clip( src_x, -16, s->mb_width * 16);
410  src_y = av_clip( src_y, -16, s->mb_height * 16);
411  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
412  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
413  } else {
414  src_x = av_clip( src_x, -17, s->avctx->coded_width);
415  src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
416  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
417  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
418  }
419 
420  srcY += src_y * s->linesize + src_x;
421  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
422  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
423 
424  if (v->field_mode && v->ref_field_type[dir]) {
425  srcY += s->current_picture_ptr->f.linesize[0];
426  srcU += s->current_picture_ptr->f.linesize[1];
427  srcV += s->current_picture_ptr->f.linesize[2];
428  }
429 
430  /* for grayscale we should not try to read from unknown area */
431  if (s->flags & CODEC_FLAG_GRAY) {
432  srcU = s->edge_emu_buffer + 18 * s->linesize;
433  srcV = s->edge_emu_buffer + 18 * s->linesize;
434  }
435 
436  if (v->rangeredfrm || use_ic
437  || s->h_edge_pos < 22 || v_edge_pos < 22
438  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
439  || (unsigned)(src_y - 1) > v_edge_pos - (my&3) - 16 - 3) {
440  uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;
441 
442  srcY -= s->mspel * (1 + s->linesize);
444  s->linesize, s->linesize,
445  17 + s->mspel * 2, 17 + s->mspel * 2,
446  src_x - s->mspel, src_y - s->mspel,
447  s->h_edge_pos, v_edge_pos);
448  srcY = s->edge_emu_buffer;
449  s->vdsp.emulated_edge_mc(uvbuf, srcU,
450  s->uvlinesize, s->uvlinesize,
451  8 + 1, 8 + 1,
452  uvsrc_x, uvsrc_y,
453  s->h_edge_pos >> 1, v_edge_pos >> 1);
454  s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
455  s->uvlinesize, s->uvlinesize,
456  8 + 1, 8 + 1,
457  uvsrc_x, uvsrc_y,
458  s->h_edge_pos >> 1, v_edge_pos >> 1);
459  srcU = uvbuf;
460  srcV = uvbuf + 16;
461  /* if we deal with range reduction we need to scale source blocks */
462  if (v->rangeredfrm) {
463  int i, j;
464  uint8_t *src, *src2;
465 
466  src = srcY;
467  for (j = 0; j < 17 + s->mspel * 2; j++) {
468  for (i = 0; i < 17 + s->mspel * 2; i++)
469  src[i] = ((src[i] - 128) >> 1) + 128;
470  src += s->linesize;
471  }
472  src = srcU;
473  src2 = srcV;
474  for (j = 0; j < 9; j++) {
475  for (i = 0; i < 9; i++) {
476  src[i] = ((src[i] - 128) >> 1) + 128;
477  src2[i] = ((src2[i] - 128) >> 1) + 128;
478  }
479  src += s->uvlinesize;
480  src2 += s->uvlinesize;
481  }
482  }
483  /* if we deal with intensity compensation we need to scale source blocks */
484  if (use_ic) {
485  int i, j;
486  uint8_t *src, *src2;
487 
488  src = srcY;
489  for (j = 0; j < 17 + s->mspel * 2; j++) {
490  int f = v->field_mode ? v->ref_field_type[dir] : ((j + src_y - s->mspel) & 1) ;
491  for (i = 0; i < 17 + s->mspel * 2; i++)
492  src[i] = luty[f][src[i]];
493  src += s->linesize;
494  }
495  src = srcU;
496  src2 = srcV;
497  for (j = 0; j < 9; j++) {
498  int f = v->field_mode ? v->ref_field_type[dir] : ((j + uvsrc_y) & 1);
499  for (i = 0; i < 9; i++) {
500  src[i] = lutuv[f][src[i]];
501  src2[i] = lutuv[f][src2[i]];
502  }
503  src += s->uvlinesize;
504  src2 += s->uvlinesize;
505  }
506  }
507  srcY += s->mspel * (1 + s->linesize);
508  }
509 
510  if (s->mspel) {
511  dxy = ((my & 3) << 2) | (mx & 3);
512  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] , srcY , s->linesize, v->rnd);
513  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8, srcY + 8, s->linesize, v->rnd);
514  srcY += s->linesize * 8;
515  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize , srcY , s->linesize, v->rnd);
516  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
517  } else { // hpel mc - always used for luma
518  dxy = (my & 2) | ((mx & 2) >> 1);
519  if (!v->rnd)
520  s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
521  else
522  s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
523  }
524 
525  if (s->flags & CODEC_FLAG_GRAY) return;
526  /* Chroma MC always uses qpel bilinear */
527  uvmx = (uvmx & 3) << 1;
528  uvmy = (uvmy & 3) << 1;
529  if (!v->rnd) {
530  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
531  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
532  } else {
533  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
534  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
535  }
536 }
537 
/** Return the median of four ints: the average of the two middle values.
 *  The two middle values are found as max(pair minima) and min(pair maxima),
 *  which yields exactly the same numerator as the original case analysis,
 *  so the truncating integer division matches for all inputs. */
static inline int median4(int a, int b, int c, int d)
{
    int lo1 = a < b ? a : b, hi1 = a < b ? b : a;
    int lo2 = c < d ? c : d, hi2 = c < d ? d : c;
    int mid_lo = lo1 > lo2 ? lo1 : lo2; /* larger of the two pair minima  */
    int mid_hi = hi1 < hi2 ? hi1 : hi2; /* smaller of the two pair maxima */

    return (mid_lo + mid_hi) / 2;
}
548 
549 /** Do motion compensation for 4-MV macroblock - luminance block
550  */
551 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
552 {
553  MpegEncContext *s = &v->s;
554  uint8_t *srcY;
555  int dxy, mx, my, src_x, src_y;
556  int off;
557  int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
558  int v_edge_pos = s->v_edge_pos >> v->field_mode;
559  uint8_t (*luty)[256];
560  int use_ic;
561 
562  if ((!v->field_mode ||
563  (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
564  !v->s.last_picture.f.data[0])
565  return;
566 
567  mx = s->mv[dir][n][0];
568  my = s->mv[dir][n][1];
569 
570  if (!dir) {
571  if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
572  srcY = s->current_picture.f.data[0];
573  luty = v->curr_luty;
574  use_ic = *v->curr_use_ic;
575  } else {
576  srcY = s->last_picture.f.data[0];
577  luty = v->last_luty;
578  use_ic = v->last_use_ic;
579  }
580  } else {
581  srcY = s->next_picture.f.data[0];
582  luty = v->next_luty;
583  use_ic = v->next_use_ic;
584  }
585 
586  if (!srcY) {
587  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
588  return;
589  }
590 
591  if (v->field_mode) {
592  if (v->cur_field_type != v->ref_field_type[dir])
593  my = my - 2 + 4 * v->cur_field_type;
594  }
595 
596  if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
597  int same_count = 0, opp_count = 0, k;
598  int chosen_mv[2][4][2], f;
599  int tx, ty;
600  for (k = 0; k < 4; k++) {
601  f = v->mv_f[0][s->block_index[k] + v->blocks_off];
602  chosen_mv[f][f ? opp_count : same_count][0] = s->mv[0][k][0];
603  chosen_mv[f][f ? opp_count : same_count][1] = s->mv[0][k][1];
604  opp_count += f;
605  same_count += 1 - f;
606  }
607  f = opp_count > same_count;
608  switch (f ? opp_count : same_count) {
609  case 4:
610  tx = median4(chosen_mv[f][0][0], chosen_mv[f][1][0],
611  chosen_mv[f][2][0], chosen_mv[f][3][0]);
612  ty = median4(chosen_mv[f][0][1], chosen_mv[f][1][1],
613  chosen_mv[f][2][1], chosen_mv[f][3][1]);
614  break;
615  case 3:
616  tx = mid_pred(chosen_mv[f][0][0], chosen_mv[f][1][0], chosen_mv[f][2][0]);
617  ty = mid_pred(chosen_mv[f][0][1], chosen_mv[f][1][1], chosen_mv[f][2][1]);
618  break;
619  case 2:
620  tx = (chosen_mv[f][0][0] + chosen_mv[f][1][0]) / 2;
621  ty = (chosen_mv[f][0][1] + chosen_mv[f][1][1]) / 2;
622  break;
623  default:
624  av_assert0(0);
625  }
626  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
627  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
628  for (k = 0; k < 4; k++)
629  v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
630  }
631 
632  if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
633  int qx, qy;
634  int width = s->avctx->coded_width;
635  int height = s->avctx->coded_height >> 1;
636  if (s->pict_type == AV_PICTURE_TYPE_P) {
637  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
638  s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
639  }
640  qx = (s->mb_x * 16) + (mx >> 2);
641  qy = (s->mb_y * 8) + (my >> 3);
642 
643  if (qx < -17)
644  mx -= 4 * (qx + 17);
645  else if (qx > width)
646  mx -= 4 * (qx - width);
647  if (qy < -18)
648  my -= 8 * (qy + 18);
649  else if (qy > height + 1)
650  my -= 8 * (qy - height - 1);
651  }
652 
653  if ((v->fcm == ILACE_FRAME) && fieldmv)
654  off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
655  else
656  off = s->linesize * 4 * (n & 2) + (n & 1) * 8;
657 
658  src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
659  if (!fieldmv)
660  src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
661  else
662  src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);
663 
664  if (v->profile != PROFILE_ADVANCED) {
665  src_x = av_clip(src_x, -16, s->mb_width * 16);
666  src_y = av_clip(src_y, -16, s->mb_height * 16);
667  } else {
668  src_x = av_clip(src_x, -17, s->avctx->coded_width);
669  if (v->fcm == ILACE_FRAME) {
670  if (src_y & 1)
671  src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
672  else
673  src_y = av_clip(src_y, -18, s->avctx->coded_height);
674  } else {
675  src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
676  }
677  }
678 
679  srcY += src_y * s->linesize + src_x;
680  if (v->field_mode && v->ref_field_type[dir])
681  srcY += s->current_picture_ptr->f.linesize[0];
682 
683  if (fieldmv && !(src_y & 1))
684  v_edge_pos--;
685  if (fieldmv && (src_y & 1) && src_y < 4)
686  src_y--;
687  if (v->rangeredfrm || use_ic
688  || s->h_edge_pos < 13 || v_edge_pos < 23
689  || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
690  || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
691  srcY -= s->mspel * (1 + (s->linesize << fieldmv));
692  /* check emulate edge stride and offset */
694  s->linesize, s->linesize,
695  9 + s->mspel * 2, (9 + s->mspel * 2) << fieldmv,
696  src_x - s->mspel, src_y - (s->mspel << fieldmv),
697  s->h_edge_pos, v_edge_pos);
698  srcY = s->edge_emu_buffer;
699  /* if we deal with range reduction we need to scale source blocks */
700  if (v->rangeredfrm) {
701  int i, j;
702  uint8_t *src;
703 
704  src = srcY;
705  for (j = 0; j < 9 + s->mspel * 2; j++) {
706  for (i = 0; i < 9 + s->mspel * 2; i++)
707  src[i] = ((src[i] - 128) >> 1) + 128;
708  src += s->linesize << fieldmv;
709  }
710  }
711  /* if we deal with intensity compensation we need to scale source blocks */
712  if (use_ic) {
713  int i, j;
714  uint8_t *src;
715 
716  src = srcY;
717  for (j = 0; j < 9 + s->mspel * 2; j++) {
718  int f = v->field_mode ? v->ref_field_type[dir] : (((j<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1);
719  for (i = 0; i < 9 + s->mspel * 2; i++)
720  src[i] = luty[f][src[i]];
721  src += s->linesize << fieldmv;
722  }
723  }
724  srcY += s->mspel * (1 + (s->linesize << fieldmv));
725  }
726 
727  if (s->mspel) {
728  dxy = ((my & 3) << 2) | (mx & 3);
729  if (avg)
730  v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
731  else
732  v->vc1dsp.put_vc1_mspel_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
733  } else { // hpel mc - always used for luma
734  dxy = (my & 2) | ((mx & 2) >> 1);
735  if (!v->rnd)
736  s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
737  else
738  s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
739  }
740 }
741 
742 static av_always_inline int get_chroma_mv(int *mvx, int *mvy, int *a, int flag, int *tx, int *ty)
743 {
744  int idx, i;
745  static const int count[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
746 
747  idx = ((a[3] != flag) << 3)
748  | ((a[2] != flag) << 2)
749  | ((a[1] != flag) << 1)
750  | (a[0] != flag);
751  if (!idx) {
752  *tx = median4(mvx[0], mvx[1], mvx[2], mvx[3]);
753  *ty = median4(mvy[0], mvy[1], mvy[2], mvy[3]);
754  return 4;
755  } else if (count[idx] == 1) {
756  switch (idx) {
757  case 0x1:
758  *tx = mid_pred(mvx[1], mvx[2], mvx[3]);
759  *ty = mid_pred(mvy[1], mvy[2], mvy[3]);
760  return 3;
761  case 0x2:
762  *tx = mid_pred(mvx[0], mvx[2], mvx[3]);
763  *ty = mid_pred(mvy[0], mvy[2], mvy[3]);
764  return 3;
765  case 0x4:
766  *tx = mid_pred(mvx[0], mvx[1], mvx[3]);
767  *ty = mid_pred(mvy[0], mvy[1], mvy[3]);
768  return 3;
769  case 0x8:
770  *tx = mid_pred(mvx[0], mvx[1], mvx[2]);
771  *ty = mid_pred(mvy[0], mvy[1], mvy[2]);
772  return 3;
773  }
774  } else if (count[idx] == 2) {
775  int t1 = 0, t2 = 0;
776  for (i = 0; i < 3; i++)
777  if (!a[i]) {
778  t1 = i;
779  break;
780  }
781  for (i = t1 + 1; i < 4; i++)
782  if (!a[i]) {
783  t2 = i;
784  break;
785  }
786  *tx = (mvx[t1] + mvx[t2]) / 2;
787  *ty = (mvy[t1] + mvy[t2]) / 2;
788  return 2;
789  } else {
790  return 0;
791  }
792  return -1;
793 }
794 
795 /** Do motion compensation for 4-MV macroblock - both chroma blocks
796  */
797 static void vc1_mc_4mv_chroma(VC1Context *v, int dir)
798 {
799  MpegEncContext *s = &v->s;
800  H264ChromaContext *h264chroma = &v->h264chroma;
801  uint8_t *srcU, *srcV;
802  int uvmx, uvmy, uvsrc_x, uvsrc_y;
803  int k, tx = 0, ty = 0;
804  int mvx[4], mvy[4], intra[4], mv_f[4];
805  int valid_count;
806  int chroma_ref_type = v->cur_field_type;
807  int v_edge_pos = s->v_edge_pos >> v->field_mode;
808  uint8_t (*lutuv)[256];
809  int use_ic;
810 
811  if (!v->field_mode && !v->s.last_picture.f.data[0])
812  return;
813  if (s->flags & CODEC_FLAG_GRAY)
814  return;
815 
816  for (k = 0; k < 4; k++) {
817  mvx[k] = s->mv[dir][k][0];
818  mvy[k] = s->mv[dir][k][1];
819  intra[k] = v->mb_type[0][s->block_index[k]];
820  if (v->field_mode)
821  mv_f[k] = v->mv_f[dir][s->block_index[k] + v->blocks_off];
822  }
823 
824  /* calculate chroma MV vector from four luma MVs */
825  if (!v->field_mode || (v->field_mode && !v->numref)) {
826  valid_count = get_chroma_mv(mvx, mvy, intra, 0, &tx, &ty);
827  chroma_ref_type = v->reffield;
828  if (!valid_count) {
829  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
830  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
831  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
832  return; //no need to do MC for intra blocks
833  }
834  } else {
835  int dominant = 0;
836  if (mv_f[0] + mv_f[1] + mv_f[2] + mv_f[3] > 2)
837  dominant = 1;
838  valid_count = get_chroma_mv(mvx, mvy, mv_f, dominant, &tx, &ty);
839  if (dominant)
840  chroma_ref_type = !v->cur_field_type;
841  }
842  if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f.data[0])
843  return;
844  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
845  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
846  uvmx = (tx + ((tx & 3) == 3)) >> 1;
847  uvmy = (ty + ((ty & 3) == 3)) >> 1;
848 
849  v->luma_mv[s->mb_x][0] = uvmx;
850  v->luma_mv[s->mb_x][1] = uvmy;
851 
852  if (v->fastuvmc) {
853  uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
854  uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
855  }
856  // Field conversion bias
857  if (v->cur_field_type != chroma_ref_type)
858  uvmy += 2 - 4 * chroma_ref_type;
859 
860  uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
861  uvsrc_y = s->mb_y * 8 + (uvmy >> 2);
862 
863  if (v->profile != PROFILE_ADVANCED) {
864  uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
865  uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
866  } else {
867  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
868  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
869  }
870 
871  if (!dir) {
872  if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
873  srcU = s->current_picture.f.data[1];
874  srcV = s->current_picture.f.data[2];
875  lutuv = v->curr_lutuv;
876  use_ic = *v->curr_use_ic;
877  } else {
878  srcU = s->last_picture.f.data[1];
879  srcV = s->last_picture.f.data[2];
880  lutuv = v->last_lutuv;
881  use_ic = v->last_use_ic;
882  }
883  } else {
884  srcU = s->next_picture.f.data[1];
885  srcV = s->next_picture.f.data[2];
886  lutuv = v->next_lutuv;
887  use_ic = v->next_use_ic;
888  }
889 
890  if (!srcU) {
891  av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
892  return;
893  }
894 
895  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
896  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
897 
898  if (v->field_mode) {
899  if (chroma_ref_type) {
900  srcU += s->current_picture_ptr->f.linesize[1];
901  srcV += s->current_picture_ptr->f.linesize[2];
902  }
903  }
904 
905  if (v->rangeredfrm || use_ic
906  || s->h_edge_pos < 18 || v_edge_pos < 18
907  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
908  || (unsigned)uvsrc_y > (v_edge_pos >> 1) - 9) {
910  s->uvlinesize, s->uvlinesize,
911  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
912  s->h_edge_pos >> 1, v_edge_pos >> 1);
913  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
914  s->uvlinesize, s->uvlinesize,
915  8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
916  s->h_edge_pos >> 1, v_edge_pos >> 1);
917  srcU = s->edge_emu_buffer;
918  srcV = s->edge_emu_buffer + 16;
919 
920  /* if we deal with range reduction we need to scale source blocks */
921  if (v->rangeredfrm) {
922  int i, j;
923  uint8_t *src, *src2;
924 
925  src = srcU;
926  src2 = srcV;
927  for (j = 0; j < 9; j++) {
928  for (i = 0; i < 9; i++) {
929  src[i] = ((src[i] - 128) >> 1) + 128;
930  src2[i] = ((src2[i] - 128) >> 1) + 128;
931  }
932  src += s->uvlinesize;
933  src2 += s->uvlinesize;
934  }
935  }
936  /* if we deal with intensity compensation we need to scale source blocks */
937  if (use_ic) {
938  int i, j;
939  uint8_t *src, *src2;
940 
941  src = srcU;
942  src2 = srcV;
943  for (j = 0; j < 9; j++) {
944  int f = v->field_mode ? chroma_ref_type : ((j + uvsrc_y) & 1);
945  for (i = 0; i < 9; i++) {
946  src[i] = lutuv[f][src[i]];
947  src2[i] = lutuv[f][src2[i]];
948  }
949  src += s->uvlinesize;
950  src2 += s->uvlinesize;
951  }
952  }
953  }
954 
955  /* Chroma MC always uses qpel bilinear */
956  uvmx = (uvmx & 3) << 1;
957  uvmy = (uvmy & 3) << 1;
958  if (!v->rnd) {
959  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
960  h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
961  } else {
962  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
963  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
964  }
965 }
966 
967 /** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
968  */
969 static void vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
970 {
971  MpegEncContext *s = &v->s;
972  H264ChromaContext *h264chroma = &v->h264chroma;
973  uint8_t *srcU, *srcV;
974  int uvsrc_x, uvsrc_y;
975  int uvmx_field[4], uvmy_field[4];
976  int i, off, tx, ty;
977  int fieldmv = v->blk_mv_type[s->block_index[0]];
978  static const int s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
979  int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
980  int v_edge_pos = s->v_edge_pos >> 1;
981  int use_ic;
982  uint8_t (*lutuv)[256];
983 
984  if (s->flags & CODEC_FLAG_GRAY)
985  return;
986 
987  for (i = 0; i < 4; i++) {
988  int d = i < 2 ? dir: dir2;
989  tx = s->mv[d][i][0];
990  uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
991  ty = s->mv[d][i][1];
992  if (fieldmv)
993  uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
994  else
995  uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
996  }
997 
998  for (i = 0; i < 4; i++) {
999  off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
1000  uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
1001  uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
1002  // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
1003  uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
1004  uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
1005  if (i < 2 ? dir : dir2) {
1006  srcU = s->next_picture.f.data[1];
1007  srcV = s->next_picture.f.data[2];
1008  lutuv = v->next_lutuv;
1009  use_ic = v->next_use_ic;
1010  } else {
1011  srcU = s->last_picture.f.data[1];
1012  srcV = s->last_picture.f.data[2];
1013  lutuv = v->last_lutuv;
1014  use_ic = v->last_use_ic;
1015  }
1016  if (!srcU)
1017  return;
1018  srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
1019  srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
1020  uvmx_field[i] = (uvmx_field[i] & 3) << 1;
1021  uvmy_field[i] = (uvmy_field[i] & 3) << 1;
1022 
1023  if (fieldmv && !(uvsrc_y & 1))
1024  v_edge_pos = (s->v_edge_pos >> 1) - 1;
1025 
1026  if (fieldmv && (uvsrc_y & 1) && uvsrc_y < 2)
1027  uvsrc_y--;
1028  if (use_ic
1029  || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
1030  || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
1031  || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
1033  s->uvlinesize, s->uvlinesize,
1034  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1035  s->h_edge_pos >> 1, v_edge_pos);
1036  s->vdsp.emulated_edge_mc(s->edge_emu_buffer + 16, srcV,
1037  s->uvlinesize, s->uvlinesize,
1038  5, (5 << fieldmv), uvsrc_x, uvsrc_y,
1039  s->h_edge_pos >> 1, v_edge_pos);
1040  srcU = s->edge_emu_buffer;
1041  srcV = s->edge_emu_buffer + 16;
1042 
1043  /* if we deal with intensity compensation we need to scale source blocks */
1044  if (use_ic) {
1045  int i, j;
1046  uint8_t *src, *src2;
1047 
1048  src = srcU;
1049  src2 = srcV;
1050  for (j = 0; j < 5; j++) {
1051  int f = (uvsrc_y + (j << fieldmv)) & 1;
1052  for (i = 0; i < 5; i++) {
1053  src[i] = lutuv[f][src[i]];
1054  src2[i] = lutuv[f][src2[i]];
1055  }
1056  src += s->uvlinesize << fieldmv;
1057  src2 += s->uvlinesize << fieldmv;
1058  }
1059  }
1060  }
1061  if (avg) {
1062  if (!v->rnd) {
1063  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1064  h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1065  } else {
1066  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1067  v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1068  }
1069  } else {
1070  if (!v->rnd) {
1071  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1072  h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1073  } else {
1074  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1075  v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
1076  }
1077  }
1078  }
1079 }
1080 
1081 /***********************************************************************/
1082 /**
1083  * @name VC-1 Block-level functions
1084  * @see 7.1.4, p91 and 8.1.1.7, p104
1085  * @{
1086  */
1087 
/**
 * @def GET_MQUANT
 * @brief Get macroblock-level quantizer scale
 *
 * Expands inline; expects @c v (VC1Context*), @c s, @c gb, @c mquant and
 * @c mqdiff to be in scope at the expansion site.  When per-MB quantizer
 * changes are enabled (v->dquantfrm), reads the DQUANT syntax elements and,
 * depending on the DQPROFILE mode, overrides @c mquant with v->altpq on the
 * selected picture edges (bit 0: left, 1: top, 2: right, 3: bottom).
 * An out-of-range result (0 or >31) is replaced by 1 with an error log.
 */
#define GET_MQUANT()                                           \
    if (v->dquantfrm) {                                        \
        int edges = 0;                                         \
        if (v->dqprofile == DQPROFILE_ALL_MBS) {               \
            if (v->dqbilevel) {                                \
                mquant = (get_bits1(gb)) ? v->altpq : v->pq;   \
            } else {                                           \
                mqdiff = get_bits(gb, 3);                      \
                if (mqdiff != 7)                               \
                    mquant = v->pq + mqdiff;                   \
                else                                           \
                    mquant = get_bits(gb, 5);                  \
            }                                                  \
        }                                                      \
        if (v->dqprofile == DQPROFILE_SINGLE_EDGE)             \
            edges = 1 << v->dqsbedge;                          \
        else if (v->dqprofile == DQPROFILE_DOUBLE_EDGES)       \
            edges = (3 << v->dqsbedge) % 15;                   \
        else if (v->dqprofile == DQPROFILE_FOUR_EDGES)         \
            edges = 15;                                        \
        if ((edges&1) && !s->mb_x)                             \
            mquant = v->altpq;                                 \
        if ((edges&2) && s->first_slice_line)                  \
            mquant = v->altpq;                                 \
        if ((edges&4) && s->mb_x == (s->mb_width - 1))         \
            mquant = v->altpq;                                 \
        if ((edges&8) && s->mb_y == (s->mb_height - 1))        \
            mquant = v->altpq;                                 \
        if (!mquant || mquant > 31) {                          \
            av_log(v->s.avctx, AV_LOG_ERROR,                   \
                   "Overriding invalid mquant %d\n", mquant);  \
            mquant = 1;                                        \
        }                                                      \
    }
1126 
/**
 * @def GET_MVDATA(_dmv_x, _dmv_y)
 * @brief Get MV differentials
 * @see MVDATA decoding from 8.3.5.2, p120
 * @param _dmv_x Horizontal differential for decoded MV
 * @param _dmv_y Vertical differential for decoded MV
 *
 * Expects @c gb, @c s, @c v, @c index, @c index1, @c val, @c sign,
 * @c mb_has_coeffs, @c size_table and @c offset_table in scope.
 * Also sets s->mb_intra (index 36 signals an intra block) and
 * mb_has_coeffs (set when the raw VLC index exceeds 36).
 */
#define GET_MVDATA(_dmv_x, _dmv_y)                                      \
    index = 1 + get_vlc2(gb, ff_vc1_mv_diff_vlc[s->mv_table_index].table, \
                         VC1_MV_DIFF_VLC_BITS, 2);                      \
    if (index > 36) {                                                   \
        mb_has_coeffs = 1;                                              \
        index -= 37;                                                    \
    } else                                                              \
        mb_has_coeffs = 0;                                              \
    s->mb_intra = 0;                                                    \
    if (!index) {                                                       \
        _dmv_x = _dmv_y = 0;                                            \
    } else if (index == 35) {                                           \
        _dmv_x = get_bits(gb, v->k_x - 1 + s->quarter_sample);          \
        _dmv_y = get_bits(gb, v->k_y - 1 + s->quarter_sample);          \
    } else if (index == 36) {                                           \
        _dmv_x = 0;                                                     \
        _dmv_y = 0;                                                     \
        s->mb_intra = 1;                                                \
    } else {                                                            \
        index1 = index % 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else val = 0;                                                   \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else val = 0;                                                   \
        sign = 0 - (val&1);                                             \
        _dmv_x = (sign ^ ((val>>1) + offset_table[index1])) - sign;     \
                                                                        \
        index1 = index / 6;                                             \
        if (!s->quarter_sample && index1 == 5) val = 1;                 \
        else val = 0;                                                   \
        if (size_table[index1] - val > 0)                               \
            val = get_bits(gb, size_table[index1] - val);               \
        else val = 0;                                                   \
        sign = 0 - (val & 1);                                           \
        _dmv_y = (sign ^ ((val >> 1) + offset_table[index1])) - sign;   \
    }
1171 
1173  int *dmv_y, int *pred_flag)
1174 {
1175  int index, index1;
1176  int extend_x = 0, extend_y = 0;
1177  GetBitContext *gb = &v->s.gb;
1178  int bits, esc;
1179  int val, sign;
1180  const int* offs_tab;
1181 
1182  if (v->numref) {
1183  bits = VC1_2REF_MVDATA_VLC_BITS;
1184  esc = 125;
1185  } else {
1186  bits = VC1_1REF_MVDATA_VLC_BITS;
1187  esc = 71;
1188  }
1189  switch (v->dmvrange) {
1190  case 1:
1191  extend_x = 1;
1192  break;
1193  case 2:
1194  extend_y = 1;
1195  break;
1196  case 3:
1197  extend_x = extend_y = 1;
1198  break;
1199  }
1200  index = get_vlc2(gb, v->imv_vlc->table, bits, 3);
1201  if (index == esc) {
1202  *dmv_x = get_bits(gb, v->k_x);
1203  *dmv_y = get_bits(gb, v->k_y);
1204  if (v->numref) {
1205  if (pred_flag) {
1206  *pred_flag = *dmv_y & 1;
1207  *dmv_y = (*dmv_y + *pred_flag) >> 1;
1208  } else {
1209  *dmv_y = (*dmv_y + (*dmv_y & 1)) >> 1;
1210  }
1211  }
1212  }
1213  else {
1214  av_assert0(index < esc);
1215  if (extend_x)
1216  offs_tab = offset_table2;
1217  else
1218  offs_tab = offset_table1;
1219  index1 = (index + 1) % 9;
1220  if (index1 != 0) {
1221  val = get_bits(gb, index1 + extend_x);
1222  sign = 0 -(val & 1);
1223  *dmv_x = (sign ^ ((val >> 1) + offs_tab[index1])) - sign;
1224  } else
1225  *dmv_x = 0;
1226  if (extend_y)
1227  offs_tab = offset_table2;
1228  else
1229  offs_tab = offset_table1;
1230  index1 = (index + 1) / 9;
1231  if (index1 > v->numref) {
1232  val = get_bits(gb, (index1 + (extend_y << v->numref)) >> v->numref);
1233  sign = 0 - (val & 1);
1234  *dmv_y = (sign ^ ((val >> 1) + offs_tab[index1 >> v->numref])) - sign;
1235  } else
1236  *dmv_y = 0;
1237  if (v->numref && pred_flag)
1238  *pred_flag = index1 & 1;
1239  }
1240 }
1241 
1242 static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
1243 {
1244  int scaledvalue, refdist;
1245  int scalesame1, scalesame2;
1246  int scalezone1_x, zone1offset_x;
1247  int table_index = dir ^ v->second_field;
1248 
1249  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1250  refdist = v->refdist;
1251  else
1252  refdist = dir ? v->brfd : v->frfd;
1253  if (refdist > 3)
1254  refdist = 3;
1255  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1256  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1257  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
1258  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
1259 
1260  if (FFABS(n) > 255)
1261  scaledvalue = n;
1262  else {
1263  if (FFABS(n) < scalezone1_x)
1264  scaledvalue = (n * scalesame1) >> 8;
1265  else {
1266  if (n < 0)
1267  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
1268  else
1269  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
1270  }
1271  }
1272  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1273 }
1274 
1275 static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
1276 {
1277  int scaledvalue, refdist;
1278  int scalesame1, scalesame2;
1279  int scalezone1_y, zone1offset_y;
1280  int table_index = dir ^ v->second_field;
1281 
1282  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1283  refdist = v->refdist;
1284  else
1285  refdist = dir ? v->brfd : v->frfd;
1286  if (refdist > 3)
1287  refdist = 3;
1288  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
1289  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
1290  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
1291  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
1292 
1293  if (FFABS(n) > 63)
1294  scaledvalue = n;
1295  else {
1296  if (FFABS(n) < scalezone1_y)
1297  scaledvalue = (n * scalesame1) >> 8;
1298  else {
1299  if (n < 0)
1300  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
1301  else
1302  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
1303  }
1304  }
1305 
1306  if (v->cur_field_type && !v->ref_field_type[dir])
1307  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1308  else
1309  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1310 }
1311 
1312 static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
1313 {
1314  int scalezone1_x, zone1offset_x;
1315  int scaleopp1, scaleopp2, brfd;
1316  int scaledvalue;
1317 
1318  brfd = FFMIN(v->brfd, 3);
1319  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
1320  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
1321  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1322  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1323 
1324  if (FFABS(n) > 255)
1325  scaledvalue = n;
1326  else {
1327  if (FFABS(n) < scalezone1_x)
1328  scaledvalue = (n * scaleopp1) >> 8;
1329  else {
1330  if (n < 0)
1331  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
1332  else
1333  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
1334  }
1335  }
1336  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
1337 }
1338 
1339 static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
1340 {
1341  int scalezone1_y, zone1offset_y;
1342  int scaleopp1, scaleopp2, brfd;
1343  int scaledvalue;
1344 
1345  brfd = FFMIN(v->brfd, 3);
1346  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
1347  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
1348  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
1349  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
1350 
1351  if (FFABS(n) > 63)
1352  scaledvalue = n;
1353  else {
1354  if (FFABS(n) < scalezone1_y)
1355  scaledvalue = (n * scaleopp1) >> 8;
1356  else {
1357  if (n < 0)
1358  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
1359  else
1360  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
1361  }
1362  }
1363  if (v->cur_field_type && !v->ref_field_type[dir]) {
1364  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
1365  } else {
1366  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
1367  }
1368 }
1369 
1370 static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
1371  int dim, int dir)
1372 {
1373  int brfd, scalesame;
1374  int hpel = 1 - v->s.quarter_sample;
1375 
1376  n >>= hpel;
1377  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
1378  if (dim)
1379  n = scaleforsame_y(v, i, n, dir) << hpel;
1380  else
1381  n = scaleforsame_x(v, n, dir) << hpel;
1382  return n;
1383  }
1384  brfd = FFMIN(v->brfd, 3);
1385  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
1386 
1387  n = (n * scalesame >> 8) << hpel;
1388  return n;
1389 }
1390 
1391 static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
1392  int dim, int dir)
1393 {
1394  int refdist, scaleopp;
1395  int hpel = 1 - v->s.quarter_sample;
1396 
1397  n >>= hpel;
1398  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
1399  if (dim)
1400  n = scaleforopp_y(v, n, dir) << hpel;
1401  else
1402  n = scaleforopp_x(v, n) << hpel;
1403  return n;
1404  }
1405  if (v->s.pict_type != AV_PICTURE_TYPE_B)
1406  refdist = FFMIN(v->refdist, 3);
1407  else
1408  refdist = dir ? v->brfd : v->frfd;
1409  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
1410 
1411  n = (n * scaleopp >> 8) << hpel;
1412  return n;
1413 }
1414 
1415 /** Predict and set motion vector
1416  */
1417 static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
1418  int mv1, int r_x, int r_y, uint8_t* is_intra,
1419  int pred_flag, int dir)
1420 {
1421  MpegEncContext *s = &v->s;
1422  int xy, wrap, off = 0;
1423  int16_t *A, *B, *C;
1424  int px, py;
1425  int sum;
1426  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
1427  int opposite, a_f, b_f, c_f;
1428  int16_t field_predA[2];
1429  int16_t field_predB[2];
1430  int16_t field_predC[2];
1431  int a_valid, b_valid, c_valid;
1432  int hybridmv_thresh, y_bias = 0;
1433 
1434  if (v->mv_mode == MV_PMODE_MIXED_MV ||
1436  mixedmv_pic = 1;
1437  else
1438  mixedmv_pic = 0;
1439  /* scale MV difference to be quad-pel */
1440  dmv_x <<= 1 - s->quarter_sample;
1441  dmv_y <<= 1 - s->quarter_sample;
1442 
1443  wrap = s->b8_stride;
1444  xy = s->block_index[n];
1445 
1446  if (s->mb_intra) {
1447  s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0;
1448  s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0;
1449  s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0;
1450  s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0;
1451  if (mv1) { /* duplicate motion data for 1-MV block */
1452  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
1453  s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
1454  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
1455  s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
1456  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
1457  s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
1458  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
1459  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
1460  s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
1461  s->current_picture.motion_val[1][xy + wrap][0] = 0;
1462  s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
1463  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
1464  s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
1465  }
1466  return;
1467  }
1468 
1469  C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off];
1470  A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off];
1471  if (mv1) {
1472  if (v->field_mode && mixedmv_pic)
1473  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
1474  else
1475  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
1476  } else {
1477  //in 4-MV mode different blocks have different B predictor position
1478  switch (n) {
1479  case 0:
1480  off = (s->mb_x > 0) ? -1 : 1;
1481  break;
1482  case 1:
1483  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
1484  break;
1485  case 2:
1486  off = 1;
1487  break;
1488  case 3:
1489  off = -1;
1490  }
1491  }
1492  B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off];
1493 
1494  a_valid = !s->first_slice_line || (n == 2 || n == 3);
1495  b_valid = a_valid && (s->mb_width > 1);
1496  c_valid = s->mb_x || (n == 1 || n == 3);
1497  if (v->field_mode) {
1498  a_valid = a_valid && !is_intra[xy - wrap];
1499  b_valid = b_valid && !is_intra[xy - wrap + off];
1500  c_valid = c_valid && !is_intra[xy - 1];
1501  }
1502 
1503  if (a_valid) {
1504  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
1505  num_oppfield += a_f;
1506  num_samefield += 1 - a_f;
1507  field_predA[0] = A[0];
1508  field_predA[1] = A[1];
1509  } else {
1510  field_predA[0] = field_predA[1] = 0;
1511  a_f = 0;
1512  }
1513  if (b_valid) {
1514  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
1515  num_oppfield += b_f;
1516  num_samefield += 1 - b_f;
1517  field_predB[0] = B[0];
1518  field_predB[1] = B[1];
1519  } else {
1520  field_predB[0] = field_predB[1] = 0;
1521  b_f = 0;
1522  }
1523  if (c_valid) {
1524  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
1525  num_oppfield += c_f;
1526  num_samefield += 1 - c_f;
1527  field_predC[0] = C[0];
1528  field_predC[1] = C[1];
1529  } else {
1530  field_predC[0] = field_predC[1] = 0;
1531  c_f = 0;
1532  }
1533 
1534  if (v->field_mode) {
1535  if (!v->numref)
1536  // REFFIELD determines if the last field or the second-last field is
1537  // to be used as reference
1538  opposite = 1 - v->reffield;
1539  else {
1540  if (num_samefield <= num_oppfield)
1541  opposite = 1 - pred_flag;
1542  else
1543  opposite = pred_flag;
1544  }
1545  } else
1546  opposite = 0;
1547  if (opposite) {
1548  if (a_valid && !a_f) {
1549  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
1550  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
1551  }
1552  if (b_valid && !b_f) {
1553  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
1554  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
1555  }
1556  if (c_valid && !c_f) {
1557  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
1558  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
1559  }
1560  v->mv_f[dir][xy + v->blocks_off] = 1;
1561  v->ref_field_type[dir] = !v->cur_field_type;
1562  } else {
1563  if (a_valid && a_f) {
1564  field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
1565  field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
1566  }
1567  if (b_valid && b_f) {
1568  field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
1569  field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
1570  }
1571  if (c_valid && c_f) {
1572  field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
1573  field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
1574  }
1575  v->mv_f[dir][xy + v->blocks_off] = 0;
1576  v->ref_field_type[dir] = v->cur_field_type;
1577  }
1578 
1579  if (a_valid) {
1580  px = field_predA[0];
1581  py = field_predA[1];
1582  } else if (c_valid) {
1583  px = field_predC[0];
1584  py = field_predC[1];
1585  } else if (b_valid) {
1586  px = field_predB[0];
1587  py = field_predB[1];
1588  } else {
1589  px = 0;
1590  py = 0;
1591  }
1592 
1593  if (num_samefield + num_oppfield > 1) {
1594  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
1595  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
1596  }
1597 
1598  /* Pullback MV as specified in 8.3.5.3.4 */
1599  if (!v->field_mode) {
1600  int qx, qy, X, Y;
1601  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
1602  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
1603  X = (s->mb_width << 6) - 4;
1604  Y = (s->mb_height << 6) - 4;
1605  if (mv1) {
1606  if (qx + px < -60) px = -60 - qx;
1607  if (qy + py < -60) py = -60 - qy;
1608  } else {
1609  if (qx + px < -28) px = -28 - qx;
1610  if (qy + py < -28) py = -28 - qy;
1611  }
1612  if (qx + px > X) px = X - qx;
1613  if (qy + py > Y) py = Y - qy;
1614  }
1615 
1616  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
1617  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
1618  hybridmv_thresh = 32;
1619  if (a_valid && c_valid) {
1620  if (is_intra[xy - wrap])
1621  sum = FFABS(px) + FFABS(py);
1622  else
1623  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
1624  if (sum > hybridmv_thresh) {
1625  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
1626  px = field_predA[0];
1627  py = field_predA[1];
1628  } else {
1629  px = field_predC[0];
1630  py = field_predC[1];
1631  }
1632  } else {
1633  if (is_intra[xy - 1])
1634  sum = FFABS(px) + FFABS(py);
1635  else
1636  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
1637  if (sum > hybridmv_thresh) {
1638  if (get_bits1(&s->gb)) {
1639  px = field_predA[0];
1640  py = field_predA[1];
1641  } else {
1642  px = field_predC[0];
1643  py = field_predC[1];
1644  }
1645  }
1646  }
1647  }
1648  }
1649 
1650  if (v->field_mode && v->numref)
1651  r_y >>= 1;
1652  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
1653  y_bias = 1;
1654  /* store MV using signed modulus of MV range defined in 4.11 */
1655  s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
1656  s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
1657  if (mv1) { /* duplicate motion data for 1-MV block */
1658  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1659  s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1660  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1661  s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1662  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0];
1663  s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1];
1664  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1665  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
1666  }
1667 }
1668 
/** Predict and set motion vector for interlaced frame picture MBs
 *
 *  Candidates: A = left, B = above, C = above-right (above-left at the
 *  right picture edge).  When current and candidate blocks disagree on
 *  frame- vs field-MV type, the candidate's two field MVs are averaged.
 *
 *  @param n        luma block index inside the MB (0..3)
 *  @param dmv_x    decoded horizontal MV differential
 *  @param dmv_y    decoded vertical MV differential
 *  @param mvn      MB MV mode: 1 = 1-MV (duplicated to all four blocks),
 *                  2 = 2-field MV (duplicated horizontally), otherwise 4-MV
 *  @param r_x      horizontal MV range (stored as signed modulus of it)
 *  @param r_y      vertical MV range
 *  @param is_intra per-block intra flags (unused directly here; kept for
 *                  signature parity with vc1_pred_mv — see v->is_intra use)
 *  @param dir      0 = forward, 1 = backward prediction
 */
static inline void vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                                     int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px = 0, py = 0;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra block: zero all MVs (both directions) and return */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0]        = 0;
            s->current_picture.motion_val[0][xy + 1][1]        = 0;
            s->current_picture.motion_val[0][xy + wrap][0]     = 0;
            s->current_picture.motion_val[0][xy + wrap][1]     = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0]        = 0;
            s->current_picture.motion_val[1][xy + 1][1]        = 0;
            s->current_picture.motion_val[1][xy + wrap][0]     = 0;
            s->current_picture.motion_val[1][xy + wrap][1]     = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset towards the other field of the left candidate */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A (left candidate) */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* candidate above has field MVs, current is frame-MV: average */
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    if (s->mb_x == s->mb_width - 1) {
                        /* right edge: take the above-left candidate instead */
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* lower blocks of a frame-MV MB: B/C come from the upper blocks of
           the same MB */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->current_picture.motion_val[dir][pos_b][0];
        B[1]    = s->current_picture.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->current_picture.motion_val[dir][pos_c][0];
        C[1]    = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median (or single valid candidate) */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else { px = C[0]; py = C[1]; }
            }
        }
    } else {
        /* field-MV block: pick candidates by field parity (bit 2 of the
           vertical component encodes the field) */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else /*if (c_valid)*/ {
                    av_assert1(c_valid);
                    px = C[0];
                    py = C[1];
                } /*else px = py = 0;*/
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else /*if (field_b && b_valid)*/ {
                    av_assert1(field_b && b_valid);
                    px = B[0];
                    py = B[1];
                } /*else if (c_valid) {
                    px = C[0];
                    py = C[1];
                }*/
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        }
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1       ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1       ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap    ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap    ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
1894 
/** Motion compensation for direct or interpolated blocks in B-frames
 *
 * Averages the backward (next-picture) prediction into s->dest[], which at
 * this point already holds the forward prediction produced by vc1_mc_1mv()
 * — hence the exclusive use of avg_* DSP functions below.
 */
/* NOTE(review): extraction artifact — the signature line
 * ("static void vc1_interp_mc(VC1Context *v)" in upstream FFmpeg — confirm)
 * was lost from this dump. */
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int off, off_uv;
    int v_edge_pos = s->v_edge_pos >> v->field_mode; /* per-field height when field-coded */
    int use_ic = v->next_use_ic;                     /* intensity compensation active on next picture */

    if (!v->field_mode && !v->s.next_picture.f.data[0])
        return; /* no backward reference available */

    /* backward MV of the MB, quarter-pel units */
    mx = s->mv[1][0][0];
    my = s->mv[1][0][1];
    /* derive chroma MV by halving the luma MV with VC-1 rounding */
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[1])
            my = my - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
        /* NOTE(review): only `my` is guarded by the field-type test above,
         * `uvmy` is shifted whenever field_mode is set — confirm vs. SMPTE 421M */
    }
    if (v->fastuvmc) {
        /* FASTUVMC: round the chroma MV to half-pel */
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    }
    srcY = s->next_picture.f.data[0];
    srcU = s->next_picture.f.data[1];
    srcV = s->next_picture.f.data[2];

    /* integer source position (MV has 2 fractional bits) */
    src_x = s->mb_x * 16 + (mx >> 2);
    src_y = s->mb_y * 16 + (my >> 2);
    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip( src_x, -16, s->mb_width * 16);
        src_y = av_clip( src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        src_x = av_clip( src_x, -17, s->avctx->coded_width);
        src_y = av_clip( src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y * s->linesize + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[1]) {
        /* bottom reference field: skip one line into the frame buffer */
        srcY += s->current_picture_ptr->f.linesize[0];
        srcU += s->current_picture_ptr->f.linesize[1];
        srcV += s->current_picture_ptr->f.linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (s->flags & CODEC_FLAG_GRAY) {
        srcU = s->edge_emu_buffer + 18 * s->linesize;
        srcV = s->edge_emu_buffer + 18 * s->linesize;
    }

    /* emulate edges / copy into scratch when the block reads outside the
     * picture, or when range reduction / intensity compensation must be
     * applied to the source pixels first */
    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos - (my & 3) - 16 - 3) {
        uint8_t *uvbuf = s->edge_emu_buffer + 19 * s->linesize;

        srcY -= s->mspel * (1 + s->linesize);
        /* NOTE(review): extraction artifact — the opening of the luma
         * edge-emulation call ("s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
         * srcY," — confirm against upstream) is missing here. */
                                 s->linesize, s->linesize,
                                 17 + s->mspel * 2, 17 + s->mspel * 2,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->edge_emu_buffer;
        s->vdsp.emulated_edge_mc(uvbuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(uvbuf + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = uvbuf;
        srcV = uvbuf + 16;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = ((src[i] - 128) >> 1) + 128;
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                for (i = 0; i < 9; i++) {
                    src[i] = ((src[i] - 128) >> 1) + 128;
                    src2[i] = ((src2[i] - 128) >> 1) + 128;
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }

        /* apply intensity-compensation LUTs of the next picture in place */
        if (use_ic) {
            uint8_t (*luty )[256] = v->next_luty;
            uint8_t (*lutuv)[256] = v->next_lutuv;
            int i, j;
            uint8_t *src, *src2;

            src = srcY;
            for (j = 0; j < 17 + s->mspel * 2; j++) {
                /* LUT selection per line: field parity of the source row */
                int f = v->field_mode ? v->ref_field_type[1] : ((j+src_y - s->mspel) & 1);
                for (i = 0; i < 17 + s->mspel * 2; i++)
                    src[i] = luty[f][src[i]];
                src += s->linesize;
            }
            src = srcU;
            src2 = srcV;
            for (j = 0; j < 9; j++) {
                int f = v->field_mode ? v->ref_field_type[1] : ((j+uvsrc_y) & 1);
                for (i = 0; i < 9; i++) {
                    src[i] = lutuv[f][src[i]];
                    src2[i] = lutuv[f][src2[i]];
                }
                src += s->uvlinesize;
                src2 += s->uvlinesize;
            }
        }
        srcY += s->mspel * (1 + s->linesize); /* undo the scratch-buffer offset */
    }

    off = 0;
    off_uv = 0;

    if (s->mspel) {
        /* quarter-pel luma MC: four 8x8 calls covering the 16x16 block */
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8, srcY + 8, s->linesize, v->rnd);
        srcY += s->linesize * 8;
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize , srcY , s->linesize, v->rnd);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[dxy](s->dest[0] + off + 8 * s->linesize + 8, srcY + 8, s->linesize, v->rnd);
    } else { // hpel mc
        dxy = (my & 2) | ((mx & 2) >> 1);

        if (!v->rnd)
            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
        else
            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
    }

    if (s->flags & CODEC_FLAG_GRAY) return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1] + off_uv, srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2] + off_uv, srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}
2066 
2067 static av_always_inline int scale_mv(int value, int bfrac, int inv, int qs)
2068 {
2069  int n = bfrac;
2070 
2071 #if B_FRACTION_DEN==256
2072  if (inv)
2073  n -= 256;
2074  if (!qs)
2075  return 2 * ((value * n + 255) >> 9);
2076  return (value * n + 128) >> 8;
2077 #else
2078  if (inv)
2079  n -= B_FRACTION_DEN;
2080  if (!qs)
2081  return 2 * ((value * n + B_FRACTION_DEN - 1) / (2 * B_FRACTION_DEN));
2082  return (value * n + B_FRACTION_DEN/2) / B_FRACTION_DEN;
2083 #endif
2084 }
2085 
2086 /** Reconstruct motion vector for B-frame and do motion compensation
2087  */
2088 static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
2089  int direct, int mode)
2090 {
2091  if (direct) {
2092  vc1_mc_1mv(v, 0);
2093  vc1_interp_mc(v);
2094  return;
2095  }
2096  if (mode == BMV_TYPE_INTERPOLATED) {
2097  vc1_mc_1mv(v, 0);
2098  vc1_interp_mc(v);
2099  return;
2100  }
2101 
2102  vc1_mc_1mv(v, (mode == BMV_TYPE_BACKWARD));
2103 }
2104 
/**
 * Predict forward and backward motion vectors for a B-frame macroblock
 * (progressive pictures only — asserted below) and store the final MVs,
 * wrapped into the picture's MV range, in s->mv and motion_val.
 */
static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                                 int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;       /* above, above-side and left neighbour MVs */
    int px, py;               /* predicted MV components */
    int sum;
    int r_x, r_y;             /* MV range for the signed-modulus wrap */
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    dmv_x[0] <<= 1 - s->quarter_sample;
    dmv_y[0] <<= 1 - s->quarter_sample;
    dmv_x[1] <<= 1 - s->quarter_sample;
    dmv_y[1] <<= 1 - s->quarter_sample;

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        /* intra MB: both direction MVs are zero */
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_picture_ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

    /* direct-mode candidates: scale the co-located MV of the next picture
     * by BFRACTION toward each reference */
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    /* ---- forward MV prediction (dir 0); the backward branch below is the
     * same algorithm on motion_val[1] / dmv_[1] ---- */
    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        /* B: above-right normally, above-left in the last MB column */
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                /* half-pel units (<<5) below the advanced profile */
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                /* quarter-pel units (<<6) in the advanced profile */
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via the `0 &&` — kept for reference */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    /* ---- backward MV prediction (dir 1), mirror of the block above ---- */
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            if (v->profile < PROFILE_ADVANCED) {
                qx = (s->mb_x << 5);
                qy = (s->mb_y << 5);
                X = (s->mb_width << 5) - 4;
                Y = (s->mb_height << 5) - 4;
                if (qx + px < -28) px = -28 - qx;
                if (qy + py < -28) py = -28 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            } else {
                qx = (s->mb_x << 6);
                qy = (s->mb_y << 6);
                X = (s->mb_width << 6) - 4;
                Y = (s->mb_height << 6) - 4;
                if (qx + px < -60) px = -60 - qx;
                if (qy + py < -60) py = -60 - qy;
                if (qx + px > X) px = X - qx;
                if (qy + py > Y) py = Y - qy;
            }
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: deliberately disabled via the `0 &&` — kept for reference */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
2320 
/**
 * Predict B-frame motion vectors for interlaced-field pictures.
 * Dispatches on v->bmvtype: direct mode scales the co-located MV of the
 * next picture; all other modes delegate to vc1_pred_mv() per direction.
 *
 * @param n   block index within the MB
 * @param mv1 flag: MB uses a single MV (as opposed to 4 MVs)
 */
static inline void vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            /* scale the co-located MV toward both references */
            s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote over the 4 blocks: did the co-located MB
             * predominantly reference the opposite field? */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB is intra: zero MVs, same-field reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        for (k = 0; k < 4; k++) {
            /* replicate the MB MV and field flag to all 4 blocks */
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* one 1-MV prediction per direction */
        vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* also keep the unused direction's predictor up to date */
            vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
2377 
2378 /** Get predicted DC value for I-frames only
2379  * prediction dir: left=0, top=1
2380  * @param s MpegEncContext
2381  * @param overlap flag indicating that overlap filtering is used
2382  * @param pq integer part of picture quantizer
2383  * @param[in] n block index in the current MB
2384  * @param dc_val_ptr Pointer to DC predictor
2385  * @param dir_ptr Prediction direction for use in AC prediction
2386  */
2387 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2388  int16_t **dc_val_ptr, int *dir_ptr)
2389 {
2390  int a, b, c, wrap, pred, scale;
2391  int16_t *dc_val;
2392  static const uint16_t dcpred[32] = {
2393  -1, 1024, 512, 341, 256, 205, 171, 146, 128,
2394  114, 102, 93, 85, 79, 73, 68, 64,
2395  60, 57, 54, 51, 49, 47, 45, 43,
2396  41, 39, 38, 37, 35, 34, 33
2397  };
2398 
2399  /* find prediction - wmv3_dc_scale always used here in fact */
2400  if (n < 4) scale = s->y_dc_scale;
2401  else scale = s->c_dc_scale;
2402 
2403  wrap = s->block_wrap[n];
2404  dc_val = s->dc_val[0] + s->block_index[n];
2405 
2406  /* B A
2407  * C X
2408  */
2409  c = dc_val[ - 1];
2410  b = dc_val[ - 1 - wrap];
2411  a = dc_val[ - wrap];
2412 
2413  if (pq < 9 || !overlap) {
2414  /* Set outer values */
2415  if (s->first_slice_line && (n != 2 && n != 3))
2416  b = a = dcpred[scale];
2417  if (s->mb_x == 0 && (n != 1 && n != 3))
2418  b = c = dcpred[scale];
2419  } else {
2420  /* Set outer values */
2421  if (s->first_slice_line && (n != 2 && n != 3))
2422  b = a = 0;
2423  if (s->mb_x == 0 && (n != 1 && n != 3))
2424  b = c = 0;
2425  }
2426 
2427  if (abs(a - b) <= abs(b - c)) {
2428  pred = c;
2429  *dir_ptr = 1; // left
2430  } else {
2431  pred = a;
2432  *dir_ptr = 0; // top
2433  }
2434 
2435  /* update predictor */
2436  *dc_val_ptr = &dc_val[0];
2437  return pred;
2438 }
2439 
2440 
2441 /** Get predicted DC value
2442  * prediction dir: left=0, top=1
2443  * @param s MpegEncContext
2444  * @param overlap flag indicating that overlap filtering is used
2445  * @param pq integer part of picture quantizer
2446  * @param[in] n block index in the current MB
2447  * @param a_avail flag indicating top block availability
2448  * @param c_avail flag indicating left block availability
2449  * @param dc_val_ptr Pointer to DC predictor
2450  * @param dir_ptr Prediction direction for use in AC prediction
2451  */
2452 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
2453  int a_avail, int c_avail,
2454  int16_t **dc_val_ptr, int *dir_ptr)
2455 {
2456  int a, b, c, wrap, pred;
2457  int16_t *dc_val;
2458  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2459  int q1, q2 = 0;
2460  int dqscale_index;
2461 
2462  wrap = s->block_wrap[n];
2463  dc_val = s->dc_val[0] + s->block_index[n];
2464 
2465  /* B A
2466  * C X
2467  */
2468  c = dc_val[ - 1];
2469  b = dc_val[ - 1 - wrap];
2470  a = dc_val[ - wrap];
2471  /* scale predictors if needed */
2472  q1 = s->current_picture.qscale_table[mb_pos];
2473  dqscale_index = s->y_dc_scale_table[q1] - 1;
2474  if (dqscale_index < 0)
2475  return 0;
2476  if (c_avail && (n != 1 && n != 3)) {
2477  q2 = s->current_picture.qscale_table[mb_pos - 1];
2478  if (q2 && q2 != q1)
2479  c = (c * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2480  }
2481  if (a_avail && (n != 2 && n != 3)) {
2482  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2483  if (q2 && q2 != q1)
2484  a = (a * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2485  }
2486  if (a_avail && c_avail && (n != 3)) {
2487  int off = mb_pos;
2488  if (n != 1)
2489  off--;
2490  if (n != 2)
2491  off -= s->mb_stride;
2492  q2 = s->current_picture.qscale_table[off];
2493  if (q2 && q2 != q1)
2494  b = (b * s->y_dc_scale_table[q2] * ff_vc1_dqscale[dqscale_index] + 0x20000) >> 18;
2495  }
2496 
2497  if (a_avail && c_avail) {
2498  if (abs(a - b) <= abs(b - c)) {
2499  pred = c;
2500  *dir_ptr = 1; // left
2501  } else {
2502  pred = a;
2503  *dir_ptr = 0; // top
2504  }
2505  } else if (a_avail) {
2506  pred = a;
2507  *dir_ptr = 0; // top
2508  } else if (c_avail) {
2509  pred = c;
2510  *dir_ptr = 1; // left
2511  } else {
2512  pred = 0;
2513  *dir_ptr = 1; // left
2514  }
2515 
2516  /* update predictor */
2517  *dc_val_ptr = &dc_val[0];
2518  return pred;
2519 }
2520 
2521 /** @} */ // Block group
2522 
2523 /**
2524  * @name VC1 Macroblock-level functions in Simple/Main Profiles
 2525  * @see 7.1.4, p91 and 8.1.1.7, p104
2526  * @{
2527  */
2528 
2529 static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
2530  uint8_t **coded_block_ptr)
2531 {
2532  int xy, wrap, pred, a, b, c;
2533 
2534  xy = s->block_index[n];
2535  wrap = s->b8_stride;
2536 
2537  /* B C
2538  * A X
2539  */
2540  a = s->coded_block[xy - 1 ];
2541  b = s->coded_block[xy - 1 - wrap];
2542  c = s->coded_block[xy - wrap];
2543 
2544  if (b == c) {
2545  pred = a;
2546  } else {
2547  pred = c;
2548  }
2549 
2550  /* store value */
2551  *coded_block_ptr = &s->coded_block[xy];
2552 
2553  return pred;
2554 }
2555 
2556 /**
2557  * Decode one AC coefficient
2558  * @param v The VC1 context
2559  * @param last Last coefficient
2560  * @param skip How much zero coefficients to skip
2561  * @param value Decoded AC coefficient value
2562  * @param codingset set of VLC to decode data
2563  * @see 8.1.3.4
2564  */
/**
 * Decode one AC coefficient (run/level/last triple) from the bitstream.
 * Handles the plain VLC case plus the three VC-1 escape modes
 * (delta-level, delta-run, and fixed-length mode 3).
 * @see 8.1.3.4
 */
static void vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                int *value, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    int index, escape, run = 0, level = 0, lst = 0;

    index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
    if (index != ff_vc1_ac_sizes[codingset] - 1) {
        /* regular (non-escape) code: run/level straight from the table;
         * the get_bits_left() term forces "last" on bitstream overread */
        run = vc1_index_decode_table[codingset][index][0];
        level = vc1_index_decode_table[codingset][index][1];
        lst = index >= vc1_last_decode_table[codingset] || get_bits_left(gb) < 0;
        if (get_bits1(gb))
            level = -level;
    } else {
        escape = decode210(gb);
        if (escape != 2) {
            /* escape modes 0/1: decode a second code, then extend its
             * level (mode 0) or run (mode 1) with a table delta */
            index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
            run = vc1_index_decode_table[codingset][index][0];
            level = vc1_index_decode_table[codingset][index][1];
            lst = index >= vc1_last_decode_table[codingset];
            if (escape == 0) {
                if (lst)
                    level += vc1_last_delta_level_table[codingset][run];
                else
                    level += vc1_delta_level_table[codingset][run];
            } else {
                if (lst)
                    run += vc1_last_delta_run_table[codingset][level] + 1;
                else
                    run += vc1_delta_run_table[codingset][level] + 1;
            }
            if (get_bits1(gb))
                level = -level;
        } else {
            /* escape mode 3: fixed-length run/level; the field widths are
             * read once per picture and cached in esc3_*_length */
            int sign;
            lst = get_bits1(gb);
            if (v->s.esc3_level_length == 0) {
                if (v->pq < 8 || v->dquantfrm) { // table 59
                    v->s.esc3_level_length = get_bits(gb, 3);
                    if (!v->s.esc3_level_length)
                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
                } else { // table 60
                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
                }
                v->s.esc3_run_length = 3 + get_bits(gb, 2);
            }
            run = get_bits(gb, v->s.esc3_run_length);
            sign = get_bits1(gb);
            level = get_bits(gb, v->s.esc3_level_length);
            if (sign)
                level = -level;
        }
    }

    *last = lst;
    *skip = run;
    *value = level;
}
2623 
2624 /** Decode intra block in intra frames - should be faster than decode_intra_block
2625  * @param v VC1Context
2626  * @param block block to decode
2627  * @param[in] n subblock index
2628  * @param coded are AC coeffs present or not
2629  * @param codingset set of VLC to decode data
2630  */
/**
 * Decode one intra block of an I-frame: DC differential + prediction,
 * optional AC coefficients, AC prediction and scaling.
 *
 * @return 0 on success, -1 on an illegal DC VLC
 */
static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                              int coded, int codingset)
{
    GetBitContext *gb = &v->s.gb;
    MpegEncContext *s = &v->s;
    int dc_pred_dir = 0; /* Direction of the DC prediction used */
    int i;
    int16_t *dc_val;
    int16_t *ac_val, *ac_val2;
    int dcdiff;

    /* Get DC differential */
    if (n < 4) {
        /* NOTE(review): extraction artifact — the luma DC-differential VLC
         * read (a dcdiff = get_vlc2(...) call) was lost from this dump. */
    } else {
        /* NOTE(review): extraction artifact — the chroma DC-differential VLC
         * read was lost from this dump. */
    }
    if (dcdiff < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
        return -1;
    }
    if (dcdiff) {
        if (dcdiff == 119 /* ESC index value */) {
            /* TODO: Optimize */
            if (v->pq == 1) dcdiff = get_bits(gb, 10);
            else if (v->pq == 2) dcdiff = get_bits(gb, 9);
            else dcdiff = get_bits(gb, 8);
        } else {
            /* low quantizers widen the DC differential with extra bits */
            if (v->pq == 1)
                dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
            else if (v->pq == 2)
                dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
        }
        if (get_bits1(gb))
            dcdiff = -dcdiff;
    }

    /* Prediction */
    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
    *dc_val = dcdiff;

    /* Store the quantized DC coeff, used for prediction */
    if (n < 4) {
        block[0] = dcdiff * s->y_dc_scale;
    } else {
        block[0] = dcdiff * s->c_dc_scale;
    }
    /* Skip ? */
    if (!coded) {
        goto not_coded;
    }

    // AC Decoding
    i = 1;

    {
        int last = 0, skip, value;
        const uint8_t *zz_table;
        int scale;
        int k;

        scale = v->pq * 2 + v->halfpq;

        /* zigzag pattern depends on the AC-prediction direction */
        if (v->s.ac_pred) {
            if (!dc_pred_dir)
                zz_table = v->zz_8x8[2];
            else
                zz_table = v->zz_8x8[3];
        } else
            zz_table = v->zz_8x8[1];

        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;
        /* point ac_val at the neighbour's saved coefficients */
        if (dc_pred_dir) // left
            ac_val -= 16;
        else // top
            ac_val -= 16 * s->block_wrap[n];

        while (!last) {
            vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
            i += skip;
            if (i > 63)
                break;
            block[zz_table[i++]] = value;
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { // left
                for (k = 1; k < 8; k++)
                    block[k << v->left_blk_sh] += ac_val[k];
            } else { // top
                for (k = 1; k < 8; k++)
                    block[k << v->top_blk_sh] += ac_val[k + 8];
            }
        }
        /* save AC coeffs for further prediction */
        for (k = 1; k < 8; k++) {
            ac_val2[k] = block[k << v->left_blk_sh];
            ac_val2[k + 8] = block[k << v->top_blk_sh];
        }

        /* scale AC coeffs */
        for (k = 1; k < 64; k++)
            if (block[k]) {
                block[k] *= scale;
                if (!v->pquantizer)
                    block[k] += (block[k] < 0) ? -v->pq : v->pq;
            }

        if (s->ac_pred) i = 63;
    }

not_coded:
    /* no coded AC coefficients: AC prediction (if any) is the sole
     * source of the block's AC values */
    if (!coded) {
        int k, scale;
        ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
        ac_val2 = ac_val;

        i = 0;
        scale = v->pq * 2 + v->halfpq;
        memset(ac_val2, 0, 16 * 2);
        if (dc_pred_dir) { // left
            ac_val -= 16;
            if (s->ac_pred)
                memcpy(ac_val2, ac_val, 8 * 2);
        } else { // top
            ac_val -= 16 * s->block_wrap[n];
            if (s->ac_pred)
                memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
        }

        /* apply AC prediction if needed */
        if (s->ac_pred) {
            if (dc_pred_dir) { //left
                for (k = 1; k < 8; k++) {
                    block[k << v->left_blk_sh] = ac_val[k] * scale;
                    if (!v->pquantizer && block[k << v->left_blk_sh])
                        block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -v->pq : v->pq;
                }
            } else { // top
                for (k = 1; k < 8; k++) {
                    block[k << v->top_blk_sh] = ac_val[k + 8] * scale;
                    if (!v->pquantizer && block[k << v->top_blk_sh])
                        block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -v->pq : v->pq;
                }
            }
            i = 63;
        }
    }
    s->block_last_index[n] = i;

    return 0;
}
2785 
 2786 /** Decode intra block in intra frames (advanced-profile variant, per-MB quantizer) - should be faster than decode_intra_block
2787  * @param v VC1Context
2788  * @param block block to decode
2789  * @param[in] n subblock number
2790  * @param coded are AC coeffs present or not
2791  * @param codingset set of VLC to decode data
2792  * @param mquant quantizer value for this macroblock
2793  */
2794 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
2795  int coded, int codingset, int mquant)
2796 {
 /* Decodes one intra 8x8 block (advanced profile): DC differential + DC
  * prediction, then AC coefficients with optional AC prediction from the
  * left or top neighbour, finally inverse quantization of the ACs.
  * Returns 0 on success, negative on invalid bitstream data. */
2797  GetBitContext *gb = &v->s.gb;
2798  MpegEncContext *s = &v->s;
2799  int dc_pred_dir = 0; /* Direction of the DC prediction used */
2800  int i;
2801  int16_t *dc_val = NULL;
2802  int16_t *ac_val, *ac_val2;
2803  int dcdiff;
2804  int a_avail = v->a_avail, c_avail = v->c_avail;
2805  int use_pred = s->ac_pred;
2806  int scale;
2807  int q1, q2 = 0;
2808  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
2809 
2810  /* Get DC differential */
2811  if (n < 4) {
 /* NOTE(review): the DC VLC lookup statements (luma branch here, chroma
  * branch below) were dropped by this listing's extraction — in the real
  * source dcdiff is assigned from get_vlc2() in both branches; verify
  * against upstream before relying on this listing. */
2813  } else {
2815  }
2816  if (dcdiff < 0) {
2817  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
2818  return -1;
2819  }
2820  if (dcdiff) {
 /* Non-zero DC: index 119 escapes to a fixed-length value whose width
  * depends on the quantizer; small quantizers get extra precision bits. */
2821  if (dcdiff == 119 /* ESC index value */) {
2822  /* TODO: Optimize */
2823  if (mquant == 1) dcdiff = get_bits(gb, 10);
2824  else if (mquant == 2) dcdiff = get_bits(gb, 9);
2825  else dcdiff = get_bits(gb, 8);
2826  } else {
2827  if (mquant == 1)
2828  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
2829  else if (mquant == 2)
2830  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
2831  }
 /* Sign bit follows the magnitude. */
2832  if (get_bits1(gb))
2833  dcdiff = -dcdiff;
2834  }
2835 
2836  /* Prediction */
2837  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
2838  *dc_val = dcdiff;
2839 
2840  /* Store the quantized DC coeff, used for prediction */
2841  if (n < 4) {
2842  block[0] = dcdiff * s->y_dc_scale;
2843  } else {
2844  block[0] = dcdiff * s->c_dc_scale;
2845  }
2846 
2847  //AC Decoding
2848  i = 1;
2849 
2850  /* check if AC is needed at all */
2851  if (!a_avail && !c_avail)
2852  use_pred = 0;
2853  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
2854  ac_val2 = ac_val;
2855 
2856  scale = mquant * 2 + ((mquant == v->pq) ? v->halfpq : 0);
2857 
 /* Point ac_val at the predictor row/column: left neighbour stores its
  * 8 ACs at offset -16, top neighbour one block-row above. */
2858  if (dc_pred_dir) // left
2859  ac_val -= 16;
2860  else // top
2861  ac_val -= 16 * s->block_wrap[n];
2862 
 /* q1 = this MB's quantizer, q2 = the predictor MB's quantizer; when they
  * differ the saved AC predictors must be rescaled (see ff_vc1_dqscale
  * use below). Blocks 1/2/3 predict from inside the same MB, so q2 = q1. */
2863  q1 = s->current_picture.qscale_table[mb_pos];
2864  if ( dc_pred_dir && c_avail && mb_pos)
2865  q2 = s->current_picture.qscale_table[mb_pos - 1];
2866  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
2867  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
2868  if ( dc_pred_dir && n == 1)
2869  q2 = q1;
2870  if (!dc_pred_dir && n == 2)
2871  q2 = q1;
2872  if (n == 3)
2873  q2 = q1;
2874 
2875  if (coded) {
2876  int last = 0, skip, value;
2877  const uint8_t *zz_table;
2878  int k;
2879 
 /* Select the scan order: with AC prediction the scan is adapted to the
  * prediction direction; interlaced frames use a dedicated scan. */
2880  if (v->s.ac_pred) {
2881  if (!use_pred && v->fcm == ILACE_FRAME) {
2882  zz_table = v->zzi_8x8;
2883  } else {
2884  if (!dc_pred_dir) // top
2885  zz_table = v->zz_8x8[2];
2886  else // left
2887  zz_table = v->zz_8x8[3];
2888  }
2889  } else {
2890  if (v->fcm != ILACE_FRAME)
2891  zz_table = v->zz_8x8[1];
2892  else
2893  zz_table = v->zzi_8x8;
2894  }
2895 
 /* Run-level decode until the "last" flag; i walks the scan position. */
2896  while (!last) {
2897  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
2898  i += skip;
2899  if (i > 63)
2900  break;
2901  block[zz_table[i++]] = value;
2902  }
2903 
2904  /* apply AC prediction if needed */
2905  if (use_pred) {
2906  /* scale predictors if needed*/
2907  if (q2 && q1 != q2) {
2908  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2909  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2910 
2911  if (q1 < 1)
2912  return AVERROR_INVALIDDATA;
 /* Rescale the neighbour's ACs from its quantizer step to ours,
  * with rounding (+0x20000 before the >>18). */
2913  if (dc_pred_dir) { // left
2914  for (k = 1; k < 8; k++)
2915  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2916  } else { // top
2917  for (k = 1; k < 8; k++)
2918  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2919  }
2920  } else {
2921  if (dc_pred_dir) { //left
2922  for (k = 1; k < 8; k++)
2923  block[k << v->left_blk_sh] += ac_val[k];
2924  } else { //top
2925  for (k = 1; k < 8; k++)
2926  block[k << v->top_blk_sh] += ac_val[k + 8];
2927  }
2928  }
2929  }
2930  /* save AC coeffs for further prediction */
2931  for (k = 1; k < 8; k++) {
2932  ac_val2[k ] = block[k << v->left_blk_sh];
2933  ac_val2[k + 8] = block[k << v->top_blk_sh];
2934  }
2935 
2936  /* scale AC coeffs */
2937  for (k = 1; k < 64; k++)
2938  if (block[k]) {
2939  block[k] *= scale;
 /* Non-uniform quantizer: add/subtract mquant as dead-zone offset. */
2940  if (!v->pquantizer)
2941  block[k] += (block[k] < 0) ? -mquant : mquant;
2942  }
2943 
 /* With AC prediction all 64 positions may be populated. */
2944  if (use_pred) i = 63;
2945  } else { // no AC coeffs
2946  int k;
2947 
 /* No coded ACs: the saved predictors are either zero or a rescaled
  * copy of the neighbour's predictors. */
2948  memset(ac_val2, 0, 16 * 2);
2949  if (dc_pred_dir) { // left
2950  if (use_pred) {
2951  memcpy(ac_val2, ac_val, 8 * 2);
2952  if (q2 && q1 != q2) {
2953  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2954  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2955  if (q1 < 1)
2956  return AVERROR_INVALIDDATA;
2957  for (k = 1; k < 8; k++)
2958  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2959  }
2960  }
2961  } else { // top
2962  if (use_pred) {
2963  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
2964  if (q2 && q1 != q2) {
2965  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
2966  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
2967  if (q1 < 1)
2968  return AVERROR_INVALIDDATA;
2969  for (k = 1; k < 8; k++)
2970  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
2971  }
2972  }
2973  }
2974 
2975  /* apply AC prediction if needed */
2976  if (use_pred) {
2977  if (dc_pred_dir) { // left
2978  for (k = 1; k < 8; k++) {
2979  block[k << v->left_blk_sh] = ac_val2[k] * scale;
2980  if (!v->pquantizer && block[k << v->left_blk_sh])
2981  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
2982  }
2983  } else { // top
2984  for (k = 1; k < 8; k++) {
2985  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
2986  if (!v->pquantizer && block[k << v->top_blk_sh])
2987  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
2988  }
2989  }
2990  i = 63;
2991  }
2992  }
 /* Remember the last non-zero scan position for the IDCT stage. */
2993  s->block_last_index[n] = i;
2994 
2995  return 0;
2996 }
2997 
2998 /** Decode intra block in inter frames - more generic version than vc1_decode_i_block
2999  * @param v VC1Context
3000  * @param block block to decode
3001  * @param[in] n subblock index
3002  * @param coded are AC coeffs present or not
3003  * @param mquant block quantizer
3004  * @param codingset set of VLC to decode data
3005  */
3006 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
3007  int coded, int mquant, int codingset)
3008 {
 /* Decodes one intra block inside an inter (P/B) frame. Unlike
  * vc1_decode_i_block_adv this clears the block, clamps mquant, sets the
  * DC scale tables itself, and may flip the DC prediction direction when
  * a neighbour is unavailable. Returns 0 on success, negative on invalid
  * bitstream data. */
3009  GetBitContext *gb = &v->s.gb;
3010  MpegEncContext *s = &v->s;
3011  int dc_pred_dir = 0; /* Direction of the DC prediction used */
3012  int i;
3013  int16_t *dc_val = NULL;
3014  int16_t *ac_val, *ac_val2;
3015  int dcdiff;
3016  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3017  int a_avail = v->a_avail, c_avail = v->c_avail;
3018  int use_pred = s->ac_pred;
3019  int scale;
3020  int q1, q2 = 0;
3021 
3022  s->dsp.clear_block(block);
3023 
3024  /* XXX: Guard against dumb values of mquant */
3025  mquant = (mquant < 1) ? 0 : ((mquant > 31) ? 31 : mquant);
3026 
3027  /* Set DC scale - y and c use the same */
3028  s->y_dc_scale = s->y_dc_scale_table[mquant];
3029  s->c_dc_scale = s->c_dc_scale_table[mquant];
3030 
3031  /* Get DC differential */
3032  if (n < 4) {
 /* NOTE(review): the DC VLC lookup statements (luma branch here, chroma
  * branch below) were dropped by this listing's extraction — in the real
  * source dcdiff is assigned from get_vlc2() in both branches; verify
  * against upstream before relying on this listing. */
3034  } else {
3036  }
3037  if (dcdiff < 0) {
3038  av_log(s->avctx, AV_LOG_ERROR, "Illegal DC VLC\n");
3039  return -1;
3040  }
3041  if (dcdiff) {
 /* Index 119 escapes to a fixed-length value; width depends on mquant. */
3042  if (dcdiff == 119 /* ESC index value */) {
3043  /* TODO: Optimize */
3044  if (mquant == 1) dcdiff = get_bits(gb, 10);
3045  else if (mquant == 2) dcdiff = get_bits(gb, 9);
3046  else dcdiff = get_bits(gb, 8);
3047  } else {
3048  if (mquant == 1)
3049  dcdiff = (dcdiff << 2) + get_bits(gb, 2) - 3;
3050  else if (mquant == 2)
3051  dcdiff = (dcdiff << 1) + get_bits1(gb) - 1;
3052  }
 /* Sign bit follows the magnitude. */
3053  if (get_bits1(gb))
3054  dcdiff = -dcdiff;
3055  }
3056 
3057  /* Prediction */
3058  dcdiff += vc1_pred_dc(&v->s, v->overlap, mquant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
3059  *dc_val = dcdiff;
3060 
3061  /* Store the quantized DC coeff, used for prediction */
3062 
3063  if (n < 4) {
3064  block[0] = dcdiff * s->y_dc_scale;
3065  } else {
3066  block[0] = dcdiff * s->c_dc_scale;
3067  }
3068 
3069  //AC Decoding
3070  i = 1;
3071 
3072  /* check if AC is needed at all and adjust direction if needed */
3073  if (!a_avail) dc_pred_dir = 1;
3074  if (!c_avail) dc_pred_dir = 0;
3075  if (!a_avail && !c_avail) use_pred = 0;
3076  ac_val = s->ac_val[0][0] + s->block_index[n] * 16;
3077  ac_val2 = ac_val;
3078 
3079  scale = mquant * 2 + v->halfpq;
3080 
 /* Point ac_val at the predictor row/column of the chosen neighbour. */
3081  if (dc_pred_dir) //left
3082  ac_val -= 16;
3083  else //top
3084  ac_val -= 16 * s->block_wrap[n];
3085 
 /* q1 = this MB's quantizer, q2 = the predictor MB's; a mismatch forces a
  * rescale of the saved predictors. Blocks 1/2/3 predict inside the same
  * MB, hence q2 = q1 for them. */
3086  q1 = s->current_picture.qscale_table[mb_pos];
3087  if (dc_pred_dir && c_avail && mb_pos)
3088  q2 = s->current_picture.qscale_table[mb_pos - 1];
3089  if (!dc_pred_dir && a_avail && mb_pos >= s->mb_stride)
3090  q2 = s->current_picture.qscale_table[mb_pos - s->mb_stride];
3091  if ( dc_pred_dir && n == 1)
3092  q2 = q1;
3093  if (!dc_pred_dir && n == 2)
3094  q2 = q1;
3095  if (n == 3) q2 = q1;
3096 
3097  if (coded) {
3098  int last = 0, skip, value;
3099  int k;
3100 
 /* Run-level decode; scan order depends on picture coding mode and,
  * for interlaced frames with AC prediction, on the pred direction. */
3101  while (!last) {
3102  vc1_decode_ac_coeff(v, &last, &skip, &value, codingset);
3103  i += skip;
3104  if (i > 63)
3105  break;
3106  if (v->fcm == PROGRESSIVE)
3107  block[v->zz_8x8[0][i++]] = value;
3108  else {
3109  if (use_pred && (v->fcm == ILACE_FRAME)) {
3110  if (!dc_pred_dir) // top
3111  block[v->zz_8x8[2][i++]] = value;
3112  else // left
3113  block[v->zz_8x8[3][i++]] = value;
3114  } else {
3115  block[v->zzi_8x8[i++]] = value;
3116  }
3117  }
3118  }
3119 
3120  /* apply AC prediction if needed */
3121  if (use_pred) {
3122  /* scale predictors if needed*/
3123  if (q2 && q1 != q2) {
3124  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3125  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3126 
3127  if (q1 < 1)
3128  return AVERROR_INVALIDDATA;
 /* Rescale the neighbour's ACs to this block's quantizer step,
  * with rounding (+0x20000 before the >>18). */
3129  if (dc_pred_dir) { // left
3130  for (k = 1; k < 8; k++)
3131  block[k << v->left_blk_sh] += (ac_val[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3132  } else { //top
3133  for (k = 1; k < 8; k++)
3134  block[k << v->top_blk_sh] += (ac_val[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3135  }
3136  } else {
3137  if (dc_pred_dir) { // left
3138  for (k = 1; k < 8; k++)
3139  block[k << v->left_blk_sh] += ac_val[k];
3140  } else { // top
3141  for (k = 1; k < 8; k++)
3142  block[k << v->top_blk_sh] += ac_val[k + 8];
3143  }
3144  }
3145  }
3146  /* save AC coeffs for further prediction */
3147  for (k = 1; k < 8; k++) {
3148  ac_val2[k ] = block[k << v->left_blk_sh];
3149  ac_val2[k + 8] = block[k << v->top_blk_sh];
3150  }
3151 
3152  /* scale AC coeffs */
3153  for (k = 1; k < 64; k++)
3154  if (block[k]) {
3155  block[k] *= scale;
 /* Non-uniform quantizer: dead-zone offset of +/- mquant. */
3156  if (!v->pquantizer)
3157  block[k] += (block[k] < 0) ? -mquant : mquant;
3158  }
3159 
3160  if (use_pred) i = 63;
3161  } else { // no AC coeffs
3162  int k;
3163 
 /* No coded ACs: saved predictors are zero or a rescaled copy of the
  * neighbour's predictors, and the prediction itself fills the block. */
3164  memset(ac_val2, 0, 16 * 2);
3165  if (dc_pred_dir) { // left
3166  if (use_pred) {
3167  memcpy(ac_val2, ac_val, 8 * 2);
3168  if (q2 && q1 != q2) {
3169  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3170  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3171  if (q1 < 1)
3172  return AVERROR_INVALIDDATA;
3173  for (k = 1; k < 8; k++)
3174  ac_val2[k] = (ac_val2[k] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3175  }
3176  }
3177  } else { // top
3178  if (use_pred) {
3179  memcpy(ac_val2 + 8, ac_val + 8, 8 * 2);
3180  if (q2 && q1 != q2) {
3181  q1 = q1 * 2 + ((q1 == v->pq) ? v->halfpq : 0) - 1;
3182  q2 = q2 * 2 + ((q2 == v->pq) ? v->halfpq : 0) - 1;
3183  if (q1 < 1)
3184  return AVERROR_INVALIDDATA;
3185  for (k = 1; k < 8; k++)
3186  ac_val2[k + 8] = (ac_val2[k + 8] * q2 * ff_vc1_dqscale[q1 - 1] + 0x20000) >> 18;
3187  }
3188  }
3189  }
3190 
3191  /* apply AC prediction if needed */
3192  if (use_pred) {
3193  if (dc_pred_dir) { // left
3194  for (k = 1; k < 8; k++) {
3195  block[k << v->left_blk_sh] = ac_val2[k] * scale;
3196  if (!v->pquantizer && block[k << v->left_blk_sh])
3197  block[k << v->left_blk_sh] += (block[k << v->left_blk_sh] < 0) ? -mquant : mquant;
3198  }
3199  } else { // top
3200  for (k = 1; k < 8; k++) {
3201  block[k << v->top_blk_sh] = ac_val2[k + 8] * scale;
3202  if (!v->pquantizer && block[k << v->top_blk_sh])
3203  block[k << v->top_blk_sh] += (block[k << v->top_blk_sh] < 0) ? -mquant : mquant;
3204  }
3205  }
3206  i = 63;
3207  }
3208  }
 /* Remember the last non-zero scan position for the IDCT stage. */
3209  s->block_last_index[n] = i;
3210 
3211  return 0;
3212 }
3213 
3214 /** Decode P block
3215  */
3216 static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
3217  int mquant, int ttmb, int first_block,
3218  uint8_t *dst, int linesize, int skip_block,
3219  int *ttmb_out)
3220 {
 /* Decodes one inter (residual) block of a P picture: determines the block
  * transform type (8x8/8x4/4x8/4x4), run-level decodes each subblock,
  * inverse-quantizes, and unless skip_block is set applies the inverse
  * transform and adds the result to dst. Returns the coded-subblock
  * pattern ("pat"); the transform type is also merged into *ttmb_out. */
3221  MpegEncContext *s = &v->s;
3222  GetBitContext *gb = &s->gb;
3223  int i, j;
3224  int subblkpat = 0;
3225  int scale, off, idx, last, skip, value;
3226  int ttblk = ttmb & 7;
3227  int pat = 0;
3228 
3229  s->dsp.clear_block(block);
3230 
3231  if (ttmb == -1) {
 /* NOTE(review): the per-block TTBLK VLC lookup that assigns ttblk was
  * dropped by this listing's extraction; verify against upstream. */
3233  }
3234  if (ttblk == TT_4X4) {
3235  subblkpat = ~(get_vlc2(gb, ff_vc1_subblkpat_vlc[v->tt_index].table, VC1_SUBBLKPAT_VLC_BITS, 1) + 1);
3236  }
3237  if ((ttblk != TT_8X8 && ttblk != TT_4X4)
3238  && ((v->ttmbf || (ttmb != -1 && (ttmb & 8) && !first_block))
3239  || (!v->res_rtm_flag && !first_block))) {
3240  subblkpat = decode012(gb);
3241  if (subblkpat)
3242  subblkpat ^= 3; // swap decoded pattern bits
3243  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM)
3244  ttblk = TT_8X4;
3245  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT)
3246  ttblk = TT_4X8;
3247  }
3248  scale = 2 * mquant + ((v->pq == mquant) ? v->halfpq : 0);
3249 
3250  // convert transforms like 8X4_TOP to generic TT and SUBBLKPAT
3251  if (ttblk == TT_8X4_TOP || ttblk == TT_8X4_BOTTOM) {
3252  subblkpat = 2 - (ttblk == TT_8X4_TOP);
3253  ttblk = TT_8X4;
3254  }
3255  if (ttblk == TT_4X8_RIGHT || ttblk == TT_4X8_LEFT) {
3256  subblkpat = 2 - (ttblk == TT_4X8_LEFT);
3257  ttblk = TT_4X8;
3258  }
3259  switch (ttblk) {
3260  case TT_8X8:
 /* Single 8x8 transform: all four 4x4 quadrants counted as coded. */
3261  pat = 0xF;
3262  i = 0;
3263  last = 0;
3264  while (!last) {
3265  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3266  i += skip;
3267  if (i > 63)
3268  break;
3269  if (!v->fcm)
3270  idx = v->zz_8x8[0][i++];
3271  else
3272  idx = v->zzi_8x8[i++];
3273  block[idx] = value * scale;
3274  if (!v->pquantizer)
3275  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3276  }
3277  if (!skip_block) {
 /* i == 1 means only the DC coefficient — use the fast DC-only IDCT. */
3278  if (i == 1)
3279  v->vc1dsp.vc1_inv_trans_8x8_dc(dst, linesize, block);
3280  else {
3281  v->vc1dsp.vc1_inv_trans_8x8(block);
3282  s->dsp.add_pixels_clamped(block, dst, linesize);
3283  }
3284  }
3285  break;
3286  case TT_4X4:
 /* Four 4x4 subblocks; subblkpat bit (3-j) set means subblock j is
  * NOT coded. */
3287  pat = ~subblkpat & 0xF;
3288  for (j = 0; j < 4; j++) {
3289  last = subblkpat & (1 << (3 - j));
3290  i = 0;
3291  off = (j & 1) * 4 + (j & 2) * 16;
3292  while (!last) {
3293  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3294  i += skip;
3295  if (i > 15)
3296  break;
3297  if (!v->fcm)
 /* NOTE(review): the progressive 4x4 zigzag index assignment was
  * dropped by this listing's extraction; verify against upstream. */
3299  else
3300  idx = ff_vc1_adv_interlaced_4x4_zz[i++];
3301  block[idx + off] = value * scale;
3302  if (!v->pquantizer)
3303  block[idx + off] += (block[idx + off] < 0) ? -mquant : mquant;
3304  }
3305  if (!(subblkpat & (1 << (3 - j))) && !skip_block) {
3306  if (i == 1)
3307  v->vc1dsp.vc1_inv_trans_4x4_dc(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3308  else
3309  v->vc1dsp.vc1_inv_trans_4x4(dst + (j & 1) * 4 + (j & 2) * 2 * linesize, linesize, block + off);
3310  }
3311  }
3312  break;
3313  case TT_8X4:
 /* Two 8x4 halves (top/bottom); map 2-bit subblkpat onto 4-bit pat. */
3314  pat = ~((subblkpat & 2) * 6 + (subblkpat & 1) * 3) & 0xF;
3315  for (j = 0; j < 2; j++) {
3316  last = subblkpat & (1 << (1 - j));
3317  i = 0;
3318  off = j * 32;
3319  while (!last) {
3320  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3321  i += skip;
3322  if (i > 31)
3323  break;
3324  if (!v->fcm)
3325  idx = v->zz_8x4[i++] + off;
3326  else
3327  idx = ff_vc1_adv_interlaced_8x4_zz[i++] + off;
3328  block[idx] = value * scale;
3329  if (!v->pquantizer)
3330  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3331  }
3332  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3333  if (i == 1)
3334  v->vc1dsp.vc1_inv_trans_8x4_dc(dst + j * 4 * linesize, linesize, block + off);
3335  else
3336  v->vc1dsp.vc1_inv_trans_8x4(dst + j * 4 * linesize, linesize, block + off);
3337  }
3338  }
3339  break;
3340  case TT_4X8:
 /* Two 4x8 halves (left/right). */
3341  pat = ~(subblkpat * 5) & 0xF;
3342  for (j = 0; j < 2; j++) {
3343  last = subblkpat & (1 << (1 - j));
3344  i = 0;
3345  off = j * 4;
3346  while (!last) {
3347  vc1_decode_ac_coeff(v, &last, &skip, &value, v->codingset2);
3348  i += skip;
3349  if (i > 31)
3350  break;
3351  if (!v->fcm)
3352  idx = v->zz_4x8[i++] + off;
3353  else
3354  idx = ff_vc1_adv_interlaced_4x8_zz[i++] + off;
3355  block[idx] = value * scale;
3356  if (!v->pquantizer)
3357  block[idx] += (block[idx] < 0) ? -mquant : mquant;
3358  }
3359  if (!(subblkpat & (1 << (1 - j))) && !skip_block) {
3360  if (i == 1)
3361  v->vc1dsp.vc1_inv_trans_4x8_dc(dst + j * 4, linesize, block + off);
3362  else
3363  v->vc1dsp.vc1_inv_trans_4x8(dst + j*4, linesize, block + off);
3364  }
3365  }
3366  break;
3367  }
 /* Publish this block's transform type in the per-MB nibble field. */
3368  if (ttmb_out)
3369  *ttmb_out |= ttblk << (n * 4);
3370  return pat;
3371 }
3372 
3373 /** @} */ // Macroblock group
3374 
/* MV differential decoding tables: per-index payload bit counts and value
 * offsets (presumably consumed by the GET_MVDATA macro used below — confirm
 * against its definition, which is outside this listing). */
3375 static const int size_table [6] = { 0, 2, 3, 4, 5, 8 };
3376 static const int offset_table[6] = { 0, 1, 3, 7, 15, 31 };
3377 
/* NOTE(review): the signature line was dropped by this listing's extraction;
 * judging by the body (uses v and block_num) this is presumably
 * vc1_apply_p_v_loop_filter(VC1Context *v, int block_num) — the vertical
 * in-loop deblocking for one block of the MB one row above the current one;
 * confirm against upstream. */
3379 {
3380  MpegEncContext *s = &v->s;
 /* cbp/intra state of the MB being filtered (one MB row above current). */
3381  int mb_cbp = v->cbp[s->mb_x - s->mb_stride],
3382  block_cbp = mb_cbp >> (block_num * 4), bottom_cbp,
3383  mb_is_intra = v->is_intra[s->mb_x - s->mb_stride],
3384  block_is_intra = mb_is_intra >> (block_num * 4), bottom_is_intra;
3385  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3386  uint8_t *dst;
3387 
 /* blocks 4/5 are chroma planes; luma dst is offset inside dest[0]. */
3388  if (block_num > 3) {
3389  dst = s->dest[block_num - 3];
3390  } else {
3391  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 8) * linesize;
3392  }
 /* Filter the boundary against the block below, except on the last row
  * for the bottom luma blocks. */
3393  if (s->mb_y != s->end_mb_y || block_num < 2) {
3394  int16_t (*mv)[2];
3395  int mv_stride;
3396 
3397  if (block_num > 3) {
3398  bottom_cbp = v->cbp[s->mb_x] >> (block_num * 4);
3399  bottom_is_intra = v->is_intra[s->mb_x] >> (block_num * 4);
3400  mv = &v->luma_mv[s->mb_x - s->mb_stride];
3401  mv_stride = s->mb_stride;
3402  } else {
3403  bottom_cbp = (block_num < 2) ? (mb_cbp >> ((block_num + 2) * 4))
3404  : (v->cbp[s->mb_x] >> ((block_num - 2) * 4));
3405  bottom_is_intra = (block_num < 2) ? (mb_is_intra >> ((block_num + 2) * 4))
3406  : (v->is_intra[s->mb_x] >> ((block_num - 2) * 4));
3407  mv_stride = s->b8_stride;
3408  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - 2 * mv_stride];
3409  }
3410 
 /* Full 8-pixel filter when either side is intra or MVs differ;
  * otherwise filter only the 4-pixel halves whose cbp bits are set. */
3411  if (bottom_is_intra & 1 || block_is_intra & 1 ||
3412  mv[0][0] != mv[mv_stride][0] || mv[0][1] != mv[mv_stride][1]) {
3413  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3414  } else {
3415  idx = ((bottom_cbp >> 2) | block_cbp) & 3;
3416  if (idx == 3) {
3417  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3418  } else if (idx) {
3419  if (idx == 1)
3420  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3421  else
3422  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3423  }
3424  }
3425  }
3426 
 /* Also filter the internal horizontal edge when the block used a
  * vertically split transform (4x4 or 8x4). */
3427  dst -= 4 * linesize;
3428  ttblk = (v->ttblk[s->mb_x - s->mb_stride] >> (block_num * 4)) & 0xF;
3429  if (ttblk == TT_4X4 || ttblk == TT_8X4) {
3430  idx = (block_cbp | (block_cbp >> 2)) & 3;
3431  if (idx == 3) {
3432  v->vc1dsp.vc1_v_loop_filter8(dst, linesize, v->pq);
3433  } else if (idx) {
3434  if (idx == 1)
3435  v->vc1dsp.vc1_v_loop_filter4(dst + 4, linesize, v->pq);
3436  else
3437  v->vc1dsp.vc1_v_loop_filter4(dst, linesize, v->pq);
3438  }
3439  }
3440 }
3441 
/* NOTE(review): the signature line was dropped by this listing's extraction;
 * judging by the body this is presumably
 * vc1_apply_p_h_loop_filter(VC1Context *v, int block_num) — the horizontal
 * in-loop deblocking for one block of the MB up-left of the current one;
 * confirm against upstream. */
3443 {
3444  MpegEncContext *s = &v->s;
 /* cbp/intra state of the MB being filtered (one row up, one MB left). */
3445  int mb_cbp = v->cbp[s->mb_x - 1 - s->mb_stride],
3446  block_cbp = mb_cbp >> (block_num * 4), right_cbp,
3447  mb_is_intra = v->is_intra[s->mb_x - 1 - s->mb_stride],
3448  block_is_intra = mb_is_intra >> (block_num * 4), right_is_intra;
3449  int idx, linesize = block_num > 3 ? s->uvlinesize : s->linesize, ttblk;
3450  uint8_t *dst;
3451 
3452  if (block_num > 3) {
3453  dst = s->dest[block_num - 3] - 8 * linesize;
3454  } else {
3455  dst = s->dest[0] + (block_num & 1) * 8 + ((block_num & 2) * 4 - 16) * linesize - 8;
3456  }
3457 
 /* Filter against the block to the right unless this is the rightmost
  * column and the block is on the MB's right half (block_num & 5). */
3458  if (s->mb_x != s->mb_width || !(block_num & 5)) {
3459  int16_t (*mv)[2];
3460 
3461  if (block_num > 3) {
3462  right_cbp = v->cbp[s->mb_x - s->mb_stride] >> (block_num * 4);
3463  right_is_intra = v->is_intra[s->mb_x - s->mb_stride] >> (block_num * 4);
3464  mv = &v->luma_mv[s->mb_x - s->mb_stride - 1];
3465  } else {
3466  right_cbp = (block_num & 1) ? (v->cbp[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3467  : (mb_cbp >> ((block_num + 1) * 4))\u003b
3468  right_is_intra = (block_num & 1) ? (v->is_intra[s->mb_x - s->mb_stride] >> ((block_num - 1) * 4))
3469  : (mb_is_intra >> ((block_num + 1) * 4));
3470  mv = &s->current_picture.motion_val[0][s->block_index[block_num] - s->b8_stride * 2 - 2];
3471  }
 /* Full 8-pixel filter when either side is intra or MVs differ;
  * otherwise filter the 4-pixel halves selected by the cbp bits. */
3472  if (block_is_intra & 1 || right_is_intra & 1 || mv[0][0] != mv[1][0] || mv[0][1] != mv[1][1]) {
3473  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3474  } else {
3475  idx = ((right_cbp >> 1) | block_cbp) & 5; // FIXME check
3476  if (idx == 5) {
3477  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3478  } else if (idx) {
3479  if (idx == 1)
3480  v->vc1dsp.vc1_h_loop_filter4(dst + 4 * linesize, linesize, v->pq);
3481  else
3482  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3483  }
3484  }
3485  }
3486 
 /* Also filter the internal vertical edge when the block used a
  * horizontally split transform (4x4 or 4x8). */
3487  dst -= 4;
3488  ttblk = (v->ttblk[s->mb_x - s->mb_stride - 1] >> (block_num * 4)) & 0xf;
3489  if (ttblk == TT_4X4 || ttblk == TT_4X8) {
3490  idx = (block_cbp | (block_cbp >> 1)) & 5;
3491  if (idx == 5) {
3492  v->vc1dsp.vc1_h_loop_filter8(dst, linesize, v->pq);
3493  } else if (idx) {
3494  if (idx == 1)
3495  v->vc1dsp.vc1_h_loop_filter4(dst + linesize * 4, linesize, v->pq);
3496  else
3497  v->vc1dsp.vc1_h_loop_filter4(dst, linesize, v->pq);
3498  }
3499  }
3500 }
3501 
/* NOTE(review): the signature line was dropped by this listing's extraction;
 * presumably vc1_apply_p_loop_filter(VC1Context *v) — drives the per-block
 * V and H loop filters over all 6 blocks of a P-picture MB; confirm
 * against upstream. */
3503 {
3504  MpegEncContext *s = &v->s;
3505  int i;
3506 
3507  for (i = 0; i < 6; i++) {
 /* NOTE(review): the vertical-filter call inside this loop was dropped
  * by this listing's extraction; verify against upstream. */
3509  }
3510 
3511  /* V always precedes H, therefore we run H one MB before V;
3512  * at the end of a row, we catch up to complete the row */
3513  if (s->mb_x) {
3514  for (i = 0; i < 6; i++) {
 /* NOTE(review): the horizontal-filter call inside this loop was also
  * dropped by the extraction. */
3516  }
3517  if (s->mb_x == s->mb_width - 1) {
3518  s->mb_x++;
 /* NOTE(review): a statement between the increment and the loop (and
  * the filter call inside the loop below) was dropped as well. */
3520  for (i = 0; i < 6; i++) {
3522  }
3523  }
3524  }
3525 }
3526 
3527 /** Decode one P-frame MB
3528  */
/* NOTE(review): the signature line was dropped by this listing's extraction;
 * per the doc comment above this is presumably
 * static int vc1_decode_p_mb(VC1Context *v) — decodes one progressive
 * P-frame macroblock (1MV or 4MV, coded or skipped); confirm upstream. */
3530 {
3531  MpegEncContext *s = &v->s;
3532  GetBitContext *gb = &s->gb;
3533  int i, j;
3534  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3535  int cbp; /* cbp decoding stuff */
3536  int mqdiff, mquant; /* MB quantization */
3537  int ttmb = v->ttfrm; /* MB Transform type */
3538 
3539  int mb_has_coeffs = 1; /* last_flag */
3540  int dmv_x, dmv_y; /* Differential MV components */
3541  int index, index1; /* LUT indexes */
3542  int val, sign; /* temp values */
3543  int first_block = 1;
3544  int dst_idx, off;
3545  int skipped, fourmv;
3546  int block_cbp = 0, pat, block_tt = 0, block_intra = 0;
3547 
3548  mquant = v->pq; /* lossy initialization */
3549 
 /* MV-type and skip flags are either raw bits or bitplane-coded. */
3550  if (v->mv_type_is_raw)
3551  fourmv = get_bits1(gb);
3552  else
3553  fourmv = v->mv_type_mb_plane[mb_pos];
3554  if (v->skip_is_raw)
3555  skipped = get_bits1(gb);
3556  else
3557  skipped = v->s.mbskip_table[mb_pos];
3558 
3559  if (!fourmv) { /* 1MV mode */
3560  if (!skipped) {
3561  GET_MVDATA(dmv_x, dmv_y);
3562 
3563  if (s->mb_intra) {
3564  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
3565  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
3566  }
 /* NOTE(review): a statement here (presumably the mb_type assignment
  * for the current picture) was dropped by this listing's extraction. */
3568  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3569 
3570  /* FIXME Set DC val for inter block ? */
3571  if (s->mb_intra && !mb_has_coeffs) {
3572  GET_MQUANT();
3573  s->ac_pred = get_bits1(gb);
3574  cbp = 0;
3575  } else if (mb_has_coeffs) {
3576  if (s->mb_intra)
3577  s->ac_pred = get_bits1(gb);
3578  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3579  GET_MQUANT();
3580  } else {
3581  mquant = v->pq;
3582  cbp = 0;
3583  }
3584  s->current_picture.qscale_table[mb_pos] = mquant;
3585 
3586  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
3587  ttmb = get_vlc2(gb, ff_vc1_ttmb_vlc[v->tt_index].table,
3588  VC1_TTMB_VLC_BITS, 2);
3589  if (!s->mb_intra) vc1_mc_1mv(v, 0);
3590  dst_idx = 0;
 /* Per-block loop: 4 luma + 2 chroma. */
3591  for (i = 0; i < 6; i++) {
3592  s->dc_val[0][s->block_index[i]] = 0;
3593  dst_idx += i >> 2;
3594  val = ((cbp >> (5 - i)) & 1);
3595  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3596  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3597  if (s->mb_intra) {
3598  /* check if prediction blocks A and C are available */
3599  v->a_avail = v->c_avail = 0;
3600  if (i == 2 || i == 3 || !s->first_slice_line)
3601  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3602  if (i == 1 || i == 3 || s->mb_x)
3603  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3604 
3605  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3606  (i & 4) ? v->codingset2 : v->codingset);
3607  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3608  continue;
3609  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3610  if (v->rangeredfrm)
3611  for (j = 0; j < 64; j++)
3612  s->block[i][j] <<= 1;
3613  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3614  if (v->pq >= 9 && v->overlap) {
3615  if (v->c_avail)
3616  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3617  if (v->a_avail)
3618  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3619  }
3620  block_cbp |= 0xF << (i << 2);
3621  block_intra |= 1 << i;
3622  } else if (val) {
3623  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb, first_block,
3624  s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize,
3625  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3626  block_cbp |= pat << (i << 2);
3627  if (!v->ttmbf && ttmb < 8)
3628  ttmb = -1;
3629  first_block = 0;
3630  }
3631  }
3632  } else { // skipped
 /* Skipped 1MV MB: zero residual, predicted MV, straight MC. */
3633  s->mb_intra = 0;
3634  for (i = 0; i < 6; i++) {
3635  v->mb_type[0][s->block_index[i]] = 0;
3636  s->dc_val[0][s->block_index[i]] = 0;
3637  }
3638  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3639  s->current_picture.qscale_table[mb_pos] = 0;
3640  vc1_pred_mv(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3641  vc1_mc_1mv(v, 0);
3642  }
3643  } else { // 4MV mode
3644  if (!skipped /* unskipped MB */) {
3645  int intra_count = 0, coded_inter = 0;
3646  int is_intra[6], is_coded[6];
3647  /* Get CBPCY */
3648  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
 /* First pass: per-block MV decode/MC and intra/coded bookkeeping. */
3649  for (i = 0; i < 6; i++) {
3650  val = ((cbp >> (5 - i)) & 1);
3651  s->dc_val[0][s->block_index[i]] = 0;
3652  s->mb_intra = 0;
3653  if (i < 4) {
3654  dmv_x = dmv_y = 0;
3655  s->mb_intra = 0;
3656  mb_has_coeffs = 0;
3657  if (val) {
3658  GET_MVDATA(dmv_x, dmv_y);
3659  }
3660  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3661  if (!s->mb_intra)
3662  vc1_mc_4mv_luma(v, i, 0, 0);
3663  intra_count += s->mb_intra;
3664  is_intra[i] = s->mb_intra;
3665  is_coded[i] = mb_has_coeffs;
3666  }
 /* Chroma blocks are intra when at least 3 luma blocks are. */
3667  if (i & 4) {
3668  is_intra[i] = (intra_count >= 3);
3669  is_coded[i] = val;
3670  }
3671  if (i == 4)
3672  vc1_mc_4mv_chroma(v, 0);
3673  v->mb_type[0][s->block_index[i]] = is_intra[i];
3674  if (!coded_inter)
3675  coded_inter = !is_intra[i] & is_coded[i];
3676  }
3677  // if there are no coded blocks then don't do anything more
3678  dst_idx = 0;
3679  if (!intra_count && !coded_inter)
3680  goto end;
3681  GET_MQUANT();
3682  s->current_picture.qscale_table[mb_pos] = mquant;
3683  /* test if block is intra and has pred */
3684  {
3685  int intrapred = 0;
3686  for (i = 0; i < 6; i++)
3687  if (is_intra[i]) {
3688  if (((!s->first_slice_line || (i == 2 || i == 3)) && v->mb_type[0][s->block_index[i] - s->block_wrap[i]])
3689  || ((s->mb_x || (i == 1 || i == 3)) && v->mb_type[0][s->block_index[i] - 1])) {
3690  intrapred = 1;
3691  break;
3692  }
3693  }
3694  if (intrapred)
3695  s->ac_pred = get_bits1(gb);
3696  else
3697  s->ac_pred = 0;
3698  }
3699  if (!v->ttmbf && coded_inter)
 /* NOTE(review): the TTMB VLC lookup that assigns ttmb here was
  * dropped by this listing's extraction; verify against upstream. */
3701  for (i = 0; i < 6; i++) {
3702  dst_idx += i >> 2;
3703  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3704  s->mb_intra = is_intra[i];
3705  if (is_intra[i]) {
3706  /* check if prediction blocks A and C are available */
3707  v->a_avail = v->c_avail = 0;
3708  if (i == 2 || i == 3 || !s->first_slice_line)
3709  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3710  if (i == 1 || i == 3 || s->mb_x)
3711  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3712 
3713  vc1_decode_intra_block(v, s->block[i], i, is_coded[i], mquant,
3714  (i & 4) ? v->codingset2 : v->codingset);
3715  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
3716  continue;
3717  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3718  if (v->rangeredfrm)
3719  for (j = 0; j < 64; j++)
3720  s->block[i][j] <<= 1;
3721  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off,
3722  (i & 4) ? s->uvlinesize : s->linesize);
3723  if (v->pq >= 9 && v->overlap) {
3724  if (v->c_avail)
3725  v->vc1dsp.vc1_h_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3726  if (v->a_avail)
3727  v->vc1dsp.vc1_v_overlap(s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
3728  }
3729  block_cbp |= 0xF << (i << 2);
3730  block_intra |= 1 << i;
3731  } else if (is_coded[i]) {
3732  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3733  first_block, s->dest[dst_idx] + off,
3734  (i & 4) ? s->uvlinesize : s->linesize,
3735  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
3736  &block_tt);
3737  block_cbp |= pat << (i << 2);
3738  if (!v->ttmbf && ttmb < 8)
3739  ttmb = -1;
3740  first_block = 0;
3741  }
3742  }
3743  } else { // skipped MB
 /* Skipped 4MV MB: zero residual, per-block predicted MVs + MC. */
3744  s->mb_intra = 0;
3745  s->current_picture.qscale_table[mb_pos] = 0;
3746  for (i = 0; i < 6; i++) {
3747  v->mb_type[0][s->block_index[i]] = 0;
3748  s->dc_val[0][s->block_index[i]] = 0;
3749  }
3750  for (i = 0; i < 4; i++) {
3751  vc1_pred_mv(v, i, 0, 0, 0, v->range_x, v->range_y, v->mb_type[0], 0, 0);
3752  vc1_mc_4mv_luma(v, i, 0, 0);
3753  }
3754  vc1_mc_4mv_chroma(v, 0);
3755  s->current_picture.qscale_table[mb_pos] = 0;
3756  }
3757  }
3758 end:
 /* Publish per-MB cbp / transform-type / intra masks for the loop filter. */
3759  v->cbp[s->mb_x] = block_cbp;
3760  v->ttblk[s->mb_x] = block_tt;
3761  v->is_intra[s->mb_x] = block_intra;
3762 
3763  return 0;
3764 }
3765 
3766 /* Decode one macroblock in an interlaced frame p picture */
3767 
3769 {
3770  MpegEncContext *s = &v->s;
3771  GetBitContext *gb = &s->gb;
3772  int i;
3773  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3774  int cbp = 0; /* cbp decoding stuff */
3775  int mqdiff, mquant; /* MB quantization */
3776  int ttmb = v->ttfrm; /* MB Transform type */
3777 
3778  int mb_has_coeffs = 1; /* last_flag */
3779  int dmv_x, dmv_y; /* Differential MV components */
3780  int val; /* temp value */
3781  int first_block = 1;
3782  int dst_idx, off;
3783  int skipped, fourmv = 0, twomv = 0;
3784  int block_cbp = 0, pat, block_tt = 0;
3785  int idx_mbmode = 0, mvbp;
3786  int stride_y, fieldtx;
3787 
3788  mquant = v->pq; /* Lossy initialization */
3789 
3790  if (v->skip_is_raw)
3791  skipped = get_bits1(gb);
3792  else
3793  skipped = v->s.mbskip_table[mb_pos];
3794  if (!skipped) {
3795  if (v->fourmvswitch)
3796  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
3797  else
3798  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2); // in a single line
3799  switch (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0]) {
3800  /* store the motion vector type in a flag (useful later) */
3801  case MV_PMODE_INTFR_4MV:
3802  fourmv = 1;
3803  v->blk_mv_type[s->block_index[0]] = 0;
3804  v->blk_mv_type[s->block_index[1]] = 0;
3805  v->blk_mv_type[s->block_index[2]] = 0;
3806  v->blk_mv_type[s->block_index[3]] = 0;
3807  break;
3809  fourmv = 1;
3810  v->blk_mv_type[s->block_index[0]] = 1;
3811  v->blk_mv_type[s->block_index[1]] = 1;
3812  v->blk_mv_type[s->block_index[2]] = 1;
3813  v->blk_mv_type[s->block_index[3]] = 1;
3814  break;
3816  twomv = 1;
3817  v->blk_mv_type[s->block_index[0]] = 1;
3818  v->blk_mv_type[s->block_index[1]] = 1;
3819  v->blk_mv_type[s->block_index[2]] = 1;
3820  v->blk_mv_type[s->block_index[3]] = 1;
3821  break;
3822  case MV_PMODE_INTFR_1MV:
3823  v->blk_mv_type[s->block_index[0]] = 0;
3824  v->blk_mv_type[s->block_index[1]] = 0;
3825  v->blk_mv_type[s->block_index[2]] = 0;
3826  v->blk_mv_type[s->block_index[3]] = 0;
3827  break;
3828  }
3829  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_INTRA) { // intra MB
3830  for (i = 0; i < 4; i++) {
3831  s->current_picture.motion_val[1][s->block_index[i]][0] = 0;
3832  s->current_picture.motion_val[1][s->block_index[i]][1] = 0;
3833  }
3834  s->current_picture.mb_type[mb_pos] = MB_TYPE_INTRA;
3835  s->mb_intra = v->is_intra[s->mb_x] = 1;
3836  for (i = 0; i < 6; i++)
3837  v->mb_type[0][s->block_index[i]] = 1;
3838  fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
3839  mb_has_coeffs = get_bits1(gb);
3840  if (mb_has_coeffs)
3841  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3842  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
3843  GET_MQUANT();
3844  s->current_picture.qscale_table[mb_pos] = mquant;
3845  /* Set DC scale - y and c use the same (not sure if necessary here) */
3846  s->y_dc_scale = s->y_dc_scale_table[mquant];
3847  s->c_dc_scale = s->c_dc_scale_table[mquant];
3848  dst_idx = 0;
3849  for (i = 0; i < 6; i++) {
3850  s->dc_val[0][s->block_index[i]] = 0;
3851  dst_idx += i >> 2;
3852  val = ((cbp >> (5 - i)) & 1);
3853  v->mb_type[0][s->block_index[i]] = s->mb_intra;
3854  v->a_avail = v->c_avail = 0;
3855  if (i == 2 || i == 3 || !s->first_slice_line)
3856  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
3857  if (i == 1 || i == 3 || s->mb_x)
3858  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
3859 
3860  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
3861  (i & 4) ? v->codingset2 : v->codingset);
3862  if ((i>3) && (s->flags & CODEC_FLAG_GRAY)) continue;
3863  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
3864  if (i < 4) {
3865  stride_y = s->linesize << fieldtx;
3866  off = (fieldtx) ? ((i & 1) * 8) + ((i & 2) >> 1) * s->linesize : (i & 1) * 8 + 4 * (i & 2) * s->linesize;
3867  } else {
3868  stride_y = s->uvlinesize;
3869  off = 0;
3870  }
3871  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, stride_y);
3872  //TODO: loop filter
3873  }
3874 
3875  } else { // inter MB
3876  mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
3877  if (mb_has_coeffs)
3878  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
3879  if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
3881  } else {
3882  if ((ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV)
3883  || (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_4MV_FIELD)) {
3885  }
3886  }
3887  s->mb_intra = v->is_intra[s->mb_x] = 0;
3888  for (i = 0; i < 6; i++)
3889  v->mb_type[0][s->block_index[i]] = 0;
3890  fieldtx = v->fieldtx_plane[mb_pos] = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][1];
3891  /* for all motion vector read MVDATA and motion compensate each block */
3892  dst_idx = 0;
3893  if (fourmv) {
3894  mvbp = v->fourmvbp;
3895  for (i = 0; i < 6; i++) {
3896  if (i < 4) {
3897  dmv_x = dmv_y = 0;
3898  val = ((mvbp >> (3 - i)) & 1);
3899  if (val) {
3900  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3901  }
3902  vc1_pred_mv_intfr(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], 0);
3903  vc1_mc_4mv_luma(v, i, 0, 0);
3904  } else if (i == 4) {
3905  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3906  }
3907  }
3908  } else if (twomv) {
3909  mvbp = v->twomvbp;
3910  dmv_x = dmv_y = 0;
3911  if (mvbp & 2) {
3912  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3913  }
3914  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3915  vc1_mc_4mv_luma(v, 0, 0, 0);
3916  vc1_mc_4mv_luma(v, 1, 0, 0);
3917  dmv_x = dmv_y = 0;
3918  if (mvbp & 1) {
3919  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3920  }
3921  vc1_pred_mv_intfr(v, 2, dmv_x, dmv_y, 2, v->range_x, v->range_y, v->mb_type[0], 0);
3922  vc1_mc_4mv_luma(v, 2, 0, 0);
3923  vc1_mc_4mv_luma(v, 3, 0, 0);
3924  vc1_mc_4mv_chroma4(v, 0, 0, 0);
3925  } else {
3926  mvbp = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][2];
3927  dmv_x = dmv_y = 0;
3928  if (mvbp) {
3929  get_mvdata_interlaced(v, &dmv_x, &dmv_y, 0);
3930  }
3931  vc1_pred_mv_intfr(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3932  vc1_mc_1mv(v, 0);
3933  }
3934  if (cbp)
3935  GET_MQUANT(); // p. 227
3936  s->current_picture.qscale_table[mb_pos] = mquant;
3937  if (!v->ttmbf && cbp)
3939  for (i = 0; i < 6; i++) {
3940  s->dc_val[0][s->block_index[i]] = 0;
3941  dst_idx += i >> 2;
3942  val = ((cbp >> (5 - i)) & 1);
3943  if (!fieldtx)
3944  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
3945  else
3946  off = (i & 4) ? 0 : ((i & 1) * 8 + ((i > 1) * s->linesize));
3947  if (val) {
3948  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
3949  first_block, s->dest[dst_idx] + off,
3950  (i & 4) ? s->uvlinesize : (s->linesize << fieldtx),
3951  (i & 4) && (s->flags & CODEC_FLAG_GRAY), &block_tt);
3952  block_cbp |= pat << (i << 2);
3953  if (!v->ttmbf && ttmb < 8)
3954  ttmb = -1;
3955  first_block = 0;
3956  }
3957  }
3958  }
3959  } else { // skipped
3960  s->mb_intra = v->is_intra[s->mb_x] = 0;
3961  for (i = 0; i < 6; i++) {
3962  v->mb_type[0][s->block_index[i]] = 0;
3963  s->dc_val[0][s->block_index[i]] = 0;
3964  }
3965  s->current_picture.mb_type[mb_pos] = MB_TYPE_SKIP;
3966  s->current_picture.qscale_table[mb_pos] = 0;
3967  v->blk_mv_type[s->block_index[0]] = 0;
3968  v->blk_mv_type[s->block_index[1]] = 0;
3969  v->blk_mv_type[s->block_index[2]] = 0;
3970  v->blk_mv_type[s->block_index[3]] = 0;
3971  vc1_pred_mv_intfr(v, 0, 0, 0, 1, v->range_x, v->range_y, v->mb_type[0], 0);
3972  vc1_mc_1mv(v, 0);
3973  }
3974  if (s->mb_x == s->mb_width - 1)
3975  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0])*s->mb_stride);
3976  return 0;
3977 }
3978 
/* Decode one P-frame macroblock of an interlaced *field* P-picture.
 * Returns 0 on success.
 * NOTE(review): the HTML extraction dropped doxygen line 3979, which held the
 * function signature (presumably "static int vc1_decode_p_mb_intfi(VC1Context *v)"
 * -- TODO confirm against the original vc1dec.c). */
3980 {
3981  MpegEncContext *s = &v->s;
3982  GetBitContext *gb = &s->gb;
3983  int i;
3984  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
3985  int cbp = 0; /* cbp decoding stuff */
3986  int mqdiff, mquant; /* MB quantization */
3987  int ttmb = v->ttfrm; /* MB Transform type */
3988 
3989  int mb_has_coeffs = 1; /* last_flag */
3990  int dmv_x, dmv_y; /* Differential MV components */
3991  int val; /* temp values */
3992  int first_block = 1;
3993  int dst_idx, off;
3994  int pred_flag = 0;
3995  int block_cbp = 0, pat, block_tt = 0;
3996  int idx_mbmode = 0;
3997 
3998  mquant = v->pq; /* Lossy initialization */
3999 
       /* MBMODE VLC selects intra (<=1), 1-MV (<=5) or 4-MV coding; its low
        * bit doubles as the "MB has coefficients" / MVDATA-present flag. */
4000  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4001  if (idx_mbmode <= 1) { // intra MB
4002  s->mb_intra = v->is_intra[s->mb_x] = 1;
       /* zero the backward MV so later prediction from this MB is harmless */
4003  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
4004  s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
4005  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4006  GET_MQUANT();
4007  s->current_picture.qscale_table[mb_pos] = mquant;
4008  /* Set DC scale - y and c use the same (not sure if necessary here) */
4009  s->y_dc_scale = s->y_dc_scale_table[mquant];
4010  s->c_dc_scale = s->c_dc_scale_table[mquant];
4011  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4012  mb_has_coeffs = idx_mbmode & 1;
4013  if (mb_has_coeffs)
4014  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4015  dst_idx = 0;
       /* decode the 4 luma + 2 chroma intra blocks */
4016  for (i = 0; i < 6; i++) {
4017  s->dc_val[0][s->block_index[i]] = 0;
4018  v->mb_type[0][s->block_index[i]] = 1;
4019  dst_idx += i >> 2;
4020  val = ((cbp >> (5 - i)) & 1);
       /* availability of the above (A) and left (C) predictor blocks */
4021  v->a_avail = v->c_avail = 0;
4022  if (i == 2 || i == 3 || !s->first_slice_line)
4023  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4024  if (i == 1 || i == 3 || s->mb_x)
4025  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4026 
4027  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4028  (i & 4) ? v->codingset2 : v->codingset);
       /* skip chroma reconstruction in grayscale-only mode */
4029  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4030  continue;
4031  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4032  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4033  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4034  // TODO: loop filter
4035  }
4036  } else {
4037  s->mb_intra = v->is_intra[s->mb_x] = 0;
4038  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4039  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4040  if (idx_mbmode <= 5) { // 1-MV
4041  dmv_x = dmv_y = pred_flag = 0;
4042  if (idx_mbmode & 1) {
4043  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4044  }
4045  vc1_pred_mv(v, 0, dmv_x, dmv_y, 1, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4046  vc1_mc_1mv(v, 0);
4047  mb_has_coeffs = !(idx_mbmode & 2);
4048  } else { // 4-MV
       /* NOTE(review): doxygen line 4049 was dropped by the extraction --
        * it presumably read the 4MVBP pattern into v->fourmvbp via get_vlc2;
        * confirm against the original source. */
4050  for (i = 0; i < 6; i++) {
4051  if (i < 4) {
4052  dmv_x = dmv_y = pred_flag = 0;
4053  val = ((v->fourmvbp >> (3 - i)) & 1);
4054  if (val) {
4055  get_mvdata_interlaced(v, &dmv_x, &dmv_y, &pred_flag);
4056  }
4057  vc1_pred_mv(v, i, dmv_x, dmv_y, 0, v->range_x, v->range_y, v->mb_type[0], pred_flag, 0);
4058  vc1_mc_4mv_luma(v, i, 0, 0);
4059  } else if (i == 4)
4060  vc1_mc_4mv_chroma(v, 0);
4061  }
4062  mb_has_coeffs = idx_mbmode & 1;
4063  }
4064  if (mb_has_coeffs)
4065  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4066  if (cbp) {
4067  GET_MQUANT();
4068  }
4069  s->current_picture.qscale_table[mb_pos] = mquant;
4070  if (!v->ttmbf && cbp) {
       /* NOTE(review): doxygen line 4071 was dropped by the extraction --
        * it presumably read the per-MB transform type into ttmb via get_vlc2;
        * confirm against the original source. */
4072  }
4073  dst_idx = 0;
       /* decode residuals for each coded block of the inter MB */
4074  for (i = 0; i < 6; i++) {
4075  s->dc_val[0][s->block_index[i]] = 0;
4076  dst_idx += i >> 2;
4077  val = ((cbp >> (5 - i)) & 1);
4078  off = (i & 4) ? 0 : (i & 1) * 8 + (i & 2) * 4 * s->linesize;
4079  if (val) {
4080  pat = vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4081  first_block, s->dest[dst_idx] + off,
4082  (i & 4) ? s->uvlinesize : s->linesize,
4083  (i & 4) && (s->flags & CODEC_FLAG_GRAY),
4084  &block_tt);
4085  block_cbp |= pat << (i << 2);
4086  if (!v->ttmbf && ttmb < 8) ttmb = -1;
4087  first_block = 0;
4088  }
4089  }
4090  }
       /* at end of row, snapshot the per-MB intra flags for the next row's
        * error resilience / prediction */
4091  if (s->mb_x == s->mb_width - 1)
4092  memmove(v->is_intra_base, v->is_intra, sizeof(v->is_intra_base[0]) * s->mb_stride);
4093  return 0;
4094 }
4095 
4096 /** Decode one B-frame MB (in Main profile)
4097  */
/* NOTE(review): doxygen line 4098 (the function signature, presumably
 * "static void vc1_decode_b_mb(VC1Context *v)") was dropped by the HTML
 * extraction -- confirm against the original vc1dec.c. */
4099 {
4100  MpegEncContext *s = &v->s;
4101  GetBitContext *gb = &s->gb;
4102  int i, j;
4103  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4104  int cbp = 0; /* cbp decoding stuff */
4105  int mqdiff, mquant; /* MB quantization */
4106  int ttmb = v->ttfrm; /* MB Transform type */
4107  int mb_has_coeffs = 0; /* last_flag */
4108  int index, index1; /* LUT indexes */
4109  int val, sign; /* temp values */
4110  int first_block = 1;
4111  int dst_idx, off;
4112  int skipped, direct;
4113  int dmv_x[2], dmv_y[2];
4114  int bmvtype = BMV_TYPE_BACKWARD;
4115 
4116  mquant = v->pq; /* lossy initialization */
4117  s->mb_intra = 0;
4118 
       /* DIRECT and SKIP flags come either raw from the bitstream or from
        * the previously decoded bitplanes */
4119  if (v->dmb_is_raw)
4120  direct = get_bits1(gb);
4121  else
4122  direct = v->direct_mb_plane[mb_pos];
4123  if (v->skip_is_raw)
4124  skipped = get_bits1(gb);
4125  else
4126  skipped = v->s.mbskip_table[mb_pos];
4127 
4128  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4129  for (i = 0; i < 6; i++) {
4130  v->mb_type[0][s->block_index[i]] = 0;
4131  s->dc_val[0][s->block_index[i]] = 0;
4132  }
4133  s->current_picture.qscale_table[mb_pos] = 0;
4134 
4135  if (!direct) {
4136  if (!skipped) {
4137  GET_MVDATA(dmv_x[0], dmv_y[0]);
4138  dmv_x[1] = dmv_x[0];
4139  dmv_y[1] = dmv_y[0];
4140  }
4141  if (skipped || !s->mb_intra) {
       /* decode012: which direction the MV differential applies to; the
        * 0/1 cases flip meaning depending on whether BFRACTION puts this
        * frame in the first or second half of the anchor interval */
4142  bmvtype = decode012(gb);
4143  switch (bmvtype) {
4144  case 0:
4145  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_BACKWARD : BMV_TYPE_FORWARD;
4146  break;
4147  case 1:
4148  bmvtype = (v->bfraction >= (B_FRACTION_DEN/2)) ? BMV_TYPE_FORWARD : BMV_TYPE_BACKWARD;
4149  break;
4150  case 2:
4151  bmvtype = BMV_TYPE_INTERPOLATED;
4152  dmv_x[0] = dmv_y[0] = 0;
4153  }
4154  }
4155  }
4156  for (i = 0; i < 6; i++)
4157  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4158 
       /* skipped MB: just predict and motion-compensate, no residual */
4159  if (skipped) {
4160  if (direct)
4161  bmvtype = BMV_TYPE_INTERPOLATED;
4162  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4163  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4164  return;
4165  }
       /* direct MB: MVs are derived, only CBP/quant/transform are coded */
4166  if (direct) {
4167  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4168  GET_MQUANT();
4169  s->mb_intra = 0;
4170  s->current_picture.qscale_table[mb_pos] = mquant;
4171  if (!v->ttmbf)
       /* NOTE(review): doxygen line 4172 was dropped by the extraction --
        * it presumably read the per-MB transform type into ttmb via get_vlc2;
        * confirm against the original source. */
4173  dmv_x[0] = dmv_y[0] = dmv_x[1] = dmv_y[1] = 0;
4174  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4175  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4176  } else {
4177  if (!mb_has_coeffs && !s->mb_intra) {
4178  /* no coded blocks - effectively skipped */
4179  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4180  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4181  return;
4182  }
4183  if (s->mb_intra && !mb_has_coeffs) {
4184  GET_MQUANT();
4185  s->current_picture.qscale_table[mb_pos] = mquant;
4186  s->ac_pred = get_bits1(gb);
4187  cbp = 0;
4188  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4189  } else {
4190  if (bmvtype == BMV_TYPE_INTERPOLATED) {
       /* interpolated mode carries a second MV differential */
4191  GET_MVDATA(dmv_x[0], dmv_y[0]);
4192  if (!mb_has_coeffs) {
4193  /* interpolated skipped block */
4194  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4195  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4196  return;
4197  }
4198  }
4199  vc1_pred_b_mv(v, dmv_x, dmv_y, direct, bmvtype);
4200  if (!s->mb_intra) {
4201  vc1_b_mc(v, dmv_x, dmv_y, direct, bmvtype);
4202  }
4203  if (s->mb_intra)
4204  s->ac_pred = get_bits1(gb);
4205  cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
4206  GET_MQUANT();
4207  s->current_picture.qscale_table[mb_pos] = mquant;
4208  if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
       /* NOTE(review): doxygen line 4209 was dropped by the extraction --
        * presumably the get_vlc2 read of the per-MB transform type (ttmb);
        * confirm against the original source. */
4210  }
4211  }
4212  dst_idx = 0;
       /* reconstruct the 4 luma + 2 chroma blocks */
4213  for (i = 0; i < 6; i++) {
4214  s->dc_val[0][s->block_index[i]] = 0;
4215  dst_idx += i >> 2;
4216  val = ((cbp >> (5 - i)) & 1);
4217  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4218  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4219  if (s->mb_intra) {
4220  /* check if prediction blocks A and C are available */
4221  v->a_avail = v->c_avail = 0;
4222  if (i == 2 || i == 3 || !s->first_slice_line)
4223  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4224  if (i == 1 || i == 3 || s->mb_x)
4225  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4226 
4227  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4228  (i & 4) ? v->codingset2 : v->codingset);
4229  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4230  continue;
4231  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
       /* range-reduced frame: samples are stored halved, scale back up */
4232  if (v->rangeredfrm)
4233  for (j = 0; j < 64; j++)
4234  s->block[i][j] <<= 1;
4235  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, i & 4 ? s->uvlinesize : s->linesize);
4236  } else if (val) {
4237  vc1_decode_p_block(v, s->block[i], i, mquant, ttmb,
4238  first_block, s->dest[dst_idx] + off,
4239  (i & 4) ? s->uvlinesize : s->linesize,
4240  (i & 4) && (s->flags & CODEC_FLAG_GRAY), NULL);
4241  if (!v->ttmbf && ttmb < 8)
4242  ttmb = -1;
4243  first_block = 0;
4244  }
4245  }
4246 }
4247 
4248 /** Decode one B-frame MB (in interlaced field B picture)
4249  */
4251 {
4252  MpegEncContext *s = &v->s;
4253  GetBitContext *gb = &s->gb;
4254  int i, j;
4255  int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
4256  int cbp = 0; /* cbp decoding stuff */
4257  int mqdiff, mquant; /* MB quantization */
4258  int ttmb = v->ttfrm; /* MB Transform type */
4259  int mb_has_coeffs = 0; /* last_flag */
4260  int val; /* temp value */
4261  int first_block = 1;
4262  int dst_idx, off;
4263  int fwd;
4264  int dmv_x[2], dmv_y[2], pred_flag[2];
4265  int bmvtype = BMV_TYPE_BACKWARD;
4266  int idx_mbmode;
4267 
4268  mquant = v->pq; /* Lossy initialization */
4269  s->mb_intra = 0;
4270 
4271  idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_IF_MBMODE_VLC_BITS, 2);
4272  if (idx_mbmode <= 1) { // intra MB
4273  s->mb_intra = v->is_intra[s->mb_x] = 1;
4274  s->current_picture.motion_val[1][s->block_index[0]][0] = 0;
4275  s->current_picture.motion_val[1][s->block_index[0]][1] = 0;
4276  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_INTRA;
4277  GET_MQUANT();
4278  s->current_picture.qscale_table[mb_pos] = mquant;
4279  /* Set DC scale - y and c use the same (not sure if necessary here) */
4280  s->y_dc_scale = s->y_dc_scale_table[mquant];
4281  s->c_dc_scale = s->c_dc_scale_table[mquant];
4282  v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
4283  mb_has_coeffs = idx_mbmode & 1;
4284  if (mb_has_coeffs)
4285  cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
4286  dst_idx = 0;
4287  for (i = 0; i < 6; i++) {
4288  s->dc_val[0][s->block_index[i]] = 0;
4289  dst_idx += i >> 2;
4290  val = ((cbp >> (5 - i)) & 1);
4291  v->mb_type[0][s->block_index[i]] = s->mb_intra;
4292  v->a_avail = v->c_avail = 0;
4293  if (i == 2 || i == 3 || !s->first_slice_line)
4294  v->a_avail = v->mb_type[0][s->block_index[i] - s->block_wrap[i]];
4295  if (i == 1 || i == 3 || s->mb_x)
4296  v->c_avail = v->mb_type[0][s->block_index[i] - 1];
4297 
4298  vc1_decode_intra_block(v, s->block[i], i, val, mquant,
4299  (i & 4) ? v->codingset2 : v->codingset);
4300  if ((i>3) && (s->flags & CODEC_FLAG_GRAY))
4301  continue;
4302  v->vc1dsp.vc1_inv_trans_8x8(s->block[i]);
4303  if (v->rangeredfrm)
4304  for (j = 0; j < 64; j++)
4305  s->block[i][j] <<= 1;
4306  off = (i & 4) ? 0 : ((i & 1) * 8 + (i & 2) * 4 * s->linesize);
4307  s->dsp.put_signed_pixels_clamped(s->block[i], s->dest[dst_idx] + off, (i & 4) ? s->uvlinesize : s->linesize);
4308  // TODO: yet to perform loop filter
4309  }
4310  } else {
4311  s->mb_intra = v->is_intra[s->mb_x] = 0;
4312  s->current_picture.mb_type[mb_pos + v->mb_off] = MB_TYPE_16x16;
4313  for (i = 0; i < 6; i++) v->mb_type[0][s->block_index[i]] = 0;
4314  if (v->fmb_is_raw)
4315  fwd = v->forward_mb_plane[mb_pos] = get_bits1(gb);
4316  else
4317  fwd = v->forward_mb_plane[mb_pos];
4318  if (idx_mbmode <= 5) { // 1-MV
4319  int interpmvp = 0;
4320  dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
4321  pred_flag[0] = pred_flag[1] = 0;
4322  if (fwd)
4323  bmvtype = BMV_TYPE_FORWARD;
4324  else {
4325  bmvtype = decode012(gb);
4326  switch (bmvtype) {
4327  case 0:
4328  bmvtype = BMV_TYPE_BACKWARD;
4329  break;
4330  case 1:
4331  bmvtype = BMV_TYPE_DIRECT;
4332  break;
4333  case 2:
4334  bmvtype = BMV_TYPE_INTERPOLATED;
4335  interpmvp = get_bits1(gb);
4336  }
4337  }
4338  v->bmvtype = bmvtype;
4339  if (bmvtype != BMV_TYPE_DIRECT && idx_mbmode & 1) {
4340  get_mvdata_interlaced(v, &dmv_x[bmvtype == BMV_TYPE_BACKWARD], &dmv_y[bmvtype == BMV_TYPE_BACKWARD], &pred_flag[bmvtype == BMV_TYPE_BACKWARD]);