FFmpeg
vc1_pred.c
Go to the documentation of this file.
1 /*
2  * VC-1 and WMV3 decoder
3  * Copyright (c) 2011 Mashiat Sarker Shakkhar
4  * Copyright (c) 2006-2007 Konstantin Shishkov
5  * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 /**
25  * @file
26  * VC-1 and WMV3 block decoding routines
27  */
28 
29 #include "mathops.h"
30 #include "mpegutils.h"
31 #include "mpegvideo.h"
32 #include "vc1.h"
33 #include "vc1_pred.h"
34 #include "vc1data.h"
35 
36 static av_always_inline int scaleforsame_x(const VC1Context *v, int n /* MV */, int dir)
37 {
38  int scaledvalue, refdist;
39  int scalesame1, scalesame2;
40  int scalezone1_x, zone1offset_x;
41  int table_index = dir ^ v->second_field;
42 
43  if (v->s.pict_type != AV_PICTURE_TYPE_B)
44  refdist = v->refdist;
45  else
46  refdist = dir ? v->brfd : v->frfd;
47  if (refdist > 3)
48  refdist = 3;
49  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
50  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
51  scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist];
52  zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist];
53 
54  if (FFABS(n) > 255)
55  scaledvalue = n;
56  else {
57  if (FFABS(n) < scalezone1_x)
58  scaledvalue = (n * scalesame1) >> 8;
59  else {
60  if (n < 0)
61  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x;
62  else
63  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x;
64  }
65  }
66  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
67 }
68 
69 static av_always_inline int scaleforsame_y(const VC1Context *v, int n /* MV */, int dir)
70 {
71  int scaledvalue, refdist;
72  int scalesame1, scalesame2;
73  int scalezone1_y, zone1offset_y;
74  int table_index = dir ^ v->second_field;
75 
76  if (v->s.pict_type != AV_PICTURE_TYPE_B)
77  refdist = v->refdist;
78  else
79  refdist = dir ? v->brfd : v->frfd;
80  if (refdist > 3)
81  refdist = 3;
82  scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist];
83  scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist];
84  scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist];
85  zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist];
86 
87  if (FFABS(n) > 63)
88  scaledvalue = n;
89  else {
90  if (FFABS(n) < scalezone1_y)
91  scaledvalue = (n * scalesame1) >> 8;
92  else {
93  if (n < 0)
94  scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y;
95  else
96  scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y;
97  }
98  }
99 
100  if (v->cur_field_type && !v->ref_field_type[dir])
101  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
102  else
103  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
104 }
105 
106 static av_always_inline int scaleforopp_x(const VC1Context *v, int n /* MV */)
107 {
108  int scalezone1_x, zone1offset_x;
109  int scaleopp1, scaleopp2, brfd;
110  int scaledvalue;
111 
112  brfd = FFMIN(v->brfd, 3);
113  scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd];
114  zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd];
115  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
116  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
117 
118  if (FFABS(n) > 255)
119  scaledvalue = n;
120  else {
121  if (FFABS(n) < scalezone1_x)
122  scaledvalue = (n * scaleopp1) >> 8;
123  else {
124  if (n < 0)
125  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x;
126  else
127  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x;
128  }
129  }
130  return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
131 }
132 
133 static av_always_inline int scaleforopp_y(const VC1Context *v, int n /* MV */, int dir)
134 {
135  int scalezone1_y, zone1offset_y;
136  int scaleopp1, scaleopp2, brfd;
137  int scaledvalue;
138 
139  brfd = FFMIN(v->brfd, 3);
140  scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd];
141  zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd];
142  scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd];
143  scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd];
144 
145  if (FFABS(n) > 63)
146  scaledvalue = n;
147  else {
148  if (FFABS(n) < scalezone1_y)
149  scaledvalue = (n * scaleopp1) >> 8;
150  else {
151  if (n < 0)
152  scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y;
153  else
154  scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y;
155  }
156  }
157  if (v->cur_field_type && !v->ref_field_type[dir]) {
158  return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2);
159  } else {
160  return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
161  }
162 }
163 
164 static av_always_inline int scaleforsame(const VC1Context *v, int n /* MV */,
165  int dim, int dir)
166 {
167  int brfd, scalesame;
168  int hpel = 1 - v->s.quarter_sample;
169 
170  n >>= hpel;
171  if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
172  if (dim)
173  n = scaleforsame_y(v, n, dir) * (1 << hpel);
174  else
175  n = scaleforsame_x(v, n, dir) * (1 << hpel);
176  return n;
177  }
178  brfd = FFMIN(v->brfd, 3);
179  scalesame = ff_vc1_b_field_mvpred_scales[0][brfd];
180 
181  n = (n * scalesame >> 8) * (1 << hpel);
182  return n;
183 }
184 
185 static av_always_inline int scaleforopp(const VC1Context *v, int n /* MV */,
186  int dim, int dir)
187 {
188  int refdist, scaleopp;
189  int hpel = 1 - v->s.quarter_sample;
190 
191  n >>= hpel;
192  if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
193  if (dim)
194  n = scaleforopp_y(v, n, dir) * (1 << hpel);
195  else
196  n = scaleforopp_x(v, n) * (1 << hpel);
197  return n;
198  }
199  if (v->s.pict_type != AV_PICTURE_TYPE_B)
200  refdist = v->refdist;
201  else
202  refdist = dir ? v->brfd : v->frfd;
203  refdist = FFMIN(refdist, 3);
204  scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist];
205 
206  n = (n * scaleopp >> 8) * (1 << hpel);
207  return n;
208 }
209 
210 /** Predict and set motion vector
211  */
212 void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
213  int mv1, int r_x, int r_y, uint8_t* is_intra,
214  int pred_flag, int dir)
215 {
216  MpegEncContext *s = &v->s;
217  int xy, wrap, off = 0;
218  int px, py;
219  int sum;
220  int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
221  int opposite, a_f, b_f, c_f;
222  int16_t field_predA[2];
223  int16_t field_predB[2];
224  int16_t field_predC[2];
225  int a_valid, b_valid, c_valid;
226  int hybridmv_thresh, y_bias = 0;
227 
228  if (v->mv_mode == MV_PMODE_MIXED_MV ||
230  mixedmv_pic = 1;
231  else
232  mixedmv_pic = 0;
233  /* scale MV difference to be quad-pel */
234  if (!s->quarter_sample) {
235  dmv_x *= 2;
236  dmv_y *= 2;
237  }
238 
239  wrap = s->b8_stride;
240  xy = s->block_index[n];
241 
242  if (s->mb_intra) {
243  s->mv[0][n][0] = s->cur_pic.motion_val[0][xy + v->blocks_off][0] = 0;
244  s->mv[0][n][1] = s->cur_pic.motion_val[0][xy + v->blocks_off][1] = 0;
245  s->cur_pic.motion_val[1][xy + v->blocks_off][0] = 0;
246  s->cur_pic.motion_val[1][xy + v->blocks_off][1] = 0;
247  if (mv1) { /* duplicate motion data for 1-MV block */
248  s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][0] = 0;
249  s->cur_pic.motion_val[0][xy + 1 + v->blocks_off][1] = 0;
250  s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][0] = 0;
251  s->cur_pic.motion_val[0][xy + wrap + v->blocks_off][1] = 0;
252  s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0;
253  s->cur_pic.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0;
254  v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
255  s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][0] = 0;
256  s->cur_pic.motion_val[1][xy + 1 + v->blocks_off][1] = 0;
257  s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][0] = 0;
258  s->cur_pic.motion_val[1][xy + wrap + v->blocks_off][1] = 0;
259  s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0;
260  s->cur_pic.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0;
261  }
262  return;
263  }
264 
265  a_valid = !s->first_slice_line || (n == 2 || n == 3);
266  b_valid = a_valid;
267  c_valid = s->mb_x || (n == 1 || n == 3);
268  if (mv1) {
269  if (v->field_mode && mixedmv_pic)
270  off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
271  else
272  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2;
273  b_valid = b_valid && s->mb_width > 1;
274  } else {
275  //in 4-MV mode different blocks have different B predictor position
276  switch (n) {
277  case 0:
278  if (v->res_rtm_flag)
279  off = s->mb_x ? -1 : 1;
280  else
281  off = s->mb_x ? -1 : 2 * s->mb_width - wrap - 1;
282  break;
283  case 1:
284  off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1;
285  break;
286  case 2:
287  off = 1;
288  break;
289  case 3:
290  off = -1;
291  }
292  if (v->field_mode && s->mb_width == 1)
293  b_valid = b_valid && c_valid;
294  }
295 
296  if (v->field_mode) {
297  a_valid = a_valid && !is_intra[xy - wrap];
298  b_valid = b_valid && !is_intra[xy - wrap + off];
299  c_valid = c_valid && !is_intra[xy - 1];
300  }
301 
302  if (a_valid) {
303  const int16_t *A = s->cur_pic.motion_val[dir][xy - wrap + v->blocks_off];
304  a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
305  num_oppfield += a_f;
306  num_samefield += 1 - a_f;
307  field_predA[0] = A[0];
308  field_predA[1] = A[1];
309  } else {
310  field_predA[0] = field_predA[1] = 0;
311  a_f = 0;
312  }
313  if (b_valid) {
314  const int16_t *B = s->cur_pic.motion_val[dir][xy - wrap + off + v->blocks_off];
315  b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
316  num_oppfield += b_f;
317  num_samefield += 1 - b_f;
318  field_predB[0] = B[0];
319  field_predB[1] = B[1];
320  } else {
321  field_predB[0] = field_predB[1] = 0;
322  b_f = 0;
323  }
324  if (c_valid) {
325  const int16_t *C = s->cur_pic.motion_val[dir][xy - 1 + v->blocks_off];
326  c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
327  num_oppfield += c_f;
328  num_samefield += 1 - c_f;
329  field_predC[0] = C[0];
330  field_predC[1] = C[1];
331  } else {
332  field_predC[0] = field_predC[1] = 0;
333  c_f = 0;
334  }
335 
336  if (v->field_mode) {
337  if (!v->numref)
338  // REFFIELD determines if the last field or the second-last field is
339  // to be used as reference
340  opposite = 1 - v->reffield;
341  else {
342  if (num_samefield <= num_oppfield)
343  opposite = 1 - pred_flag;
344  else
345  opposite = pred_flag;
346  }
347  } else
348  opposite = 0;
349  if (opposite) {
350  v->mv_f[dir][xy + v->blocks_off] = 1;
351  v->ref_field_type[dir] = !v->cur_field_type;
352  if (a_valid && !a_f) {
353  field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
354  field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
355  }
356  if (b_valid && !b_f) {
357  field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
358  field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
359  }
360  if (c_valid && !c_f) {
361  field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
362  field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
363  }
364  } else {
365  v->mv_f[dir][xy + v->blocks_off] = 0;
366  v->ref_field_type[dir] = v->cur_field_type;
367  if (a_valid && a_f) {
368  field_predA[0] = scaleforsame(v, field_predA[0], 0, dir);
369  field_predA[1] = scaleforsame(v, field_predA[1], 1, dir);
370  }
371  if (b_valid && b_f) {
372  field_predB[0] = scaleforsame(v, field_predB[0], 0, dir);
373  field_predB[1] = scaleforsame(v, field_predB[1], 1, dir);
374  }
375  if (c_valid && c_f) {
376  field_predC[0] = scaleforsame(v, field_predC[0], 0, dir);
377  field_predC[1] = scaleforsame(v, field_predC[1], 1, dir);
378  }
379  }
380 
381  if (a_valid) {
382  px = field_predA[0];
383  py = field_predA[1];
384  } else if (c_valid) {
385  px = field_predC[0];
386  py = field_predC[1];
387  } else if (b_valid) {
388  px = field_predB[0];
389  py = field_predB[1];
390  } else {
391  px = 0;
392  py = 0;
393  }
394 
395  if (num_samefield + num_oppfield > 1) {
396  px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
397  py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
398  }
399 
400  /* Pullback MV as specified in 8.3.5.3.4 */
401  if (!v->field_mode) {
402  int qx, qy, X, Y;
403  int MV = mv1 ? -60 : -28;
404  qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0);
405  qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0);
406  X = (s->mb_width << 6) - 4;
407  Y = (s->mb_height << 6) - 4;
408  if (qx + px < MV) px = MV - qx;
409  if (qy + py < MV) py = MV - qy;
410  if (qx + px > X) px = X - qx;
411  if (qy + py > Y) py = Y - qy;
412  }
413 
414  if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) {
415  /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */
416  hybridmv_thresh = 32;
417  if (a_valid && c_valid) {
418  if (is_intra[xy - wrap])
419  sum = FFABS(px) + FFABS(py);
420  else
421  sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
422  if (sum > hybridmv_thresh) {
423  if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
424  px = field_predA[0];
425  py = field_predA[1];
426  } else {
427  px = field_predC[0];
428  py = field_predC[1];
429  }
430  } else {
431  if (is_intra[xy - 1])
432  sum = FFABS(px) + FFABS(py);
433  else
434  sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
435  if (sum > hybridmv_thresh) {
436  if (get_bits1(&s->gb)) {
437  px = field_predA[0];
438  py = field_predA[1];
439  } else {
440  px = field_predC[0];
441  py = field_predC[1];
442  }
443  }
444  }
445  }
446  }
447 
448  if (v->field_mode && v->numref)
449  r_y >>= 1;
450  if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0)
451  y_bias = 1;
452  /* store MV using signed modulus of MV range defined in 4.11 */
453  s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
454  s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias;
455  if (mv1) { /* duplicate motion data for 1-MV block */
456  s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
457  s->cur_pic.motion_val[dir][xy + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
458  s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
459  s->cur_pic.motion_val[dir][xy + wrap + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
460  s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->cur_pic.motion_val[dir][xy + v->blocks_off][0];
461  s->cur_pic.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->cur_pic.motion_val[dir][xy + v->blocks_off][1];
462  v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
463  v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off];
464  }
465 }
466 
/** Predict and set motion vector for interlaced frame picture MBs
 *
 * Candidate predictors are A (left), B (above) and C (above-right, or
 * above-left at the right picture edge). When a candidate's MV type
 * (frame vs. field MV, see blk_mv_type) differs from the current
 * block's, the candidate's two field MVs are averaged. For field-MV
 * blocks the candidates are further split by field polarity (bit 2 of
 * the vertical component) before selecting the predictor.
 *
 * @param v     VC-1 decoder context
 * @param n     block index within the MB (0..3)
 * @param dmv_x decoded horizontal MV differential
 * @param dmv_y decoded vertical MV differential
 * @param mvn   MB MV layout: 1 = 1-MV (duplicate to all 4 blocks),
 *              2 = 2-field-MV (duplicate horizontally), else 4-MV
 * @param r_x   horizontal MV range
 * @param r_y   vertical MV range
 * @param dir   prediction direction: 0 = forward, 1 = backward
 */
void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                          int mvn, int r_x, int r_y, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px = 0, py = 0;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy   = s->block_index[n];

    if (s->mb_intra) {
        /* intra MB: zero all stored MVs so neighbours predict from 0 */
        s->mv[0][n][0] = s->cur_pic.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->cur_pic.motion_val[0][xy][1] = 0;
        s->cur_pic.motion_val[1][xy][0] = 0;
        s->cur_pic.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->cur_pic.motion_val[0][xy + 1][0]        = 0;
            s->cur_pic.motion_val[0][xy + 1][1]        = 0;
            s->cur_pic.motion_val[0][xy + wrap][0]     = 0;
            s->cur_pic.motion_val[0][xy + wrap][1]     = 0;
            s->cur_pic.motion_val[0][xy + wrap + 1][0] = 0;
            s->cur_pic.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->cur_pic.motion_val[1][xy + 1][0]        = 0;
            s->cur_pic.motion_val[1][xy + 1][1]        = 0;
            s->cur_pic.motion_val[1][xy + wrap][0]     = 0;
            s->cur_pic.motion_val[1][xy + wrap][1]     = 0;
            s->cur_pic.motion_val[1][xy + wrap + 1][0] = 0;
            s->cur_pic.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* vertical offset to the other field row of a candidate */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->cur_pic.motion_val[dir][xy - 1][0];
            A[1] = s->cur_pic.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->cur_pic.motion_val[dir][xy - 1][0]
                    + s->cur_pic.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->cur_pic.motion_val[dir][xy - 1][1]
                    + s->cur_pic.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left neighbour MB is intra: A unusable for the left blocks */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                /* pick the adjacent block in the MB above */
                n_adj   = n | 2;
                pos_b   = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                /* field-MV candidate vs frame-MV current: average both fields */
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    B[0] = (B[0] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj   = 2;
                    pos_c   = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        C[0] = (1 + C[0] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->cur_pic.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* at the right picture edge C comes from above-left instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj   = 3;
                            pos_c   = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->cur_pic.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->cur_pic.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from the top blocks */
        pos_b   = s->block_index[1];
        b_valid = 1;
        B[0]    = s->cur_pic.motion_val[dir][pos_b][0];
        B[1]    = s->cur_pic.motion_val[dir][pos_b][1];
        pos_c   = s->block_index[0];
        c_valid = 1;
        C[0]    = s->cur_pic.motion_val[dir][pos_c][0];
        C[1]    = s->cur_pic.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV block: plain median / single-candidate selection */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else { px = C[0]; py = C[1]; }
            }
        }
    } else {
        /* field-MV block: classify candidates by field polarity
         * (bit 2 of the vertical component) */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield  = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else /*if (c_valid)*/ {
                    av_assert1(c_valid);
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else /*if (field_b && b_valid)*/ {
                    av_assert1(field_b && b_valid);
                    px = B[0];
                    py = B[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        }
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->cur_pic.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->cur_pic.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->cur_pic.motion_val[dir][xy + 1    ][0] = s->cur_pic.motion_val[dir][xy][0];
        s->cur_pic.motion_val[dir][xy + 1    ][1] = s->cur_pic.motion_val[dir][xy][1];
        s->cur_pic.motion_val[dir][xy + wrap ][0] = s->cur_pic.motion_val[dir][xy][0];
        s->cur_pic.motion_val[dir][xy + wrap ][1] = s->cur_pic.motion_val[dir][xy][1];
        s->cur_pic.motion_val[dir][xy + wrap + 1][0] = s->cur_pic.motion_val[dir][xy][0];
        s->cur_pic.motion_val[dir][xy + wrap + 1][1] = s->cur_pic.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->cur_pic.motion_val[dir][xy + 1][0] = s->cur_pic.motion_val[dir][xy][0];
        s->cur_pic.motion_val[dir][xy + 1][1] = s->cur_pic.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
689 
/**
 * Predict and set both motion vectors of a B-frame MB (progressive).
 *
 * Direct mode derives forward and backward MVs by scaling the
 * co-located MV of the next picture with BFRACTION; otherwise the
 * forward and/or backward MV is predicted from the A/B/C neighbours
 * (median or edge fallback), pulled back into the picture, and stored
 * using the signed modulus of the MV range.
 *
 * @param v      VC-1 decoder context (frame mode only, asserted)
 * @param dmv_x  forward/backward horizontal MV differentials
 * @param dmv_y  forward/backward vertical MV differentials
 * @param direct nonzero for direct-mode MBs
 * @param mvtype BMV_TYPE_FORWARD / BACKWARD / INTERPOLATED
 */
void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                      int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    if (!s->quarter_sample) {
        dmv_x[0] *= 2;
        dmv_y[0] *= 2;
        dmv_x[1] *= 2;
        dmv_y[1] *= 2;
    }

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        /* intra MB: both direction MVs are zero */
        s->cur_pic.motion_val[0][xy][0] =
        s->cur_pic.motion_val[0][xy][1] =
        s->cur_pic.motion_val[1][xy][0] =
        s->cur_pic.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_pic.ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

    /* direct-mode seed: scale the co-located MV of the next picture */
    s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width  << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0];
        s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1];
        s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0];
        s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* forward MV prediction from neighbours C (left), A (above),
         * B (above-right, or above-left at the right edge) */
        int16_t *C = s->cur_pic.motion_val[0][xy - 2];
        const int16_t *A = s->cur_pic.motion_val[0][xy - wrap * 2];
        int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        const int16_t *B = s->cur_pic.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X  = (s->mb_width  << sh) - 4;
            Y  = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE(review): branch deliberately disabled via `0 &&` — kept
         * from the spec; confirm intent against upstream history before
         * re-enabling. */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        /* backward MV prediction — mirror of the forward case on the
         * dir=1 motion_val plane */
        int16_t *C = s->cur_pic.motion_val[1][xy - 2];
        const int16_t *A = s->cur_pic.motion_val[1][xy - wrap * 2];
        int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        const int16_t *B = s->cur_pic.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X  = (s->mb_width  << sh) - 4;
            Y  = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE(review): same deliberately disabled branch as above. */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->cur_pic.motion_val[0][xy][0] = s->mv[0][0][0];
    s->cur_pic.motion_val[0][xy][1] = s->mv[0][0][1];
    s->cur_pic.motion_val[1][xy][0] = s->mv[1][0][0];
    s->cur_pic.motion_val[1][xy][1] = s->mv[1][0][1];
}
888 
/**
 * Predict and set motion vectors of a B MB in an interlaced field picture.
 *
 * Direct mode scales the co-located next-picture MV with BFRACTION and
 * chooses the reference field by majority vote of the four co-located
 * field flags; interpolated mode predicts both directions via
 * ff_vc1_pred_mv(); plain forward/backward modes predict the coded
 * direction and, on the last block, also refresh the other direction's
 * predictor state.
 *
 * @param v         VC-1 decoder context
 * @param n         block index within the MB (0..3)
 * @param dmv_x     per-direction horizontal MV differentials
 * @param dmv_y     per-direction vertical MV differentials
 * @param mv1       nonzero for 1-MV mode
 * @param pred_flag per-direction predictor field selection
 */
void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
                            int mv1, int *pred_flag)
{
    int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
    MpegEncContext *s = &v->s;
    int mb_pos = s->mb_x + s->mb_y * s->mb_stride;

    if (v->bmvtype == BMV_TYPE_DIRECT) {
        int total_opp, k, f;
        if (s->next_pic.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) {
            /* scale the co-located MV forward and backward by BFRACTION */
            s->mv[0][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[0][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 0, s->quarter_sample);
            s->mv[1][0][0] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                      v->bfraction, 1, s->quarter_sample);
            s->mv[1][0][1] = scale_mv(s->next_pic.motion_val[1][s->block_index[0] + v->blocks_off][1],
                                      v->bfraction, 1, s->quarter_sample);

            /* majority vote of the four co-located field flags decides
             * the reference field parity */
            total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[1] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[2] + v->blocks_off]
                      + v->mv_f_next[0][s->block_index[3] + v->blocks_off];
            f = (total_opp > 2) ? 1 : 0;
        } else {
            /* co-located MB is intra: zero MVs, same-field reference */
            s->mv[0][0][0] = s->mv[0][0][1] = 0;
            s->mv[1][0][0] = s->mv[1][0][1] = 0;
            f = 0;
        }
        v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f;
        /* propagate the direct MV and field flag to all four blocks */
        for (k = 0; k < 4; k++) {
            s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0];
            s->cur_pic.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1];
            s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0];
            s->cur_pic.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1];
            v->mv_f[0][s->block_index[k] + v->blocks_off] = f;
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
        }
        return;
    }
    if (v->bmvtype == BMV_TYPE_INTERPOLATED) {
        /* predict both directions independently */
        ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        return;
    }
    if (dir) { // backward
        ff_vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1);
        if (n == 3 || mv1) {
            /* refresh forward predictor state with a zero differential */
            ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0);
        }
    } else { // forward
        ff_vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0);
        if (n == 3 || mv1) {
            /* refresh backward predictor state with a zero differential */
            ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1);
        }
    }
}
A
#define A(x)
Definition: vpx_arith.h:28
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_clip
#define av_clip
Definition: common.h:100
VC1Context
The VC1 Context.
Definition: vc1.h:173
scaleforsame_x
static av_always_inline int scaleforsame_x(const VC1Context *v, int n, int dir)
Definition: vc1_pred.c:36
vc1.h
BMV_TYPE_DIRECT
@ BMV_TYPE_DIRECT
Definition: vc1.h:105
VC1Context::reffield
int reffield
if numref = 0 (1 reference) then reffield decides which
Definition: vc1.h:356
MV_PMODE_INTENSITY_COMP
@ MV_PMODE_INTENSITY_COMP
Definition: vc1.h:83
mpegvideo.h
VC1Context::luma_mv
int16_t((* luma_mv)[2]
Definition: vc1.h:391
mpegutils.h
scaleforsame_y
static av_always_inline int scaleforsame_y(const VC1Context *v, int n, int dir)
Definition: vc1_pred.c:69
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:206
wrap
#define wrap(func)
Definition: neontest.h:65
VC1Context::numref
int numref
number of past field pictures used as reference
Definition: vc1.h:354
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
VC1Context::refdist
int refdist
distance of the current picture from reference
Definition: vc1.h:353
ff_vc1_pred_b_mv_intfi
void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, int mv1, int *pred_flag)
Definition: vc1_pred.c:889
VC1Context::mb_type
uint8_t * mb_type[3]
Definition: vc1.h:262
s
#define s(width, name)
Definition: cbs_vp9.c:198
ff_vc1_b_field_mvpred_scales
const uint16_t ff_vc1_b_field_mvpred_scales[7][4]
Definition: vc1data.c:281
VC1Context::mv_f
uint8_t * mv_f[2]
0: MV obtained from same field, 1: opposite field
Definition: vc1.h:348
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
B
#define B
Definition: huffyuv.h:42
PROFILE_ADVANCED
@ PROFILE_ADVANCED
Definition: vc1_common.h:52
VC1Context::frfd
int frfd
Definition: vc1.h:365
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
VC1Context::mv_mode
uint8_t mv_mode
Frame decoding info for all profiles.
Definition: vc1.h:231
VC1Context::field_mode
int field_mode
1 for interlaced field pictures
Definition: vc1.h:350
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:388
mathops.h
scaleforopp_x
static av_always_inline int scaleforopp_x(const VC1Context *v, int n)
Definition: vc1_pred.c:106
ff_vc1_field_mvpred_scales
const uint16_t ff_vc1_field_mvpred_scales[2][7][4]
Definition: vc1data.c:257
VC1Context::mv_mode2
uint8_t mv_mode2
Secondary MV coding mode (B-frames)
Definition: vc1.h:232
vc1_pred.h
VC1Context::mv_f_next
uint8_t * mv_f_next[2]
Definition: vc1.h:349
f
f
Definition: af_crystalizer.c:122
VC1Context::is_intra
uint8_t * is_intra
Definition: vc1.h:390
VC1Context::mb_off
int mb_off
Definition: vc1.h:362
MV
Definition: clearvideo.c:48
VC1Context::bfraction
int16_t bfraction
Relative position % anchors=> how to scale MVs.
Definition: vc1.h:270
MV_PMODE_MIXED_MV
@ MV_PMODE_MIXED_MV
Definition: vc1.h:82
BMV_TYPE_INTERPOLATED
@ BMV_TYPE_INTERPOLATED
Definition: vc1.h:104
scaleforopp_y
static av_always_inline int scaleforopp_y(const VC1Context *v, int n, int dir)
Definition: vc1_pred.c:133
ff_vc1_pred_b_mv
void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int direct, int mvtype)
Definition: vc1_pred.c:690
scale_mv
#define scale_mv(n, dim)
MpegEncContext::quarter_sample
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:387
vc1data.h
Y
#define Y
Definition: boxblur.h:37
VC1Context::cur_field_type
int cur_field_type
0: top, 1: bottom
Definition: vc1.h:360
scaleforopp
static av_always_inline int scaleforopp(const VC1Context *v, int n, int dim, int dir)
Definition: vc1_pred.c:185
X
@ X
Definition: vf_addroi.c:27
VC1Context::range_x
int range_x
Definition: vc1.h:235
BMV_TYPE_FORWARD
@ BMV_TYPE_FORWARD
Definition: vc1.h:103
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
av_always_inline
#define av_always_inline
Definition: attributes.h:49
VC1Context::s
MpegEncContext s
Definition: vc1.h:174
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
ff_vc1_pred_mv_intfr
void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y, int mvn, int r_x, int r_y, int dir)
Predict and set motion vector for interlaced frame picture MBs.
Definition: vc1_pred.c:469
dim
int dim
Definition: vorbis_enc_data.h:425
mid_pred
#define mid_pred
Definition: mathops.h:96
VC1Context::second_field
int second_field
Definition: vc1.h:352
VC1Context::ref_field_type
int ref_field_type[2]
forward and backward reference field type (top or bottom)
Definition: vc1.h:361
BMV_TYPE_BACKWARD
@ BMV_TYPE_BACKWARD
Definition: vc1.h:102
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:281
VC1Context::brfd
int brfd
reference frame distance (forward or backward)
Definition: vc1.h:365
VC1Context::res_rtm_flag
int res_rtm_flag
reserved, set to 1
Definition: vc1.h:189
VC1Context::profile
int profile
Sequence header data for all Profiles TODO: choose between ints, uint8_ts and monobit flags.
Definition: vc1.h:216
VC1Context::range_y
int range_y
MV range.
Definition: vc1.h:235
VC1Context::bmvtype
int bmvtype
Definition: vc1.h:364
ff_vc1_pred_mv
void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, int mv1, int r_x, int r_y, uint8_t *is_intra, int pred_flag, int dir)
Predict and set motion vector.
Definition: vc1_pred.c:212
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
scaleforsame
static av_always_inline int scaleforsame(const VC1Context *v, int n, int dim, int dir)
Definition: vc1_pred.c:164
VC1Context::blk_mv_type
uint8_t * blk_mv_type
0: frame MV, 1: field MV (interlaced frame)
Definition: vc1.h:347
VC1Context::blocks_off
int blocks_off
Definition: vc1.h:362
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:73
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:65