FFmpeg
mpegvideo_motion.c
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include "config_components.h"
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/internal.h"
28 #include "libavutil/mem_internal.h"
29 
30 #include "avcodec.h"
31 #include "h261.h"
32 #include "mpegutils.h"
33 #include "mpegvideo.h"
34 #include "mpeg4videodec.h"
35 #include "qpeldsp.h"
36 #include "wmv2.h"
37 
38 static inline int hpel_motion(MpegEncContext *s,
39  uint8_t *dest, uint8_t *src,
40  int src_x, int src_y,
41  op_pixels_func *pix_op,
42  int motion_x, int motion_y)
43 {
44  int dxy = 0;
45  int emu = 0;
46 
47  src_x += motion_x >> 1;
48  src_y += motion_y >> 1;
49 
50  /* WARNING: do not forget half pels */
51  src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu?
52  if (src_x != s->width)
53  dxy |= motion_x & 1;
54  src_y = av_clip(src_y, -16, s->height);
55  if (src_y != s->height)
56  dxy |= (motion_y & 1) << 1;
57  src += src_y * s->linesize + src_x;
58 
59  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) ||
60  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) {
61  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
62  s->linesize, s->linesize,
63  9, 9,
64  src_x, src_y,
65  s->h_edge_pos, s->v_edge_pos);
66  src = s->sc.edge_emu_buffer;
67  emu = 1;
68  }
69  pix_op[dxy](dest, src, s->linesize, 8);
70  return emu;
71 }
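/* Note on the dxy index above: it selects one of the four half-pel
 * interpolation variants in pix_op[] -- bit 0 is a horizontal half-pel
 * offset, bit 1 a vertical one.  For example motion_x = 3, motion_y = -5
 * gives dxy = (3 & 1) | ((-5 & 1) << 1) = 3 (interpolate both ways),
 * unless the block was clipped to the right/bottom border above, in which
 * case the corresponding bit stays clear. */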
72 
73 static av_always_inline
74 void mpeg_motion_internal(MpegEncContext *s,
75  uint8_t *dest_y,
76  uint8_t *dest_cb,
77  uint8_t *dest_cr,
78  int field_based,
79  int bottom_field,
80  int field_select,
81  uint8_t *const *ref_picture,
82  op_pixels_func (*pix_op)[4],
83  int motion_x,
84  int motion_y,
85  int h,
86  int is_mpeg12,
87  int is_16x8,
88  int mb_y)
89 {
90  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
91  int dxy, uvdxy, mx, my, src_x, src_y,
92  uvsrc_x, uvsrc_y, v_edge_pos, block_y_half;
93  ptrdiff_t uvlinesize, linesize;
94 
95  v_edge_pos = s->v_edge_pos >> field_based;
96  linesize = s->current_picture.f->linesize[0] << field_based;
97  uvlinesize = s->current_picture.f->linesize[1] << field_based;
98  block_y_half = (field_based | is_16x8);
99 
100  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
101  src_x = s->mb_x * 16 + (motion_x >> 1);
102  src_y = (mb_y << (4 - block_y_half)) + (motion_y >> 1);
103 
104  if (!is_mpeg12 && s->out_format == FMT_H263) {
105  if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
106  mx = (motion_x >> 1) | (motion_x & 1);
107  my = motion_y >> 1;
108  uvdxy = ((my & 1) << 1) | (mx & 1);
109  uvsrc_x = s->mb_x * 8 + (mx >> 1);
110  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
111  } else {
112  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
113  uvsrc_x = src_x >> 1;
114  uvsrc_y = src_y >> 1;
115  }
116  // Even chroma MVs are full-pel in H.261
117  } else if (!is_mpeg12 && s->out_format == FMT_H261) {
118  mx = motion_x / 4;
119  my = motion_y / 4;
120  uvdxy = 0;
121  uvsrc_x = s->mb_x * 8 + mx;
122  uvsrc_y = mb_y * 8 + my;
123  } else {
124  if (s->chroma_y_shift) {
125  mx = motion_x / 2;
126  my = motion_y / 2;
127  uvdxy = ((my & 1) << 1) | (mx & 1);
128  uvsrc_x = s->mb_x * 8 + (mx >> 1);
129  uvsrc_y = (mb_y << (3 - block_y_half)) + (my >> 1);
130  } else {
131  if (s->chroma_x_shift) {
132  // Chroma422
133  mx = motion_x / 2;
134  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
135  uvsrc_x = s->mb_x * 8 + (mx >> 1);
136  uvsrc_y = src_y;
137  } else {
138  // Chroma444
139  uvdxy = dxy;
140  uvsrc_x = src_x;
141  uvsrc_y = src_y;
142  }
143  }
144  }
145 
146  ptr_y = ref_picture[0] + src_y * linesize + src_x;
147  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
148  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
149 
150  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15 , 0) ||
151  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 1) - h + 1, 0)) {
152  if (is_mpeg12 || (CONFIG_SMALL &&
153  (s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
154  s->codec_id == AV_CODEC_ID_MPEG1VIDEO))) {
155  av_log(s->avctx, AV_LOG_DEBUG,
156  "MPEG motion vector out of boundary (%d %d)\n", src_x,
157  src_y);
158  return;
159  }
160  src_y = (unsigned)src_y << field_based;
161  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
162  s->linesize, s->linesize,
163  17, 17 + field_based,
164  src_x, src_y,
165  s->h_edge_pos, s->v_edge_pos);
166  ptr_y = s->sc.edge_emu_buffer;
167  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
168  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
169  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
170  if (s->workaround_bugs & FF_BUG_IEDGE)
171  vbuf -= s->uvlinesize;
172  uvsrc_y = (unsigned)uvsrc_y << field_based;
173  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
174  s->uvlinesize, s->uvlinesize,
175  9, 9 + field_based,
176  uvsrc_x, uvsrc_y,
177  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
178  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
179  s->uvlinesize, s->uvlinesize,
180  9, 9 + field_based,
181  uvsrc_x, uvsrc_y,
182  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
183  ptr_cb = ubuf;
184  ptr_cr = vbuf;
185  }
186  }
187 
188  /* FIXME use this for field pix too instead of the obnoxious hack which
189  * changes picture.data */
190  if (bottom_field) {
191  dest_y += s->linesize;
192  dest_cb += s->uvlinesize;
193  dest_cr += s->uvlinesize;
194  }
195 
196  if (field_select) {
197  ptr_y += s->linesize;
198  ptr_cb += s->uvlinesize;
199  ptr_cr += s->uvlinesize;
200  }
201 
202  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
203 
204  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
205  pix_op[s->chroma_x_shift][uvdxy]
206  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
207  pix_op[s->chroma_x_shift][uvdxy]
208  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
209  }
210  if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
211  s->out_format == FMT_H261) {
212  ff_h261_loop_filter(s);
213  }
214 }
215 /* apply one MPEG motion vector to the three components */
216 static void mpeg_motion(MpegEncContext *s,
217  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
218  int field_select, uint8_t *const *ref_picture,
219  op_pixels_func (*pix_op)[4],
220  int motion_x, int motion_y, int h, int is_16x8, int mb_y)
221 {
222 #if !CONFIG_SMALL
223  if (s->out_format == FMT_MPEG1)
224  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
225  field_select, ref_picture, pix_op,
226  motion_x, motion_y, h, 1, is_16x8, mb_y);
227  else
228 #endif
229  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
230  field_select, ref_picture, pix_op,
231  motion_x, motion_y, h, 0, is_16x8, mb_y);
232 }
233 
234 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
235  uint8_t *dest_cb, uint8_t *dest_cr,
236  int bottom_field, int field_select,
237  uint8_t *const *ref_picture,
238  op_pixels_func (*pix_op)[4],
239  int motion_x, int motion_y, int h, int mb_y)
240 {
241 #if !CONFIG_SMALL
242  if (s->out_format == FMT_MPEG1)
243  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
244  bottom_field, field_select, ref_picture, pix_op,
245  motion_x, motion_y, h, 1, 0, mb_y);
246  else
247 #endif
248  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
249  bottom_field, field_select, ref_picture, pix_op,
250  motion_x, motion_y, h, 0, 0, mb_y);
251 }
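/* mpeg_motion() and mpeg_motion_field() are thin wrappers around
 * mpeg_motion_internal().  Because the internal function is
 * av_always_inline and is_mpeg12 is a literal constant at every call site
 * in !CONFIG_SMALL builds, the compiler emits two specialized copies and
 * all is_mpeg12 branches inside it are resolved at compile time;
 * CONFIG_SMALL builds keep a single copy and rely on runtime codec_id
 * checks instead. */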
252 
253 // FIXME: SIMDify, avg variant, 16x16 version
254 static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
255 {
256  int x;
257  uint8_t *const top = src[1];
258  uint8_t *const left = src[2];
259  uint8_t *const mid = src[0];
260  uint8_t *const right = src[3];
261  uint8_t *const bottom = src[4];
262 #define OBMC_FILTER(x, t, l, m, r, b)\
263  dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
264 #define OBMC_FILTER4(x, t, l, m, r, b)\
265  OBMC_FILTER(x , t, l, m, r, b);\
266  OBMC_FILTER(x+1 , t, l, m, r, b);\
267  OBMC_FILTER(x +stride, t, l, m, r, b);\
268  OBMC_FILTER(x+1+stride, t, l, m, r, b);
269 
270  x = 0;
271  OBMC_FILTER (x , 2, 2, 4, 0, 0);
272  OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
273  OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
274  OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
275  OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
276  OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
277  x += stride;
278  OBMC_FILTER (x , 1, 2, 5, 0, 0);
279  OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
280  OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
281  OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
282  x += stride;
283  OBMC_FILTER4(x , 1, 2, 5, 0, 0);
284  OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
285  OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
286  OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
287  x += 2 * stride;
288  OBMC_FILTER4(x , 0, 2, 5, 0, 1);
289  OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
290  OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
291  OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
292  x += 2*stride;
293  OBMC_FILTER (x , 0, 2, 5, 0, 1);
294  OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
295  OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
296  OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
297  OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
298  OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
299  x += stride;
300  OBMC_FILTER (x , 0, 2, 4, 0, 2);
301  OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
302  OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
303  OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
304 }
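/* Every output pixel of put_obmc() is a weighted average of the five 8x8
 * predictions (mid, top, left, right, bottom).  The weights t+l+m+r+b of
 * each OBMC_FILTER() invocation sum to 8, so the result is normalized by
 * the rounded shift (+4) >> 3; e.g. the top-left pixel becomes
 * (2*top[0] + 2*left[0] + 4*mid[0] + 4) >> 3. */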
305 
306 /* obmc for 1 8x8 luma block */
307 static inline void obmc_motion(MpegEncContext *s,
308  uint8_t *dest, uint8_t *src,
309  int src_x, int src_y,
310  op_pixels_func *pix_op,
311  int16_t mv[5][2] /* mid top left right bottom */)
312 #define MID 0
313 {
314  int i;
315  uint8_t *ptr[5];
316 
317  av_assert2(s->quarter_sample == 0);
318 
319  for (i = 0; i < 5; i++) {
320  if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
321  ptr[i] = ptr[MID];
322  } else {
323  ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) +
324  s->linesize * 8 * (i >> 1);
325  hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
326  mv[i][0], mv[i][1]);
327  }
328  }
329 
330  put_obmc(dest, ptr, s->linesize);
331 }
332 
333 static inline void qpel_motion(MpegEncContext *s,
334  uint8_t *dest_y,
335  uint8_t *dest_cb,
336  uint8_t *dest_cr,
337  int field_based, int bottom_field,
338  int field_select, uint8_t *const *ref_picture,
339  op_pixels_func (*pix_op)[4],
340  qpel_mc_func (*qpix_op)[16],
341  int motion_x, int motion_y, int h)
342 {
343  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
344  int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
345  ptrdiff_t linesize, uvlinesize;
346 
347  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
348 
349  src_x = s->mb_x * 16 + (motion_x >> 2);
350  src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
351 
352  v_edge_pos = s->v_edge_pos >> field_based;
353  linesize = s->linesize << field_based;
354  uvlinesize = s->uvlinesize << field_based;
355 
356  if (field_based) {
357  mx = motion_x / 2;
358  my = motion_y >> 1;
359  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
360  static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
361  mx = (motion_x >> 1) + rtab[motion_x & 7];
362  my = (motion_y >> 1) + rtab[motion_y & 7];
363  } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
364  mx = (motion_x >> 1) | (motion_x & 1);
365  my = (motion_y >> 1) | (motion_y & 1);
366  } else {
367  mx = motion_x / 2;
368  my = motion_y / 2;
369  }
370  mx = (mx >> 1) | (mx & 1);
371  my = (my >> 1) | (my & 1);
372 
373  uvdxy = (mx & 1) | ((my & 1) << 1);
374  mx >>= 1;
375  my >>= 1;
376 
377  uvsrc_x = s->mb_x * 8 + mx;
378  uvsrc_y = s->mb_y * (8 >> field_based) + my;
379 
380  ptr_y = ref_picture[0] + src_y * linesize + src_x;
381  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
382  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
383 
384  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15 , 0) ||
385  (unsigned)src_y >= FFMAX( v_edge_pos - (motion_y & 3) - h + 1, 0)) {
386  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
387  s->linesize, s->linesize,
388  17, 17 + field_based,
389  src_x, src_y * (1 << field_based),
390  s->h_edge_pos, s->v_edge_pos);
391  ptr_y = s->sc.edge_emu_buffer;
392  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
393  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
394  uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
395  if (s->workaround_bugs & FF_BUG_IEDGE)
396  vbuf -= s->uvlinesize;
397  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
398  s->uvlinesize, s->uvlinesize,
399  9, 9 + field_based,
400  uvsrc_x, uvsrc_y * (1 << field_based),
401  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
402  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
403  s->uvlinesize, s->uvlinesize,
404  9, 9 + field_based,
405  uvsrc_x, uvsrc_y * (1 << field_based),
406  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
407  ptr_cb = ubuf;
408  ptr_cr = vbuf;
409  }
410  }
411 
412  if (!field_based)
413  qpix_op[0][dxy](dest_y, ptr_y, linesize);
414  else {
415  if (bottom_field) {
416  dest_y += s->linesize;
417  dest_cb += s->uvlinesize;
418  dest_cr += s->uvlinesize;
419  }
420 
421  if (field_select) {
422  ptr_y += s->linesize;
423  ptr_cb += s->uvlinesize;
424  ptr_cr += s->uvlinesize;
425  }
426  // damn interlaced mode
427  // FIXME boundary mirroring is not exactly correct here
428  qpix_op[1][dxy](dest_y, ptr_y, linesize);
429  qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
430  }
431  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
432  pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
433  pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
434  }
435 }
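/* The chroma vectors above are derived from the quarter-pel luma vector in
 * two steps: motion_x/y are first halved (with optional workarounds for
 * known buggy encoders), then (mx >> 1) | (mx & 1) snaps any remaining
 * fractional position onto the half-pel chroma grid; the low bits form
 * uvdxy and the integer part offsets uvsrc_x/uvsrc_y. */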
436 
437 /**
438  * H.263 chroma 4mv motion compensation.
439  */
440 static void chroma_4mv_motion(MpegEncContext *s,
441  uint8_t *dest_cb, uint8_t *dest_cr,
442  uint8_t *const *ref_picture,
443  op_pixels_func *pix_op,
444  int mx, int my)
445 {
446  const uint8_t *ptr;
447  int src_x, src_y, dxy, emu = 0;
448  ptrdiff_t offset;
449 
450  /* In case of 8X8, we construct a single chroma motion vector
451  * with a special rounding */
452  mx = ff_h263_round_chroma(mx);
453  my = ff_h263_round_chroma(my);
454 
455  dxy = ((my & 1) << 1) | (mx & 1);
456  mx >>= 1;
457  my >>= 1;
458 
459  src_x = s->mb_x * 8 + mx;
460  src_y = s->mb_y * 8 + my;
461  src_x = av_clip(src_x, -8, (s->width >> 1));
462  if (src_x == (s->width >> 1))
463  dxy &= ~1;
464  src_y = av_clip(src_y, -8, (s->height >> 1));
465  if (src_y == (s->height >> 1))
466  dxy &= ~2;
467 
468  offset = src_y * s->uvlinesize + src_x;
469  ptr = ref_picture[1] + offset;
470  if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
471  (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
472  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
473  s->uvlinesize, s->uvlinesize,
474  9, 9, src_x, src_y,
475  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
476  ptr = s->sc.edge_emu_buffer;
477  emu = 1;
478  }
479  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
480 
481  ptr = ref_picture[2] + offset;
482  if (emu) {
483  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
484  s->uvlinesize, s->uvlinesize,
485  9, 9, src_x, src_y,
486  s->h_edge_pos >> 1, s->v_edge_pos >> 1);
487  ptr = s->sc.edge_emu_buffer;
488  }
489  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
490 }
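/* chroma_4mv_motion() receives mx/my as the sum of the four 8x8 luma
 * vectors (see apply_8x8() and apply_obmc()); ff_h263_round_chroma() maps
 * that sum onto a single half-pel chroma vector using the special rounding
 * H.263 uses for 4MV macroblocks. */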
491 
492 static inline void prefetch_motion(MpegEncContext *s, uint8_t *const *pix, int dir)
493 {
494  /* fetch pixels for estimated mv 4 macroblocks ahead
495  * optimized for 64-byte cache lines */
496  const int shift = s->quarter_sample ? 2 : 1;
497  const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
498  const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
499  int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
500 
501  s->vdsp.prefetch(pix[0] + off, s->linesize, 4);
502  off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
503  s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
504 }
505 
506 static inline void apply_obmc(MpegEncContext *s,
507  uint8_t *dest_y,
508  uint8_t *dest_cb,
509  uint8_t *dest_cr,
510  uint8_t *const *ref_picture,
511  op_pixels_func (*pix_op)[4])
512 {
513  LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
514  const Picture *cur_frame = &s->current_picture;
515  int mb_x = s->mb_x;
516  int mb_y = s->mb_y;
517  const int xy = mb_x + mb_y * s->mb_stride;
518  const int mot_stride = s->b8_stride;
519  const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
520  int mx, my, i;
521 
522  av_assert2(!s->mb_skipped);
523 
524  AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
525  AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);
526 
527  AV_COPY32(mv_cache[2][1],
528  cur_frame->motion_val[0][mot_xy + mot_stride]);
529  AV_COPY32(mv_cache[2][2],
530  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
531 
532  AV_COPY32(mv_cache[3][1],
533  cur_frame->motion_val[0][mot_xy + mot_stride]);
534  AV_COPY32(mv_cache[3][2],
535  cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
536 
537  if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
538  AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
539  AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
540  } else {
541  AV_COPY32(mv_cache[0][1],
542  cur_frame->motion_val[0][mot_xy - mot_stride]);
543  AV_COPY32(mv_cache[0][2],
544  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
545  }
546 
547  if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
548  AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
549  AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
550  } else {
551  AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
552  AV_COPY32(mv_cache[2][0],
553  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
554  }
555 
556  if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
557  AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
558  AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
559  } else {
560  AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
561  AV_COPY32(mv_cache[2][3],
562  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
563  }
564 
565  mx = 0;
566  my = 0;
567  for (i = 0; i < 4; i++) {
568  const int x = (i & 1) + 1;
569  const int y = (i >> 1) + 1;
570  int16_t mv[5][2] = {
571  { mv_cache[y][x][0], mv_cache[y][x][1] },
572  { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
573  { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
574  { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
575  { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
576  };
577  // FIXME cleanup
578  obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
579  ref_picture[0],
580  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
581  pix_op[1],
582  mv);
583 
584  mx += mv[0][0];
585  my += mv[0][1];
586  }
587  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
588  chroma_4mv_motion(s, dest_cb, dest_cr,
589  ref_picture, pix_op[1],
590  mx, my);
591 }
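/* mv_cache[][] above is a 4x4 grid of 8x8-block vectors centred on the
 * current macroblock: rows 1-2, columns 1-2 hold its own four vectors;
 * row 0 and columns 0/3 hold the neighbouring blocks (or copies of the
 * inner vectors when a neighbour is intra or outside the picture); row 3
 * duplicates row 2 because the macroblock below is not decoded yet.  Each
 * obmc_motion() call then picks the mid/top/left/right/bottom vectors for
 * one luma block from this grid. */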
592 
593 static inline void apply_8x8(MpegEncContext *s,
594  uint8_t *dest_y,
595  uint8_t *dest_cb,
596  uint8_t *dest_cr,
597  int dir,
598  uint8_t *const *ref_picture,
599  qpel_mc_func (*qpix_op)[16],
600  op_pixels_func (*pix_op)[4])
601 {
602  int dxy, mx, my, src_x, src_y;
603  int i;
604  int mb_x = s->mb_x;
605  int mb_y = s->mb_y;
606  uint8_t *dest;
607  const uint8_t *ptr;
608 
609  mx = 0;
610  my = 0;
611  if (s->quarter_sample) {
612  for (i = 0; i < 4; i++) {
613  int motion_x = s->mv[dir][i][0];
614  int motion_y = s->mv[dir][i][1];
615 
616  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
617  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
618  src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;
619 
620  /* WARNING: do not forget half pels */
621  src_x = av_clip(src_x, -16, s->width);
622  if (src_x == s->width)
623  dxy &= ~3;
624  src_y = av_clip(src_y, -16, s->height);
625  if (src_y == s->height)
626  dxy &= ~12;
627 
628  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
629  if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
630  (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
631  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
632  s->linesize, s->linesize,
633  9, 9,
634  src_x, src_y,
635  s->h_edge_pos,
636  s->v_edge_pos);
637  ptr = s->sc.edge_emu_buffer;
638  }
639  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
640  qpix_op[1][dxy](dest, ptr, s->linesize);
641 
642  mx += s->mv[dir][i][0] / 2;
643  my += s->mv[dir][i][1] / 2;
644  }
645  } else {
646  for (i = 0; i < 4; i++) {
647  hpel_motion(s,
648  dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
649  ref_picture[0],
650  mb_x * 16 + (i & 1) * 8,
651  mb_y * 16 + (i >> 1) * 8,
652  pix_op[1],
653  s->mv[dir][i][0],
654  s->mv[dir][i][1]);
655 
656  mx += s->mv[dir][i][0];
657  my += s->mv[dir][i][1];
658  }
659  }
660 
661  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
662  chroma_4mv_motion(s, dest_cb, dest_cr,
663  ref_picture, pix_op[1], mx, my);
664 }
665 
666 /**
667  * motion compensation of a single macroblock
668  * @param s context
669  * @param dest_y luma destination pointer
670  * @param dest_cb chroma cb/u destination pointer
671  * @param dest_cr chroma cr/v destination pointer
672  * @param dir direction (0->forward, 1->backward)
673  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
674  * @param pix_op halfpel motion compensation function (average or put normally)
675  * @param qpix_op qpel motion compensation function (average or put normally)
676  * the motion vectors are taken from s->mv and the MV type from s->mv_type
677  */
678 static av_always_inline void mpv_motion_internal(MpegEncContext *s,
679  uint8_t *dest_y,
680  uint8_t *dest_cb,
681  uint8_t *dest_cr,
682  int dir,
683  uint8_t *const *ref_picture,
684  op_pixels_func (*pix_op)[4],
685  qpel_mc_func (*qpix_op)[16],
686  int is_mpeg12)
687 {
688  int i;
689  int mb_y = s->mb_y;
690 
691  if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
692  apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
693  return;
694  }
695 
696  switch (s->mv_type) {
697  case MV_TYPE_16X16:
698  if (CONFIG_MPEG4_DECODER && !is_mpeg12 && s->mcsel) {
699  ff_mpeg4_mcsel_motion(s, dest_y, dest_cb, dest_cr, ref_picture);
700  } else if (!is_mpeg12 && s->quarter_sample) {
701  qpel_motion(s, dest_y, dest_cb, dest_cr,
702  0, 0, 0,
703  ref_picture, pix_op, qpix_op,
704  s->mv[dir][0][0], s->mv[dir][0][1], 16);
705  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
706  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
707  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
708  ref_picture, pix_op,
709  s->mv[dir][0][0], s->mv[dir][0][1], 16);
710  } else {
711  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
712  ref_picture, pix_op,
713  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y);
714  }
715  break;
716  case MV_TYPE_8X8:
717  if (!is_mpeg12)
718  apply_8x8(s, dest_y, dest_cb, dest_cr,
719  dir, ref_picture, qpix_op, pix_op);
720  break;
721  case MV_TYPE_FIELD:
722  if (s->picture_structure == PICT_FRAME) {
723  if (!is_mpeg12 && s->quarter_sample) {
724  for (i = 0; i < 2; i++)
725  qpel_motion(s, dest_y, dest_cb, dest_cr,
726  1, i, s->field_select[dir][i],
727  ref_picture, pix_op, qpix_op,
728  s->mv[dir][i][0], s->mv[dir][i][1], 8);
729  } else {
730  /* top field */
731  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
732  0, s->field_select[dir][0],
733  ref_picture, pix_op,
734  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
735  /* bottom field */
736  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
737  1, s->field_select[dir][1],
738  ref_picture, pix_op,
739  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
740  }
741  } else {
742  if ( s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
743  || !ref_picture[0]) {
744  ref_picture = s->current_picture_ptr->f->data;
745  }
746 
747  mpeg_motion(s, dest_y, dest_cb, dest_cr,
748  s->field_select[dir][0],
749  ref_picture, pix_op,
750  s->mv[dir][0][0], s->mv[dir][0][1], 16, 0, mb_y >> 1);
751  }
752  break;
753  case MV_TYPE_16X8:
754  if (CONFIG_SMALL || is_mpeg12) {
755  for (i = 0; i < 2; i++) {
756  uint8_t *const *ref2picture;
757 
758  if ((s->picture_structure == s->field_select[dir][i] + 1 ||
759  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) &&
760  ref_picture[0]) {
761  ref2picture = ref_picture;
762  } else {
763  ref2picture = s->current_picture_ptr->f->data;
764  }
765 
766  mpeg_motion(s, dest_y, dest_cb, dest_cr,
767  s->field_select[dir][i],
768  ref2picture, pix_op,
769  s->mv[dir][i][0], s->mv[dir][i][1],
770  8, 1, (mb_y & ~1) + i);
771 
772  dest_y += 16 * s->linesize;
773  dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
774  dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
775  }
776  break;
777  }
778  case MV_TYPE_DMV:
779  if (CONFIG_SMALL || is_mpeg12) {
780  if (s->picture_structure == PICT_FRAME) {
781  for (i = 0; i < 2; i++) {
782  for (int j = 0; j < 2; j++)
783  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
784  j, j ^ i, ref_picture, pix_op,
785  s->mv[dir][2 * i + j][0],
786  s->mv[dir][2 * i + j][1], 8, mb_y);
787  pix_op = s->hdsp.avg_pixels_tab;
788  }
789  } else {
790  if (!ref_picture[0]) {
791  ref_picture = s->current_picture_ptr->f->data;
792  }
793  for (i = 0; i < 2; i++) {
794  mpeg_motion(s, dest_y, dest_cb, dest_cr,
795  s->picture_structure != i + 1,
796  ref_picture, pix_op,
797  s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
798  16, 0, mb_y >> 1);
799 
800  // after the put, we avg into the same block
801  pix_op = s->hdsp.avg_pixels_tab;
802 
803  /* opposite parity is always in the same frame if this is
804  * the second field */
805  if (!s->first_field)
806  ref_picture = s->current_picture_ptr->f->data;
807  }
808  }
809  break;
810  }
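 /* MV_TYPE_16X8 and MV_TYPE_DMV only occur in MPEG-1/2 streams; when
  * is_mpeg12 is a compile-time 0 (!CONFIG_SMALL, non-MPEG-1/2 codec) the
  * guarded blocks above are optimized away and both cases fall through
  * to the assert below. */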
811  default: av_assert2(0);
812  }
813 }
814 
815 void ff_mpv_motion(MpegEncContext *s,
816  uint8_t *dest_y, uint8_t *dest_cb,
817  uint8_t *dest_cr, int dir,
818  uint8_t *const *ref_picture,
819  op_pixels_func (*pix_op)[4],
820  qpel_mc_func (*qpix_op)[16])
821 {
822  prefetch_motion(s, ref_picture, dir);
823 
824 #if !CONFIG_SMALL
825  if (s->out_format == FMT_MPEG1)
826  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
827  ref_picture, pix_op, qpix_op, 1);
828  else
829 #endif
830  mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
831  ref_picture, pix_op, qpix_op, 0);
832 }