mpegvideo_motion.c
1 /*
2  * Copyright (c) 2000,2001 Fabrice Bellard
3  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
4  *
5  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
6  *
7  * This file is part of FFmpeg.
8  *
9  * FFmpeg is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public
11  * License as published by the Free Software Foundation; either
12  * version 2.1 of the License, or (at your option) any later version.
13  *
14  * FFmpeg is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with FFmpeg; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22  */
23 
24 #include <string.h>
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/internal.h"
28 #include "avcodec.h"
29 #include "dsputil.h"
30 #include "mpegvideo.h"
31 #include "mjpegenc.h"
32 #include "msmpeg4.h"
33 #include <limits.h>
34 
35 static void gmc1_motion(MpegEncContext *s,
36  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
37  uint8_t **ref_picture)
38 {
39  uint8_t *ptr;
40  int offset, src_x, src_y, linesize, uvlinesize;
41  int motion_x, motion_y;
42  int emu=0;
43 
44  motion_x= s->sprite_offset[0][0];
45  motion_y= s->sprite_offset[0][1];
46  src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
47  src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
48  motion_x<<=(3-s->sprite_warping_accuracy);
49  motion_y<<=(3-s->sprite_warping_accuracy);
50  src_x = av_clip(src_x, -16, s->width);
51  if (src_x == s->width)
52  motion_x =0;
53  src_y = av_clip(src_y, -16, s->height);
54  if (src_y == s->height)
55  motion_y =0;
56 
57  linesize = s->linesize;
58  uvlinesize = s->uvlinesize;
59 
60  ptr = ref_picture[0] + (src_y * linesize) + src_x;
61 
62  if(s->flags&CODEC_FLAG_EMU_EDGE){
63  if( (unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0)
64  || (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)){
65  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
66  ptr= s->edge_emu_buffer;
67  }
68  }
69 
70  if((motion_x|motion_y)&7){
71  s->dsp.gmc1(dest_y , ptr , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
72  s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
73  }else{
74  int dxy;
75 
76  dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
77  if (s->no_rounding){
78  s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
79  }else{
80  s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
81  }
82  }
83 
84  if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
85 
86  motion_x= s->sprite_offset[1][0];
87  motion_y= s->sprite_offset[1][1];
88  src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
89  src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
90  motion_x<<=(3-s->sprite_warping_accuracy);
91  motion_y<<=(3-s->sprite_warping_accuracy);
92  src_x = av_clip(src_x, -8, s->width>>1);
93  if (src_x == s->width>>1)
94  motion_x =0;
95  src_y = av_clip(src_y, -8, s->height>>1);
96  if (src_y == s->height>>1)
97  motion_y =0;
98 
99  offset = (src_y * uvlinesize) + src_x;
100  ptr = ref_picture[1] + offset;
101  if(s->flags&CODEC_FLAG_EMU_EDGE){
102  if( (unsigned)src_x >= FFMAX((s->h_edge_pos>>1) - 9, 0)
103  || (unsigned)src_y >= FFMAX((s->v_edge_pos>>1) - 9, 0)){
104  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
105  ptr= s->edge_emu_buffer;
106  emu=1;
107  }
108  }
109  s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
110 
111  ptr = ref_picture[2] + offset;
112  if(emu){
113  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
114  ptr= s->edge_emu_buffer;
115  }
116  s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
117 
118  return;
119 }
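The shift arithmetic above is easier to follow with concrete numbers. Below is a minimal standalone sketch (not part of FFmpeg; the helper name is invented) of the split gmc1_motion() performs: the sprite offset, stored in 1/2^(accuracy+1)-pel units, is divided into an integer source column for this macroblock and a 1/16-pel phase for the bilinear gmc1 filter.

#include <stdio.h>

/* Illustrative only: mirrors the shift arithmetic in gmc1_motion(). */
static void split_gmc1_offset(int mb_x, int offset, int accuracy,
                              int *src_x, int *phase)
{
    *src_x = mb_x * 16 + (offset >> (accuracy + 1)); /* integer pixels    */
    *phase = (offset << (3 - accuracy)) & 15;        /* 1/16-pel fraction */
}

int main(void)
{
    int src_x, phase;
    /* accuracy 2 -> offsets are in 1/8-pel units; 37/8 = 4.625 pixels */
    split_gmc1_offset(2, 37, 2, &src_x, &phase);
    printf("src_x=%d phase=%d/16\n", src_x, phase); /* src_x=36 phase=10/16 */
    return 0;
}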
120 
121 static void gmc_motion(MpegEncContext *s,
122  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
123  uint8_t **ref_picture)
124 {
125  uint8_t *ptr;
126  int linesize, uvlinesize;
127  const int a= s->sprite_warping_accuracy;
128  int ox, oy;
129 
130  linesize = s->linesize;
131  uvlinesize = s->uvlinesize;
132 
133  ptr = ref_picture[0];
134 
135  ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
136  oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
137 
138  s->dsp.gmc(dest_y, ptr, linesize, 16,
139  ox,
140  oy,
141  s->sprite_delta[0][0], s->sprite_delta[0][1],
142  s->sprite_delta[1][0], s->sprite_delta[1][1],
143  a+1, (1<<(2*a+1)) - s->no_rounding,
144  s->h_edge_pos, s->v_edge_pos);
145  s->dsp.gmc(dest_y+8, ptr, linesize, 16,
146  ox + s->sprite_delta[0][0]*8,
147  oy + s->sprite_delta[1][0]*8,
148  s->sprite_delta[0][0], s->sprite_delta[0][1],
149  s->sprite_delta[1][0], s->sprite_delta[1][1],
150  a+1, (1<<(2*a+1)) - s->no_rounding,
151  s->h_edge_pos, s->v_edge_pos);
152 
153  if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;
154 
155  ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
156  oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
157 
158  ptr = ref_picture[1];
159  s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
160  ox,
161  oy,
162  s->sprite_delta[0][0], s->sprite_delta[0][1],
163  s->sprite_delta[1][0], s->sprite_delta[1][1],
164  a+1, (1<<(2*a+1)) - s->no_rounding,
165  s->h_edge_pos>>1, s->v_edge_pos>>1);
166 
167  ptr = ref_picture[2];
168  s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
169  ox,
170  oy,
171  s->sprite_delta[0][0], s->sprite_delta[0][1],
172  s->sprite_delta[1][0], s->sprite_delta[1][1],
173  a+1, (1<<(2*a+1)) - s->no_rounding,
174  s->h_edge_pos>>1, s->v_edge_pos>>1);
175 }
176 
177 static inline int hpel_motion(MpegEncContext *s,
178  uint8_t *dest, uint8_t *src,
179  int src_x, int src_y,
180  op_pixels_func *pix_op,
181  int motion_x, int motion_y)
182 {
183  int dxy = 0;
184  int emu=0;
185 
186  src_x += motion_x >> 1;
187  src_y += motion_y >> 1;
188 
189  /* WARNING: do not forget half pels */
190  src_x = av_clip(src_x, -16, s->width); //FIXME unneeded for emu?
191  if (src_x != s->width)
192  dxy |= motion_x & 1;
193  src_y = av_clip(src_y, -16, s->height);
194  if (src_y != s->height)
195  dxy |= (motion_y & 1) << 1;
196  src += src_y * s->linesize + src_x;
197 
198  if(s->flags&CODEC_FLAG_EMU_EDGE){
199  if( (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&1) - 8, 0)
200  || (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y&1) - 8, 0)){
201  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, 9, 9,
202  src_x, src_y, s->h_edge_pos, s->v_edge_pos);
203  src= s->edge_emu_buffer;
204  emu=1;
205  }
206  }
207  pix_op[dxy](dest, src, s->linesize, 8);
208  return emu;
209 }
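A note on the dxy index computed here and again in mpeg_motion_internal() below: bit 0 requests horizontal half-pel interpolation and bit 1 vertical half-pel, selecting one of the four entries of a pix_op row (copy, hpel-x, hpel-y, hpel-xy). qpel_motion() extends the same idea to a 4-bit index over the sixteen quarter-pel positions. A tiny sketch, with an invented helper name:

#include <stdio.h>

/* Half-pel interpolation selector, as computed in hpel_motion():
 * bit 0 = horizontal half-pel, bit 1 = vertical half-pel. */
static int hpel_dxy(int motion_x, int motion_y)
{
    return (motion_x & 1) | ((motion_y & 1) << 1);
}

int main(void)
{
    printf("%d\n", hpel_dxy(4, 6));  /* 0: both components are full-pel */
    printf("%d\n", hpel_dxy(3, 6));  /* 1: horizontal averaging only    */
    printf("%d\n", hpel_dxy(3, -1)); /* 3: horizontal and vertical taps */
    return 0;
}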
210 
211 static av_always_inline
212 void mpeg_motion_internal(MpegEncContext *s,
213  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
214  int field_based, int bottom_field, int field_select,
215  uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
216  int motion_x, int motion_y, int h, int is_mpeg12, int mb_y)
217 {
218  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
219  int dxy, uvdxy, mx, my, src_x, src_y,
220  uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;
221 
222 #if 0
223 if(s->quarter_sample)
224 {
225  motion_x>>=1;
226  motion_y>>=1;
227 }
228 #endif
229 
230  v_edge_pos = s->v_edge_pos >> field_based;
231  linesize = s->current_picture.f.linesize[0] << field_based;
232  uvlinesize = s->current_picture.f.linesize[1] << field_based;
233 
234  dxy = ((motion_y & 1) << 1) | (motion_x & 1);
235  src_x = s->mb_x* 16 + (motion_x >> 1);
236  src_y =( mb_y<<(4-field_based)) + (motion_y >> 1);
237 
238  if (!is_mpeg12 && s->out_format == FMT_H263) {
239  if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
240  mx = (motion_x>>1)|(motion_x&1);
241  my = motion_y >>1;
242  uvdxy = ((my & 1) << 1) | (mx & 1);
243  uvsrc_x = s->mb_x* 8 + (mx >> 1);
244  uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
245  }else{
246  uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
247  uvsrc_x = src_x>>1;
248  uvsrc_y = src_y>>1;
249  }
250  }else if(!is_mpeg12 && s->out_format == FMT_H261){ // in H.261 even the chroma MVs are full-pel
251  mx = motion_x / 4;
252  my = motion_y / 4;
253  uvdxy = 0;
254  uvsrc_x = s->mb_x*8 + mx;
255  uvsrc_y = mb_y*8 + my;
256  } else {
257  if(s->chroma_y_shift){
258  mx = motion_x / 2;
259  my = motion_y / 2;
260  uvdxy = ((my & 1) << 1) | (mx & 1);
261  uvsrc_x = s->mb_x* 8 + (mx >> 1);
262  uvsrc_y =( mb_y<<(3-field_based))+ (my >> 1);
263  } else {
264  if(s->chroma_x_shift){
265  //Chroma422
266  mx = motion_x / 2;
267  uvdxy = ((motion_y & 1) << 1) | (mx & 1);
268  uvsrc_x = s->mb_x* 8 + (mx >> 1);
269  uvsrc_y = src_y;
270  } else {
271  //Chroma444
272  uvdxy = dxy;
273  uvsrc_x = src_x;
274  uvsrc_y = src_y;
275  }
276  }
277  }
278 
279  ptr_y = ref_picture[0] + src_y * linesize + src_x;
280  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
281  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
282 
283  if( (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&1) - 16, 0)
284  || (unsigned)src_y > FFMAX( v_edge_pos - (motion_y&1) - h , 0)){
285  if(is_mpeg12 || s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
288  "MPEG motion vector out of boundary (%d %d)\n", src_x, src_y);
289  return;
290  }
291  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
292  17, 17+field_based,
293  src_x, src_y<<field_based,
294  s->h_edge_pos, s->v_edge_pos);
295  ptr_y = s->edge_emu_buffer;
296  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
297  uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
298  s->vdsp.emulated_edge_mc(uvbuf ,
299  ptr_cb, s->uvlinesize,
300  9, 9+field_based,
301  uvsrc_x, uvsrc_y<<field_based,
302  s->h_edge_pos>>1, s->v_edge_pos>>1);
303  s->vdsp.emulated_edge_mc(uvbuf+16,
304  ptr_cr, s->uvlinesize,
305  9, 9+field_based,
306  uvsrc_x, uvsrc_y<<field_based,
307  s->h_edge_pos>>1, s->v_edge_pos>>1);
308  ptr_cb= uvbuf;
309  ptr_cr= uvbuf+16;
310  }
311  }
312 
313  if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
314  dest_y += s->linesize;
315  dest_cb+= s->uvlinesize;
316  dest_cr+= s->uvlinesize;
317  }
318 
319  if(field_select){
320  ptr_y += s->linesize;
321  ptr_cb+= s->uvlinesize;
322  ptr_cr+= s->uvlinesize;
323  }
324 
325  pix_op[0][dxy](dest_y, ptr_y, linesize, h);
326 
327  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
328  pix_op[s->chroma_x_shift][uvdxy]
329  (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
330  pix_op[s->chroma_x_shift][uvdxy]
331  (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
332  }
333  if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
334  s->out_format == FMT_H261){
335  ff_h261_loop_filter(s);
336  }
337 }
338 /* apply one mpeg motion vector to the three components */
339 static void mpeg_motion(MpegEncContext *s,
340  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
341  int field_select, uint8_t **ref_picture,
342  op_pixels_func (*pix_op)[4],
343  int motion_x, int motion_y, int h, int mb_y)
344 {
345 #if !CONFIG_SMALL
346  if(s->out_format == FMT_MPEG1)
347  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
348  field_select, ref_picture, pix_op,
349  motion_x, motion_y, h, 1, mb_y);
350  else
351 #endif
352  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
353  field_select, ref_picture, pix_op,
354  motion_x, motion_y, h, 0, mb_y);
355 }
356 
357 static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
358  uint8_t *dest_cb, uint8_t *dest_cr,
359  int bottom_field, int field_select,
360  uint8_t **ref_picture,
361  op_pixels_func (*pix_op)[4],
362  int motion_x, int motion_y, int h, int mb_y)
363 {
364 #if !CONFIG_SMALL
365  if(s->out_format == FMT_MPEG1)
366  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
367  bottom_field, field_select, ref_picture, pix_op,
368  motion_x, motion_y, h, 1, mb_y);
369  else
370 #endif
371  mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
372  bottom_field, field_select, ref_picture, pix_op,
373  motion_x, motion_y, h, 0, mb_y);
374 }
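The two wrappers above depend on mpeg_motion_internal() being av_always_inline with a compile-time-constant is_mpeg12 argument: unless CONFIG_SMALL is set, the compiler emits two specialized copies and folds away the branches that test is_mpeg12; with CONFIG_SMALL only the generic copy is built and the tests stay at run time. A generic sketch of the pattern using a GCC-style attribute and invented names (not FFmpeg code):

/* The worker is force-inlined into each thin wrapper, so the constant flag
 * propagates and the dead branch disappears from the generated code. */
static inline __attribute__((always_inline))
int scale_internal(int x, int is_mpeg12)
{
    if (is_mpeg12)
        return x * 2;   /* kept only in the MPEG-1/2 specialization */
    else
        return x * 3;   /* kept only in the generic specialization  */
}

static int scale_mpeg12(int x)  { return scale_internal(x, 1); }
static int scale_generic(int x) { return scale_internal(x, 0); }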
375 
376 //FIXME move to dsputil, avg variant, 16x16 version
377 static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
378  int x;
379  uint8_t * const top = src[1];
380  uint8_t * const left = src[2];
381  uint8_t * const mid = src[0];
382  uint8_t * const right = src[3];
383  uint8_t * const bottom= src[4];
384 #define OBMC_FILTER(x, t, l, m, r, b)\
385  dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
386 #define OBMC_FILTER4(x, t, l, m, r, b)\
387  OBMC_FILTER(x , t, l, m, r, b);\
388  OBMC_FILTER(x+1 , t, l, m, r, b);\
389  OBMC_FILTER(x +stride, t, l, m, r, b);\
390  OBMC_FILTER(x+1+stride, t, l, m, r, b);
391 
392  x=0;
393  OBMC_FILTER (x , 2, 2, 4, 0, 0);
394  OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
395  OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
396  OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
397  OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
398  OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
399  x+= stride;
400  OBMC_FILTER (x , 1, 2, 5, 0, 0);
401  OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
402  OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
403  OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
404  x+= stride;
405  OBMC_FILTER4(x , 1, 2, 5, 0, 0);
406  OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
407  OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
408  OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
409  x+= 2*stride;
410  OBMC_FILTER4(x , 0, 2, 5, 0, 1);
411  OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
412  OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
413  OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
414  x+= 2*stride;
415  OBMC_FILTER (x , 0, 2, 5, 0, 1);
416  OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
417  OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
418  OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
419  OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
420  OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
421  x+= stride;
422  OBMC_FILTER (x , 0, 2, 4, 0, 2);
423  OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
424  OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
425  OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
426 }
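In every OBMC_FILTER call above the five weights sum to 8, so each output pixel is a weighted average of up to five overlapping predictions, with the +4 providing rounding before the shift by 3. A small standalone check of the arithmetic (helper name invented):

#include <stdio.h>

/* Same arithmetic as OBMC_FILTER() for a single pixel. */
static int obmc_blend(int t, int l, int m, int r, int b,
                      int top, int left, int mid, int right, int bottom)
{
    return (t*top + l*left + m*mid + r*right + b*bottom + 4) >> 3;
}

int main(void)
{
    /* The top-left corner of the 8x8 block uses weights 2,2,4,0,0
     * (the first OBMC_FILTER call): 2/8 top + 2/8 left + 4/8 mid. */
    printf("%d\n", obmc_blend(2, 2, 4, 0, 0, 100, 120, 110, 0, 0)); /* 110 */
    return 0;
}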
427 
428 /* obmc for 1 8x8 luma block */
429 static inline void obmc_motion(MpegEncContext *s,
430  uint8_t *dest, uint8_t *src,
431  int src_x, int src_y,
432  op_pixels_func *pix_op,
433  int16_t mv[5][2]/* mid top left right bottom*/)
434 #define MID 0
435 {
436  int i;
437  uint8_t *ptr[5];
438 
439  av_assert2(s->quarter_sample==0);
440 
441  for(i=0; i<5; i++){
442  if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
443  ptr[i]= ptr[MID];
444  }else{
445  ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
446  hpel_motion(s, ptr[i], src,
447  src_x, src_y,
448  pix_op,
449  mv[i][0], mv[i][1]);
450  }
451  }
452 
453  put_obmc(dest, ptr, s->linesize);
454 }
455 
456 static inline void qpel_motion(MpegEncContext *s,
457  uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
458  int field_based, int bottom_field, int field_select,
459  uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
460  qpel_mc_func (*qpix_op)[16],
461  int motion_x, int motion_y, int h)
462 {
463  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
464  int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;
465 
466  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
467  src_x = s->mb_x * 16 + (motion_x >> 2);
468  src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
469 
470  v_edge_pos = s->v_edge_pos >> field_based;
471  linesize = s->linesize << field_based;
472  uvlinesize = s->uvlinesize << field_based;
473 
474  if(field_based){
475  mx= motion_x/2;
476  my= motion_y>>1;
477  }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
478  static const int rtab[8]= {0,0,1,1,0,0,0,1};
479  mx= (motion_x>>1) + rtab[motion_x&7];
480  my= (motion_y>>1) + rtab[motion_y&7];
481  }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
482  mx= (motion_x>>1)|(motion_x&1);
483  my= (motion_y>>1)|(motion_y&1);
484  }else{
485  mx= motion_x/2;
486  my= motion_y/2;
487  }
488  mx= (mx>>1)|(mx&1);
489  my= (my>>1)|(my&1);
490 
491  uvdxy= (mx&1) | ((my&1)<<1);
492  mx>>=1;
493  my>>=1;
494 
495  uvsrc_x = s->mb_x * 8 + mx;
496  uvsrc_y = s->mb_y * (8 >> field_based) + my;
497 
498  ptr_y = ref_picture[0] + src_y * linesize + src_x;
499  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
500  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
501 
502  if( (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&3) - 16, 0)
503  || (unsigned)src_y > FFMAX( v_edge_pos - (motion_y&3) - h , 0)){
504  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
505  17, 17+field_based, src_x, src_y<<field_based,
506  s->h_edge_pos, s->v_edge_pos);
507  ptr_y= s->edge_emu_buffer;
508  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
509  uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
510  s->vdsp.emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize,
511  9, 9 + field_based,
512  uvsrc_x, uvsrc_y<<field_based,
513  s->h_edge_pos>>1, s->v_edge_pos>>1);
514  s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize,
515  9, 9 + field_based,
516  uvsrc_x, uvsrc_y<<field_based,
517  s->h_edge_pos>>1, s->v_edge_pos>>1);
518  ptr_cb= uvbuf;
519  ptr_cr= uvbuf + 16;
520  }
521  }
522 
523  if(!field_based)
524  qpix_op[0][dxy](dest_y, ptr_y, linesize);
525  else{
526  if(bottom_field){
527  dest_y += s->linesize;
528  dest_cb+= s->uvlinesize;
529  dest_cr+= s->uvlinesize;
530  }
531 
532  if(field_select){
533  ptr_y += s->linesize;
534  ptr_cb += s->uvlinesize;
535  ptr_cr += s->uvlinesize;
536  }
537  //damn interlaced mode
538  //FIXME boundary mirroring is not exactly correct here
539  qpix_op[1][dxy](dest_y , ptr_y , linesize);
540  qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
541  }
542  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
543  pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
544  pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
545  }
546 }
547 
548 /**
549  * H.263 chroma 4MV motion compensation.
550  */
551 static inline void chroma_4mv_motion(MpegEncContext *s,
552  uint8_t *dest_cb, uint8_t *dest_cr,
553  uint8_t **ref_picture,
554  op_pixels_func *pix_op,
555  int mx, int my)
556 {
557  int dxy, emu=0, src_x, src_y, offset;
558  uint8_t *ptr;
559 
560  /* In case of 8X8, we construct a single chroma motion vector
561  with a special rounding */
562  mx= ff_h263_round_chroma(mx);
563  my= ff_h263_round_chroma(my);
564 
565  dxy = ((my & 1) << 1) | (mx & 1);
566  mx >>= 1;
567  my >>= 1;
568 
569  src_x = s->mb_x * 8 + mx;
570  src_y = s->mb_y * 8 + my;
571  src_x = av_clip(src_x, -8, (s->width >> 1));
572  if (src_x == (s->width >> 1))
573  dxy &= ~1;
574  src_y = av_clip(src_y, -8, (s->height >> 1));
575  if (src_y == (s->height >> 1))
576  dxy &= ~2;
577 
578  offset = src_y * s->uvlinesize + src_x;
579  ptr = ref_picture[1] + offset;
580  if(s->flags&CODEC_FLAG_EMU_EDGE){
581  if( (unsigned)src_x > FFMAX((s->h_edge_pos>>1) - (dxy &1) - 8, 0)
582  || (unsigned)src_y > FFMAX((s->v_edge_pos>>1) - (dxy>>1) - 8, 0)){
583  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
584  9, 9, src_x, src_y,
585  s->h_edge_pos>>1, s->v_edge_pos>>1);
586  ptr= s->edge_emu_buffer;
587  emu=1;
588  }
589  }
590  pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);
591 
592  ptr = ref_picture[2] + offset;
593  if(emu){
594  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
595  9, 9, src_x, src_y,
596  s->h_edge_pos>>1, s->v_edge_pos>>1);
597  ptr= s->edge_emu_buffer;
598  }
599  pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
600 }
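ff_h263_round_chroma() (defined in the H.263 headers, not in this file) performs the "special rounding" mentioned above: mx and my arrive as the sums of the four half-pel luma components, which are sixteenth-pel displacements for the half-resolution chroma planes, and the fractional pel is snapped to 0, 1/2 or 1 with a bias toward the half-pel position. The following is an independent sketch of that rule per the MPEG-4/H.263 derivation; the table and helper name are illustrative, not copied from FFmpeg:

#include <stdio.h>

/* S = sum of the four luma MV components in half-pel units.
 * Returns the chroma MV component in half-pel units. */
static int round_chroma_sum(int S)
{
    /* fraction of a pel in sixteenths -> rounded half-pel offset */
    static const int frac_to_halfpel[16] = {
        0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2
    };
    return 2 * (S >> 4) + frac_to_halfpel[S & 15];
}

int main(void)
{
    /* 13/16 pel snaps up to a full pel; 35/16 pel snaps to 2.5 pels */
    printf("%d %d\n", round_chroma_sum(13), round_chroma_sum(35)); /* 2 5 */
    return 0;
}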
601 
602 static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
603  /* fetch pixels for the estimated mv 4 macroblocks ahead
604  * optimized for 64-byte cache lines */
605  const int shift = s->quarter_sample ? 2 : 1;
606  const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
607  const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
608  int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
609  s->vdsp.prefetch(pix[0]+off, s->linesize, 4);
610  off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
611  s->vdsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
612 }
613 
614 /**
615  * motion compensation of a single macroblock
616  * @param s context
617  * @param dest_y luma destination pointer
618  * @param dest_cb chroma cb/u destination pointer
619  * @param dest_cr chroma cr/v destination pointer
620  * @param dir direction (0->forward, 1->backward)
621  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
622  * @param pix_op halfpel motion compensation function (average or put normally)
623  * @param qpix_op qpel motion compensation function (average or put normally)
624  * the motion vectors are taken from s->mv and the MV type from s->mv_type
625  */
626 static av_always_inline void MPV_motion_internal(MpegEncContext *s,
627  uint8_t *dest_y, uint8_t *dest_cb,
628  uint8_t *dest_cr, int dir,
629  uint8_t **ref_picture,
630  op_pixels_func (*pix_op)[4],
631  qpel_mc_func (*qpix_op)[16], int is_mpeg12)
632 {
633  int dxy, mx, my, src_x, src_y, motion_x, motion_y;
634  int mb_x, mb_y, i;
635  uint8_t *ptr, *dest;
636 
637  mb_x = s->mb_x;
638  mb_y = s->mb_y;
639 
640  prefetch_motion(s, ref_picture, dir);
641 
642  if(!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B){
643  LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
644  AVFrame *cur_frame = &s->current_picture.f;
645  const int xy= s->mb_x + s->mb_y*s->mb_stride;
646  const int mot_stride= s->b8_stride;
647  const int mot_xy= mb_x*2 + mb_y*2*mot_stride;
648 
649  av_assert2(!s->mb_skipped);
650 
651  AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy ]);
652  AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);
653 
654  AV_COPY32(mv_cache[2][1], cur_frame->motion_val[0][mot_xy + mot_stride ]);
655  AV_COPY32(mv_cache[2][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
656 
657  AV_COPY32(mv_cache[3][1], cur_frame->motion_val[0][mot_xy + mot_stride ]);
658  AV_COPY32(mv_cache[3][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
659 
660  if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
661  AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
662  AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
663  }else{
664  AV_COPY32(mv_cache[0][1], cur_frame->motion_val[0][mot_xy - mot_stride ]);
665  AV_COPY32(mv_cache[0][2], cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
666  }
667 
668  if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
669  AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
670  AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
671  }else{
672  AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
673  AV_COPY32(mv_cache[2][0], cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
674  }
675 
676  if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
677  AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
678  AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
679  }else{
680  AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
681  AV_COPY32(mv_cache[2][3], cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
682  }
683 
684  mx = 0;
685  my = 0;
686  for(i=0;i<4;i++) {
687  const int x= (i&1)+1;
688  const int y= (i>>1)+1;
689  int16_t mv[5][2]= {
690  {mv_cache[y][x ][0], mv_cache[y][x ][1]},
691  {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
692  {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
693  {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
694  {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};
695  //FIXME cleanup
696  obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
697  ref_picture[0],
698  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
699  pix_op[1],
700  mv);
701 
702  mx += mv[0][0];
703  my += mv[0][1];
704  }
705  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
706  chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
707 
708  return;
709  }
710 
711  switch(s->mv_type) {
712  case MV_TYPE_16X16:
713  if(s->mcsel){
714  if(s->real_sprite_warping_points==1){
715  gmc1_motion(s, dest_y, dest_cb, dest_cr,
716  ref_picture);
717  }else{
718  gmc_motion(s, dest_y, dest_cb, dest_cr,
719  ref_picture);
720  }
721  }else if(!is_mpeg12 && s->quarter_sample){
722  qpel_motion(s, dest_y, dest_cb, dest_cr,
723  0, 0, 0,
724  ref_picture, pix_op, qpix_op,
725  s->mv[dir][0][0], s->mv[dir][0][1], 16);
726  } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
727  s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
728  ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
729  ref_picture, pix_op,
730  s->mv[dir][0][0], s->mv[dir][0][1], 16);
731  }else
732  {
733  mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
734  ref_picture, pix_op,
735  s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
736  }
737  break;
738  case MV_TYPE_8X8:
739  if (!is_mpeg12) {
740  mx = 0;
741  my = 0;
742  if(s->quarter_sample){
743  for(i=0;i<4;i++) {
744  motion_x = s->mv[dir][i][0];
745  motion_y = s->mv[dir][i][1];
746 
747  dxy = ((motion_y & 3) << 2) | (motion_x & 3);
748  src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
749  src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
750 
751  /* WARNING: do not forget half pels */
752  src_x = av_clip(src_x, -16, s->width);
753  if (src_x == s->width)
754  dxy &= ~3;
755  src_y = av_clip(src_y, -16, s->height);
756  if (src_y == s->height)
757  dxy &= ~12;
758 
759  ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
760  if(s->flags&CODEC_FLAG_EMU_EDGE){
761  if( (unsigned)src_x > FFMAX(s->h_edge_pos - (motion_x&3) - 8, 0)
762  || (unsigned)src_y > FFMAX(s->v_edge_pos - (motion_y&3) - 8, 0)){
763  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
764  s->linesize, 9, 9,
765  src_x, src_y,
766  s->h_edge_pos, s->v_edge_pos);
767  ptr= s->edge_emu_buffer;
768  }
769  }
770  dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
771  qpix_op[1][dxy](dest, ptr, s->linesize);
772 
773  mx += s->mv[dir][i][0]/2;
774  my += s->mv[dir][i][1]/2;
775  }
776  }else{
777  for(i=0;i<4;i++) {
778  hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
779  ref_picture[0],
780  mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
781  pix_op[1],
782  s->mv[dir][i][0], s->mv[dir][i][1]);
783 
784  mx += s->mv[dir][i][0];
785  my += s->mv[dir][i][1];
786  }
787  }
788 
789  if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
790  chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
791  }
792  break;
793  case MV_TYPE_FIELD:
794  if (s->picture_structure == PICT_FRAME) {
795  if(!is_mpeg12 && s->quarter_sample){
796  for(i=0; i<2; i++){
797  qpel_motion(s, dest_y, dest_cb, dest_cr,
798  1, i, s->field_select[dir][i],
799  ref_picture, pix_op, qpix_op,
800  s->mv[dir][i][0], s->mv[dir][i][1], 8);
801  }
802  }else{
803  /* top field */
804  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
805  0, s->field_select[dir][0],
806  ref_picture, pix_op,
807  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
808  /* bottom field */
809  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
810  1, s->field_select[dir][1],
811  ref_picture, pix_op,
812  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
813  }
814  } else {
815  if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
816  ref_picture = s->current_picture_ptr->f.data;
817  }
818 
819  mpeg_motion(s, dest_y, dest_cb, dest_cr,
820  s->field_select[dir][0],
821  ref_picture, pix_op,
822  s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y>>1);
823  }
824  break;
825  case MV_TYPE_16X8:
826  for(i=0; i<2; i++){
827  uint8_t ** ref2picture;
828 
829  if(s->picture_structure == s->field_select[dir][i] + 1
830  || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
831  ref2picture= ref_picture;
832  }else{
833  ref2picture = s->current_picture_ptr->f.data;
834  }
835 
836  mpeg_motion(s, dest_y, dest_cb, dest_cr,
837  s->field_select[dir][i],
838  ref2picture, pix_op,
839  s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8, mb_y>>1);
840 
841  dest_y += 16*s->linesize;
842  dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
843  dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
844  }
845  break;
846  case MV_TYPE_DMV:
847  if(s->picture_structure == PICT_FRAME){
848  for(i=0; i<2; i++){
849  int j;
850  for(j=0; j<2; j++){
851  mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
852  j, j^i, ref_picture, pix_op,
853  s->mv[dir][2*i + j][0],
854  s->mv[dir][2*i + j][1], 8, mb_y);
855  }
856  pix_op = s->dsp.avg_pixels_tab;
857  }
858  }else{
859  for(i=0; i<2; i++){
860  mpeg_motion(s, dest_y, dest_cb, dest_cr,
861  s->picture_structure != i+1,
862  ref_picture, pix_op,
863  s->mv[dir][2*i][0],s->mv[dir][2*i][1],16, mb_y>>1);
864 
865  // after put we make avg of the same block
866  pix_op=s->dsp.avg_pixels_tab;
867 
868  //opposite parity is always in the same frame if this is second field
869  if(!s->first_field){
870  ref_picture = s->current_picture_ptr->f.data;
871  }
872  }
873  }
874  break;
875  default: av_assert2(0);
876  }
877 }
878 
879 void ff_MPV_motion(MpegEncContext *s,
880  uint8_t *dest_y, uint8_t *dest_cb,
881  uint8_t *dest_cr, int dir,
882  uint8_t **ref_picture,
883  op_pixels_func (*pix_op)[4],
884  qpel_mc_func (*qpix_op)[16])
885 {
886 #if !CONFIG_SMALL
887  if(s->out_format == FMT_MPEG1)
888  MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
889  ref_picture, pix_op, qpix_op, 1);
890  else
891 #endif
892  MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
893  ref_picture, pix_op, qpix_op, 0);
894 }