/* gmc1_motion(): translational global motion compensation (single warp point) */
    int src_x, src_y, motion_x, motion_y;
    ...
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    ...
    ptr = ref_picture[0] + src_y * linesize + src_x;
    ...
    if ((motion_x | motion_y) & 7) {
        /* a fractional part finer than half-pel remains: use the gmc1 blend */
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        /* the vector lies on the half-pel grid: a plain halfpel copy suffices */
        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        ...
    }
    /* chroma: same clipping at half resolution */
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    ptr = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9, src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->edge_emu_buffer;
        emu = 1;
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    /* same edge emulation for the Cr plane when needed */
    ...
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
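Per call, s->mdsp.gmc1() does a bilinear blend of a 2x2 source neighbourhood using the 1/16-pel fractional offsets, with the rounder (128 - s->no_rounding, as shown above) added before the final >> 8. A minimal scalar sketch, assuming 8-bit samples and an 8-pixel-wide block; the real DSP routine is usually SIMD-optimized and the function name here is illustrative:

static void gmc1_sketch(uint8_t *dst, const uint8_t *src, int stride, int h,
                        int x16, int y16, int rounder)
{
    /* bilinear weights from the 1/16-pel fractional offsets */
    const int A = (16 - x16) * (16 - y16);
    const int B = x16        * (16 - y16);
    const int C = (16 - x16) * y16;
    const int D = x16        * y16;            /* A + B + C + D == 256 */

    for (int y = 0; y < h; y++) {
        for (int x = 0; x < 8; x++)            /* one 8-pixel-wide block per call */
            dst[x] = (A * src[x]          + B * src[x + 1] +
                      C * src[x + stride] + D * src[x + stride + 1] +
                      rounder) >> 8;
        dst += stride;
        src += stride;
    }
}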
/* gmc_motion(): full global motion compensation, an affine warp driven by
 * sprite_offset[][] and sprite_delta[][] (warp arguments sketched below) */
    ptr = ref_picture[0];
    ...
    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ...);
    s->mdsp.gmc(dest_y + 8, ptr, linesize, 16,
                ...);
    ...
    ptr = ref_picture[1];
    s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8,
                ...);

    ptr = ref_picture[2];
    s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8,
                ...);
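The elided arguments pass the affine warp to the gmc callback (its full signature appears in the symbol list at the end of this document): a per-macroblock origin (ox, oy), the per-pixel deltas from sprite_delta, an accuracy shift, a rounding constant derived from no_rounding, and the edge positions for clamping. A sketch of the luma call; the ox/oy expressions are reconstructed from the surrounding fields, so treat them as an assumption:

    const int a = s->sprite_warping_accuracy;
    int ox, oy;

    ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 +
         s->sprite_delta[0][1] * s->mb_y * 16;
    oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 +
         s->sprite_delta[1][1] * s->mb_y * 16;

    s->mdsp.gmc(dest_y, ptr, linesize, 16,
                ox, oy,
                s->sprite_delta[0][0], s->sprite_delta[0][1],
                s->sprite_delta[1][0], s->sprite_delta[1][1],
                a + 1, (1 << (2 * a + 1)) - s->no_rounding,
                s->h_edge_pos, s->v_edge_pos);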
/* hpel_motion(): half-pel motion compensation of one 8x8 block */
static int hpel_motion(MpegEncContext *s,
                       uint8_t *dest, uint8_t *src,
                       int src_x, int src_y,
                       op_pixels_func *pix_op,
                       int motion_x, int motion_y)
...
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    src_x = av_clip(src_x, -16, s->width);
    if (src_x != s->width)
        dxy |= motion_x & 1;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y != s->height)
        dxy |= (motion_y & 1) << 1;
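dxy packs the two half-pel flags into a 2-bit index: 0 = plain copy, 1 = horizontal interpolation, 2 = vertical, 3 = both; each flag is only kept when the position was not clamped to the frame edge. pix_op[dxy] is then one entry of the put/put_no_rnd/avg halfpel tables listed at the end of this document. A scalar sketch of the dxy == 1 case with rounding, assuming 8-bit samples (the name is illustrative):

static void put_pixels8_x2_sketch(uint8_t *block, const uint8_t *pixels,
                                  ptrdiff_t line_size, int h)
{
    /* horizontal half-pel: average each sample with its right neighbour,
     * rounding up: (a + b + 1) >> 1; the no_rnd variants drop the +1 */
    for (int i = 0; i < h; i++) {
        for (int x = 0; x < 8; x++)
            block[x] = (pixels[x] + pixels[x + 1] + 1) >> 1;
        block  += line_size;
        pixels += line_size;
    }
}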
/* mpeg_motion_internal(): half-pel MC of a whole macroblock (frame or field based) */
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ...
    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    /* the chroma vector is derived differently per output format;
     * the excerpted branch bodies: */
    /* H.263 with the FF_BUG_HPEL_CHROMA workaround (field MC) */
    mx      = (motion_x >> 1) | (motion_x & 1);
    ...
    uvdxy   = ((my & 1) << 1) | (mx & 1);
    uvsrc_x = s->mb_x * 8 + (mx >> 1);
    uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
    /* H.263 otherwise: chroma taken straight from the luma grid */
    uvdxy   = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
    uvsrc_x = src_x >> 1;
    uvsrc_y = src_y >> 1;
    /* H.261: chroma vectors are full pel */
    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = mb_y * 8 + my;
    /* 4:2:0 */
    uvdxy   = ((my & 1) << 1) | (mx & 1);
    uvsrc_x = s->mb_x * 8 + (mx >> 1);
    uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
    /* 4:2:2 */
    uvdxy   = ((motion_y & 1) << 1) | (mx & 1);
    uvsrc_x = s->mb_x * 8 + (mx >> 1);

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15,    0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        /* MPEG-1/2 motion vectors must not point outside the frame */
        av_log(s->avctx, AV_LOG_DEBUG,
               "MPEG motion vector out of boundary (%d %d)\n", src_x, src_y);
        ...
        /* other codecs: replicate border samples into edge_emu_buffer */
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        ...
        uvsrc_y = (unsigned)uvsrc_y << field_based;
        ...
    }
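When the vector reaches outside the decoded area, emulated_edge_mc() (declared near the end of this document) copies the block into edge_emu_buffer and replicates the border samples. A sketch of the chroma half of that fall-back; the placement of the Cb/Cr scratch blocks inside edge_emu_buffer is an assumption:

        uint8_t *ubuf = s->edge_emu_buffer + 18 * s->linesize;   /* assumed offsets */
        uint8_t *vbuf = ubuf + 9 * s->uvlinesize;

        s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9 + field_based,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9 + field_based,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr_cb = ubuf;
        ptr_cr = vbuf;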
    pix_op[0][dxy](dest_y, ptr_y, linesize, h);
    ...
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
/* mpeg_motion(): frame-based entry; selects the is_mpeg12 specialization */
static void mpeg_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        int motion_x, int motion_y, int h, int mb_y)
{
    ...
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    ...
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0,
                             field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}
/* mpeg_motion_field(): field-based variant (one 16x8 field at a time) */
static void mpeg_motion_field(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int bottom_field, int field_select,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              int motion_x, int motion_y, int h, int mb_y)
{
    ...
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 1, mb_y);
    ...
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1,
                             bottom_field, field_select, ref_picture, pix_op,
                             motion_x, motion_y, h, 0, mb_y);
}
/* put_obmc(): blend the five 8x8 predictions with position-dependent weights */
    uint8_t *const bottom = src[4];

#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);
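Each output pixel is a weighted sum of the five candidate predictions (the block's own plus those of its four neighbours); the five weights always sum to 8, so the +4 bias makes the >>3 a round-to-nearest division. For instance, a corner weighting of (t, l, m, r, b) = (2, 2, 4, 0, 0) expands to dst[x] = (2*top[x] + 2*left[x] + 4*mid[x] + 4) >> 3: half the weight on the block's own prediction and a quarter each on the top and left neighbours'.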
/* obmc_motion(): build the five predictions for one 8x8 block and blend them */
static void obmc_motion(MpegEncContext *s,
                        uint8_t *dest, uint8_t *src,
                        int src_x, int src_y,
                        op_pixels_func *pix_op,
                        int16_t mv[5][2] /* mid top left right bottom */)
...
    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
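A sketch of the rest of the loop as I read it from the declarations above: a neighbour whose vector equals the centre vector simply reuses the centre prediction, any other prediction is rendered by hpel_motion() into a scratch block, and put_obmc() blends the five pointers; the obmc_scratchpad offsets here are an assumption.

    for (i = 0; i < 5; i++) {
        if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) {
            ptr[i] = ptr[MID];    /* identical vector: reuse the centre prediction */
        } else {
            ptr[i] = s->obmc_scratchpad + 8 * (i & 1) +
                     s->linesize * 8 * (i >> 1);    /* assumed scratch layout */
            hpel_motion(s, ptr[i], src, src_x, src_y, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);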
/* qpel_motion(): quarter-pel motion compensation (16x16 frame or 16x8 field) */
static void qpel_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        int field_based, int bottom_field,
                        int field_select, uint8_t **ref_picture,
                        op_pixels_func (*pix_op)[4],
                        qpel_mc_func (*qpix_op)[16],
                        int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ...
    dxy   = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
    ...
    linesize = s->linesize << field_based;
    /* derive a half-pel chroma vector from the quarter-pel luma vector;
     * streams from certain buggy encoders need workaround rounding */
    if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);
    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;
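Worked example for the (x >> 1) | (x & 1) halving: an even component is halved exactly (6 -> 3, 4 -> 2), while an odd component keeps its remainder as a half-pel offset by forcing the result odd (5 -> 2 | 1 = 3, 3 -> 1 | 1 = 1), so fractional motion is never silently snapped onto the coarser grid. The rtab[] variant instead adds a table-driven rounding term, (motion_x >> 1) + rtab[motion_x & 7], reproducing the rounding of the encoders that the FF_BUG_QPEL_CHROMA2 workaround targets.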
    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15,    0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->edge_emu_buffer;
        /* the chroma planes follow the same pattern with 9-pixel blocks at
         *   uvsrc_x, uvsrc_y << field_based */
        ...
    }

    /* frame MC: one 16-wide qpel op */
    qpix_op[0][dxy](dest_y, ptr_y, linesize);
    /* field MC: two 8-wide qpel ops per field */
    qpix_op[1][dxy](dest_y, ptr_y, linesize);
    qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);

    /* chroma stays at half-pel accuracy even in quarter-pel mode */
    pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
    pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
/* chroma_4mv_motion(): H.263-style chroma MC for 4MV macroblocks; one chroma
 * vector is built from the four luma vectors with special rounding */
    int src_x, src_y, dxy, emu = 0;
    ...
    dxy = ((my & 1) << 1) | (mx & 1);
    ...
    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, (s->width >> 1));
    if (src_x == (s->width >> 1))
        dxy &= ~1;
    src_y = av_clip(src_y, -8, (s->height >> 1));
    if (src_y == (s->height >> 1))
        dxy &= ~2;

    ptr = ref_picture[1] + offset;
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) {
        /* edge emulation as above */
        ...
    }
    ...
    ptr = ref_picture[2] + offset;
/* prefetch_motion(): hint the first reference lines into the cache before MC */
    const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8;
    const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y;
    int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64;
    ...
    off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64;
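Both offsets are handed to s->vdsp.prefetch() (its pointer is listed near the end of this document) so that the first reference rows are already in cache when the per-block MC runs. A sketch of the luma call, with the row count being an assumption:

    s->vdsp.prefetch(pix[0] + off, s->linesize, 4);

The recomputed chroma offset is then used the same way on the chroma planes.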
/* apply_obmc(): overlapped block motion compensation; collect this MB's four
 * 8x8 vectors and its neighbours' vectors into mv_cache[][] */
    const int xy     = mb_x + mb_y * s->mb_stride;
    const int mot_xy = mb_x * 2 + mb_y * 2 * mot_stride;
    ...
    /* bottom row of the MB's own vectors; the row below is not decoded yet,
     * so it is duplicated as the "bottom" neighbours */
    AV_COPY32(mv_cache[2][1], cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);
    AV_COPY32(mv_cache[3][1], cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2], cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* top neighbour: reuse our own vectors if it is unavailable or intra ... */
    AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
    AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    /* ... otherwise copy them from the MB above */
    AV_COPY32(mv_cache[0][1], cur_frame->motion_val[0][mot_xy - mot_stride]);
    AV_COPY32(mv_cache[0][2], cur_frame->motion_val[0][mot_xy - mot_stride + 1]);

    /* left neighbour (fallback copies first, then from the actual neighbour) */
    AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
    AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    AV_COPY32(mv_cache[2][0], cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);

    /* right neighbour */
    AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
    AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    AV_COPY32(mv_cache[2][3], cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        int16_t mv[5][2] = {
            /* this block's vector plus those of its four neighbours */
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1], mv);
        ...
    }
    /* chroma is done once for the whole MB from the summed luma vectors */
    chroma_4mv_motion(s, dest_cb, dest_cr,
                      ref_picture, pix_op[1],
                      mx, my);
/* apply_8x8(): 4MV mode, one vector per 8x8 luma block */
    int dxy, mx, my, src_x, src_y;
    ...
    /* quarter-pel path */
    for (i = 0; i < 4; i++) {
        int motion_x = s->mv[dir][i][0];
        int motion_y = s->mv[dir][i][1];

        dxy   = ((motion_y & 3) << 2) | (motion_x & 3);
        src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
        src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

        src_x = av_clip(src_x, -16, s->width);
        if (src_x == s->width)
            dxy &= ~3;
        src_y = av_clip(src_y, -16, s->height);
        ...
        ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
        ...
        dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
        qpix_op[1][dxy](dest, ptr, s->linesize);

        mx += s->mv[dir][i][0] / 2;
        my += s->mv[dir][i][1] / 2;
    }
    /* half-pel path */
    for (i = 0; i < 4; i++) {
        hpel_motion(s,
                    dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8,
                    mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    s->mv[dir][i][0], s->mv[dir][i][1]);

        mx += s->mv[dir][i][0];
        my += s->mv[dir][i][1];
    }
    /* one chroma vector for the whole MB, from the sum of the four luma MVs */
    chroma_4mv_motion(s, dest_cb, dest_cr,
                      ref_picture, pix_op[1], mx, my);
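The mx/my accumulated here are the sum of the four luma vectors (pre-halved in the quarter-pel path), and chroma_4mv_motion() turns that sum into a single half-pel chroma vector through ff_h263_round_chroma(), declared further down in this document. A sketch of that helper; the table values follow the H.263 chroma rounding rule but are quoted from memory, so treat them as an assumption:

static int ff_h263_round_chroma(int x)
{
    /* divide the summed vector by 8 (four blocks times the chroma
     * subsampling) with H.263-style rounding of the fractional part */
    static const uint8_t h263_chroma_roundtab[16] = {
        /*  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
            0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
    };
    return h263_chroma_roundtab[x & 0xf] + (x >> 3);
}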
/* mpv_motion_internal(): motion compensation of a single macroblock,
 * dispatching on s->mv_type */
    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* GMC, quarter-pel, WMV2 mspel, or plain half-pel for the whole MB */
        ...
            qpel_motion(s, dest_y, dest_cb, dest_cr, 0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0, ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field, then bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0], ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1], ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            /* field picture: fall back to the current picture when the
             * selected reference field is unavailable */
            if (... || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion(s, dest_y, dest_cb, dest_cr, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            /* reference selection per 16x8 half */
            ref2picture = ref_picture;
            ...
            mpeg_motion(s, ..., ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: put the first prediction, then average the second on top */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1, ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        }
        break;
        ref_picture, pix_op, qpix_op, 1);
        ref_picture, pix_op, qpix_op, 0);
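These two argument lists belong to the ff_mpv_motion() entry point (its full signature appears in the symbol list below); a sketch of the wrapper as I read it, with the CONFIG_SMALL guard being an assumption:

void ff_mpv_motion(MpegEncContext *s,
                   uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                   int dir, uint8_t **ref_picture,
                   op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    /* a specialized MPEG-1/2 build of mpv_motion_internal() ... */
    if (s->out_format == FMT_MPEG1)
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        /* ... and the generic one for everything else */
        mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}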
Symbols referenced in the listing above (signatures and brief descriptions):

void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
void(* prefetch)(uint8_t *buf, ptrdiff_t stride, int h)
Prefetch memory into cache (if supported by hardware).
static void gmc_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
#define MV_TYPE_FIELD
2 vectors, one per field
int sprite_warping_accuracy
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
int obmc
overlapped block motion compensation
int real_sprite_warping_points
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
#define FF_BUG_QPEL_CHROMA2
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
static void chroma_4mv_motion(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func *pix_op, int mx, int my)
H.263 chroma 4MV motion compensation.
enum OutputFormat out_format
output format
int no_rounding
apply no rounding to motion compensation (MPEG-4, MSMPEG-4, ...); for B-frames the rounding mode is always 0.
Picture current_picture
copy of the current picture structure.
int sprite_offset[2][2]
sprite offset[isChroma][isMVY]
#define LOCAL_ALIGNED_8(t, v,...)
void(* gmc)(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height)
global motion compensation.
#define FF_BUG_QPEL_CHROMA
static void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
int mb_skipped
MUST BE SET only during DECODING.
void(* gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder)
translational global motion compensation.
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
simple assert() macros that are a bit more flexible than ISO C assert().
int quarter_sample
1->qpel, 0->half pel ME/MC
Libavcodec external API header.
common internal API header
int sprite_delta[2][2]
sprite_delta [isY][isMVY]
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
int16_t (*motion_val[2])[2]
Picture * current_picture_ptr
pointer to the current picture
static void gmc1_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture)
AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
static av_always_inline void mpeg_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int is_mpeg12, int mb_y)
#define OBMC_FILTER4(x, t, l, m, r, b)
int first_field
is 1 for the first field of a field picture, 0 otherwise
static av_always_inline void mpv_motion_internal(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int is_mpeg12)
motion compensation of a single macroblock
#define MV_TYPE_16X16
1 vector for the whole mb
ptrdiff_t linesize
line size, in bytes, may be different from width
static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int mb_y)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
int height
picture size. must be a multiple of 16
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
static void mpeg_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h, int mb_y)
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
#define FF_BUG_HPEL_CHROMA
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
int mv[2][4][2]
motion vectors for a macroblock; first coordinate: 0 = forward, 1 = backward; second: depends on…
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
struct AVCodecContext * avctx
static void qpel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16], int motion_x, int motion_y, int h)
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], int motion_x, int motion_y, int h)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
static void apply_obmc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, op_pixels_func(*pix_op)[4])
uint8_t * obmc_scratchpad
uint32_t * mb_type
types and macros are defined in mpegutils.h
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
void ff_h261_loop_filter(MpegEncContext *s)
static int hpel_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int motion_x, int motion_y)
static void obmc_motion(MpegEncContext *s, uint8_t *dest, uint8_t *src, int src_x, int src_y, op_pixels_func *pix_op, int16_t mv[5][2])
#define MV_TYPE_8X8
4 vectors (h263, mpeg4 4MV)
static void apply_8x8(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, qpel_mc_func(*qpix_op)[16], op_pixels_func(*pix_op)[4])
static void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
#define OBMC_FILTER(x, t, l, m, r, b)