/* excerpts from dct_unquantize_mpeg1_intra_c() */
                                  int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;
    for(i=1;i<=nCoeffs;i++) {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
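The excerpt only shows the rescaling lines themselves. As a rough, hypothetical standalone sketch (not the FFmpeg function), the same MPEG-1 intra rule applied to a whole 8x8 block could look like this:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical sketch: MPEG-1 intra dequantization of one 8x8 block.
 * Each nonzero AC coefficient is scaled by qscale and the matrix weight,
 * divided by 8 (>> 3), and forced odd, mirroring the excerpt above.
 * The DC coefficient (index 0) is left alone; it is scaled separately. */
static void mpeg1_intra_dequant_sketch(int16_t block[64],
                                       const uint16_t quant_matrix[64],
                                       int qscale)
{
    for (int i = 1; i < 64; i++) {
        int level = block[i];
        if (!level)
            continue;
        int sign = level < 0 ? -1 : 1;
        level = abs(level);
        level = (level * qscale * quant_matrix[i]) >> 3;
        level = (level - 1) | 1;            /* "oddification", as in the excerpt */
        block[i] = (int16_t)(sign * level);
    }
}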
/* excerpts from dct_unquantize_mpeg1_inter_c() */
                                  int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;
    for(i=0; i<=nCoeffs; i++) {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
/* excerpts from dct_unquantize_mpeg2_intra_c() */
                                  int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;
    for(i=1;i<=nCoeffs;i++) {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
/* excerpts from dct_unquantize_mpeg2_intra_bitexact() */
                                  int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;
    for(i=1;i<=nCoeffs;i++) {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
/* excerpts from dct_unquantize_mpeg2_inter_c() */
                                  int16_t *block, int n, int qscale)
    const uint16_t *quant_matrix;
    for(i=0; i<=nCoeffs; i++) {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
/* excerpts from dct_unquantize_h263_intra_c() */
                                  int16_t *block, int n, int qscale)
        qadd = (qscale - 1) | 1;
    for(i=1; i<=nCoeffs; i++) {
                level = level * qmul - qadd;
                level = level * qmul + qadd;
/* excerpts from dct_unquantize_h263_inter_c() */
                                  int16_t *block, int n, int qscale)
    qadd = (qscale - 1) | 1;
    for(i=0; i<=nCoeffs; i++) {
                level = level * qmul - qadd;
                level = level * qmul + qadd;
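For comparison, the H.263-style path above uses a plain multiplier and an odd additive term instead of a quantization matrix. A hedged standalone sketch (qmul assumed to be 2*qscale, as in the H.263 reconstruction rule; this is an illustration, not the FFmpeg function):

#include <stdint.h>

/* Hypothetical sketch of H.263-style dequantization:
 * qmul = 2*qscale, qadd = (qscale - 1) | 1, and every nonzero
 * coefficient becomes level*qmul +/- qadd depending on its sign. */
static void h263_dequant_sketch(int16_t block[64], int qscale, int is_intra)
{
    const int qmul = qscale << 1;
    const int qadd = (qscale - 1) | 1;

    /* intra blocks start at index 1: their DC is scaled separately */
    for (int i = is_intra ? 1 : 0; i < 64; i++) {
        int level = block[i];
        if (!level)
            continue;
        if (level < 0)
            level = level * qmul - qadd;
        else
            level = level * qmul + qadd;
        block[i] = (int16_t)level;
    }
}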
/* excerpts from gray16(), gray8() and dct_init() */
        memset(dst + h*linesize, 128, 16);   /* gray16() */
        memset(dst + h*linesize, 128, 8);    /* gray8() */
        for (i=0; i<4; i++) {
    if (HAVE_INTRINSICS_NEON)
/* excerpts from init_duplicate_context() */
    int yc_size = y_size + 2 * c_size;
    for (i = 0; i < 12; i++) {
/* excerpts from backup_duplicate_context() */
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
/* excerpts from ff_update_duplicate_context() */
    for (i = 0; i < 12; i++) {
               "scratch buffers.\n");
/* excerpts from ff_mpeg_update_thread_context() */
    if (s1->context_initialized){
        if (s1->picture && s1->picture[i].f->buf[0] &&
#define UPDATE_PICTURE(pic)\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);
    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
                       s1->allocated_bitstream_buffer_size);
               s1->bitstream_buffer_size);
                                 &s->sc, s1->linesize) < 0) {
               "scratch buffers.\n");
               "be allocated due to unknown size.\n");
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
    if (!s1->first_field) {
        if (s1->current_picture_ptr)
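REBASE_PICTURE above translates a Picture pointer from the source thread's picture[] array into the destination context's array at the same index. A minimal illustration of that pointer arithmetic, with simplified types and hypothetical names:

#include <stddef.h>

/* Hypothetical sketch of the REBASE_PICTURE idea: if ptr points into
 * old_array, return the element at the same index in new_array,
 * otherwise NULL. */
static int *rebase_ptr_sketch(const int *ptr, int *new_array,
                              const int *old_array, size_t count)
{
    if (ptr && ptr >= old_array && ptr < old_array + count)
        return &new_array[ptr - old_array];
    return NULL;
}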
/* excerpts from init_context_frame() */
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
    yc_size = y_size + 2 * c_size;
        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
    for (i = 0; i < yc_size; i++)
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
/* excerpts from ff_mpv_common_init() */
    int nb_slices = (HAVE_THREADS &&
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
        for (i = 0; i < nb_slices; i++) {
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
/* excerpts from ff_mpv_common_frame_size_change() */
    if (nb_slices > 1) {
        for (i = 0; i < nb_slices; i++) {
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
/* excerpts from gray_frame() */
    int i, h_chroma_shift, v_chroma_shift;
    for(i=0; i<frame->height; i++)
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
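gray_frame() fills every plane with the mid value 0x80 (neutral chroma / mid gray), as does the dummy-picture code from ff_mpv_frame_start() excerpted just below. A minimal sketch of that per-plane fill, assuming a plane described by data pointer, linesize, width and height:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch: fill one plane with 0x80, row by row, honouring the linesize
 * (row stride), which may be larger than the visible width. */
static void fill_plane_gray_sketch(uint8_t *data, ptrdiff_t linesize,
                                   int width, int height)
{
    for (int y = 0; y < height; y++)
        memset(data + y * linesize, 0x80, width);
}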
/* excerpts from ff_mpv_frame_start() */
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
        int h_chroma_shift, v_chroma_shift;
                                         &h_chroma_shift, &v_chroma_shift);
                   "allocating dummy last picture for B frame\n");
                   "warning: first frame is no keyframe\n");
            for(i=0; i<avctx->height; i++)
                       0x80, avctx->width);
                       0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                       0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
            for(i=0; i<avctx->height; i++)
#if 0 // BUFREF-FIXME
        for (i = 0; i < 4; i++) {
/* excerpts from hpel_motion_lowres() */
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int h_edge_pos, int v_edge_pos,
                              int motion_x, int motion_y)
    const int op_index = FFMIN(lowres, 3);
    const int s_mask = (2 << lowres) - 1;
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;
    src += src_y * stride + src_x;
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest, src, stride, h, sx, sy);
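The sub-pixel handling above is the core of all the *_lowres motion-compensation helpers: at lowres level L the low (L+1) bits of a half-pel motion vector are kept as the fraction, the remaining bits give the integer step at the reduced resolution, and the fraction is rescaled onto the 0..7 grid that the h264_chroma_mc_func operators expect. A hedged sketch of just that arithmetic (hypothetical helper):

/* Hypothetical sketch of the lowres motion-vector split used above. */
static void lowres_mv_split_sketch(int motion_x, int lowres,
                                   int *int_step, int *frac_eighth)
{
    const int s_mask = (2 << lowres) - 1;   /* fractional bits */
    int sx = motion_x & s_mask;
    *int_step    = motion_x >> (lowres + 1);
    *frac_eighth = (sx << 2) >> lowres;     /* map the fraction onto 0..7 */
}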
/* excerpts from mpeg_motion_lowres() */
                              int motion_x, int motion_y,
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    const int block_s = 8>>lowres;
    const int s_mask = (2 << lowres) - 1;
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
        uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
            uvsx = (2 * mx) & s_mask;
            uvsy = (2 * my) & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres);
            uvsrc_y = mb_y * block_s + (my >> lowres);
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
            uvsy = motion_y & s_mask;
            uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            uvsx = motion_x & s_mask;
            uvsy = motion_y & s_mask;
    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
/* excerpts from chroma_4mv_motion_lowres() */
    const int op_index = FFMIN(lowres, 3);
    const int block_s = 8 >> lowres;
    const int s_mask = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
                                 src_x, src_y, h_edge_pos, v_edge_pos);
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
    ptr = ref_picture[2] + offset;
                                 src_x, src_y, h_edge_pos, v_edge_pos);
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
/* excerpts from MPV_motion_lowres() */
                              int dir, uint8_t **ref_picture,
    const int block_s = 8 >> lowres;
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
        for (i = 0; i < 4; i++) {
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);
            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               ref_picture, pix_op,
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
            for (i = 0; i < 2; i++) {
                    ref2picture = ref_picture;
                                   ref2picture, pix_op,
                                   s->mv[dir][i][0], s->mv[dir][i][1] +
                                   2 * block_s * i, block_s, mb_y >> 1);
                dest_y += 2 * block_s * s->linesize;
            for (i = 0; i < 2; i++) {
                for (j = 0; j < 2; j++) {
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
            for (i = 0; i < 2; i++) {
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);
/* excerpts from lowest_referenced_row() */
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
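The off computation above converts the largest vertical MV extent into whole macroblock rows: !quarter_sample brings half-pel values onto the quarter-pel scale, and 64 quarter-pel units correspond to one 16-pixel MB row. A small hedged sketch of the same arithmetic (hypothetical helper):

/* Hypothetical sketch of the row-offset computation in
 * lowest_referenced_row(): bring the MV extent to quarter-pel units
 * (<< 1 when MVs are half-pel), then divide by 64, rounding up. */
static int mv_extent_to_mb_rows_sketch(int my_min, int my_max, int quarter_sample)
{
    const int qpel_shift = !quarter_sample;          /* half-pel -> quarter-pel */
    const int extent = my_max > -my_min ? my_max : -my_min;
    return ((extent << qpel_shift) + 63) >> 6;       /* MB rows, rounded up */
}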
/* excerpt from put_dct() */
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* excerpt from add_dequant_dct() */
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
/* excerpts from ff_clean_intra_table_entries() */
        memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
        memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
        memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
        memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
/* excerpts from mpv_reconstruct_mb_internal() */
                                   int lowres_flag, int is_mpeg12)
            for(j=0; j<64; j++){
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
        dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
        dest_cb= s->dest[1];
        dest_cr= s->dest[2];
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
                    add_dct(s, block[4], 4, dest_cb, dct_linesize);
                    add_dct(s, block[5], 5, dest_cr, dct_linesize);
                    add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                    add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                        add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                        add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                        add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                const int act_block_size = block_size * 2;
                s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
                dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
                s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
                s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
                uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                for(i = 0; i < 3; i++) {
                    for(h = 0; h < (16 >> vsub); h++){
                        for(w = 0; w < (16 >> hsub); w++)
                        dest_pcm[i] += linesize[i] / 2;
                uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
                int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
                for(i = 0; i < 3; i++) {
                    dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
                    for(h = (16 >> vsub)-1; h >= 1; h--){
                        for(w = (16 >> hsub)-1; w >= 1; w--)
                        dest_pcm[i] -= linesize[i] / 2;
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
                    dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
                    s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                    s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                        s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                        s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                        s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
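The add_dct()/idct_put() calls above all target the same 2x2 arrangement of luma sub-blocks; for interlaced DCT the two halves are line-interleaved instead, which is what the dct_offset/dct_linesize pair expresses. A hedged sketch of that addressing (the doubled stride for the interlaced case is an assumption inferred from the layout, not shown in the excerpt):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical sketch of the luma block placement used above:
 * four block_size x block_size blocks per macroblock, laid out 2x2 for
 * progressive DCT, or line-interleaved (offset of one line, doubled
 * stride) for interlaced DCT. */
static void luma_block_dests_sketch(uint8_t *dest_y, ptrdiff_t linesize,
                                    int block_size, int interlaced_dct,
                                    uint8_t *dst[4], ptrdiff_t *dct_linesize)
{
    const ptrdiff_t dct_offset = interlaced_dct ? linesize
                                                : linesize * block_size;
    *dct_linesize = interlaced_dct ? linesize * 2 : linesize;
    dst[0] = dest_y;
    dst[1] = dest_y + block_size;
    dst[2] = dest_y + dct_offset;
    dst[3] = dest_y + dct_offset + block_size;
}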
/* excerpts from ff_init_block_index() */
        s->dest[0] += s->mb_y * linesize << height_of_mb;
        s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
/* excerpt from ff_set_qscale() */
    else if (qscale > 31)
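The clamp this line belongs to is part of ff_set_qscale(), which (per its description further down) sets qscale and updates the qscale-dependent variables; the clamp itself keeps qscale in the MPEG range 1..31. A trivial hedged sketch of that clamp:

/* Hypothetical sketch of the qscale clamp around the line above:
 * MPEG-style quantiser scale codes are limited to 1..31. */
static int clamp_qscale_sketch(int qscale)
{
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;
    return qscale;
}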
int bitstream_buffer_size
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int init_duplicate_context(MpegEncContext *s)
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
ScanTable intra_v_scantable
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
This structure describes decoded (raw) audio or video data.
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
int coded_width
Bitstream width / height, may be different from width/height e.g.
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
#define AV_LOG_WARNING
Something somehow does not look correct.
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
uint8_t * coded_block_base
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
int32_t delta_qp
Difference between this block's final quantization parameter and the corresponding per-frame value...
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
static void gray_frame(AVFrame *frame)
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3, 4=wmv1/7, 5=wmv2/8
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
uint8_t * bitstream_buffer
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
int field_picture
whether or not the picture was encoded in separate fields
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
int16_t(* p_mv_table_base)[2]
int src_x
Distance in luma pixels from the top-left corner of the visible frame to the top-left corner of the b...
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
uint32_t * score_map
map to store the scores
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
static void free_duplicate_context(MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
int mb_num
number of MBs of a picture
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
enum AVPictureType last_picture
int encoding
true if we are encoding (vs decoding)
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
const uint8_t ff_mpeg2_non_linear_qscale[32]
int alloc_mb_width
mb_width used to allocate tables
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
const uint8_t ff_mpeg1_dc_scale_table[128]
int export_side_data
Bit set of AV_CODEC_EXPORT_DATA_* flags, which affects the kind of metadata exported in frame...
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
enum OutputFormat out_format
output format
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Data structure for storing block-level encoding information.
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Multithreading support functions.
qpel_mc_func(* qpel_put)[16]
void ff_free_picture_tables(Picture *pic)
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Picture current_picture
copy of the current picture structure.
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
void ff_mpv_common_init_ppc(MpegEncContext *s)
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
uint16_t pp_time
time distance between the last 2 p,s,i frames
int interlaced_frame
The content of the picture is interlaced.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
int16_t(*[2][2] p_field_mv_table_base)[2]
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
int intra_only
if true, only intra pictures are generated
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
int h263_plus
H.263+ headers.
int slice_context_count
number of used thread_contexts
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int last_dc[3]
last DC values for MPEG-1
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
int mb_skipped
MUST BE SET only during DECODING.
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
#define MAX_PICTURE_COUNT
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
int flags
AV_CODEC_FLAG_*.
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
int low_delay
no reordering needed / has no B-frames
uint8_t *[2][2] b_field_select_table
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
void ff_mpv_common_end(MpegEncContext *s)
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
int ff_mpeg_er_init(MpegEncContext *s)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
common internal API header
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
enum AVPictureType pict_type
Picture type of the frame.
#define UPDATE_PICTURE(pic)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
static int16_t mult(Float11 *f1, Float11 *f2)
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define FF_THREAD_FRAME
Decode more than one frame at once.
int overread
the number of bytes which were irreversibly read from the next frame
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed B-frames
Video encoding parameters for a given frame.
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int w
Width and height of the block in luma pixels.
Picture new_picture
copy of the source picture structure for encoding.
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encodin...
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
unsigned int allocated_bitstream_buffer_size
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
#define AV_CODEC_FLAG_PSNR
error[] variables will be set during encoding.
void(* idct_add)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
int16_t(*[2][2][2] b_field_mv_table_base)[2]
int16_t(* b_forw_mv_table_base)[2]
static int alloc_picture(MpegEncContext *s, Picture *pic)
int16_t(*[12] pblocks)[64]
int block_last_index[12]
last non zero coefficient in block
uint8_t idct_permutation[64]
IDCT input permutation.
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
int mb_decision
macroblock decision mode
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
int first_field
is 1 for the first field of a field picture 0 otherwise
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
#define MV_TYPE_16X16
1 vector for the whole mb
static void clear_context(MpegEncContext *s)
int16_t(* b_bidir_forw_mv_table_base)[2]
int coded_picture_number
picture number in bitstream order
uint16_t inter_matrix[64]
int alloc_mb_height
mb_height used to allocate tables
struct MpegEncContext * thread_context[MAX_THREADS]
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
ScanTable intra_scantable
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size. must be a multiple of 16
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
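The packing described above can be written out as a small helper; a worked example (hypothetical function, not part of the API):

#include <stdint.h>

/* Worked example of the LSB-first fourcc packing described above:
 * "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'. */
static uint32_t make_fourcc_sketch(char a, char b, char c, char d)
{
    return ((uint32_t)(uint8_t)d << 24) | ((uint32_t)(uint8_t)c << 16) |
           ((uint32_t)(uint8_t)b <<  8) |  (uint32_t)(uint8_t)a;
}
/* make_fourcc_sketch('A','B','C','D') == 0x44434241 */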
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
uint32_t state
contains the last few bytes in MSB order
Picture * picture
main picture buffer
ScanTable intra_h_scantable
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
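The rounding and no-rounding formulas quoted for these tables differ only in how ties are broken; a one-line illustration with hypothetical helpers:

#include <stdint.h>

/* Illustration of the two halfpel averaging rules documented above. */
static inline uint8_t avg_rnd_sketch(uint8_t a, uint8_t b)    { return (uint8_t)((a + b + 1) >> 1); }
static inline uint8_t avg_no_rnd_sketch(uint8_t a, uint8_t b) { return (uint8_t)((a + b) >> 1); }
/* e.g. a = 10, b = 11: avg_rnd_sketch() -> 11, avg_no_rnd_sketch() -> 10 */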
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
int closed_gop
MPEG1/2 GOP is closed.
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
unsigned int avpriv_toupper4(unsigned int x)
#define FF_DEBUG_DCT_COEFF
#define FF_MB_DECISION_RD
rate distortion
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
int f_code
forward MV resolution
int max_b_frames
max number of B-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int h263_pred
use MPEG-4/H.263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
discard all non reference
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
common internal api header.
int32_t(* block32)[12][64]
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
const uint8_t ff_default_chroma_qscale_table[32]
static av_cold int dct_init(MpegEncContext *s)
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture last_picture
copy of the previous picture structure.
Picture * last_picture_ptr
pointer to the previous picture.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
const uint8_t ff_alternate_vertical_scan[64]
uint32_t * map
map to avoid duplicate evaluations
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
H264ChromaContext h264chroma
int16_t(* blocks)[12][64]
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
int slices
Number of slices.
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
void ff_mpv_frame_end(MpegEncContext *s)
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
uint8_t * obmc_scratchpad
int16_t(* block)[64]
points to one of the following blocks
ParseContext parse_context
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Picture next_picture
copy of the next picture structure.
int key_frame
1 -> keyframe, 0-> not
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
int chroma_qscale
chroma QP
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
uint32_t * mb_type
types and macros are defined in mpegutils.h
int16_t(* dpcm_macroblock)[3][256]
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define FFSWAP(type, a, b)
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
#define FF_QSCALE_TYPE_MPEG1
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int16_t(* b_direct_mv_table_base)[2]
int b_code
backward MV resolution for B-frames (MPEG-4)
int alloc_mb_stride
mb_stride used to allocate tables
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void ff_mpv_report_decode_progress(MpegEncContext *s)
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define AV_CEIL_RSHIFT(a, b)
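AV_CEIL_RSHIFT is used throughout the listing above to derive chroma plane sizes; its effect is a right shift that rounds up instead of down. A hedged sketch of the constant-shift form:

/* Sketch of a ceiling right shift: like a >> b, but rounding up.
 * e.g. ceil_rshift_sketch(5, 1) == 3 while 5 >> 1 == 2. */
static inline int ceil_rshift_sketch(int a, int b)
{
    return (a + (1 << b) - 1) >> b;
}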