int motion_x, int motion_y)
int cbp, coded_cbp, i;
for (i = 0; i < 6; i++)
for (i = 0; i < 6; i++) {
cbp |= val << (5 - i);
coded_cbp |= val << (5 - i);
for (i = 0; i < 6; i++)
.priv_class = &wmv2_class,
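These fragments come from the coded-block-pattern computation inside ff_wmv2_encode_mb(). A sketch of how they fit together for intra macroblocks, with variable names taken from the fragments above and the guard conditions filled in as an assumption about the surrounding source:

    /* build cbp from block_last_index[] and a predicted ("coded") cbp for the
     * VLC lookup; prediction is only applied to the four luma blocks */
    cbp       = 0;
    coded_cbp = 0;
    for (i = 0; i < 6; i++) {
        int val = (s->block_last_index[i] >= 1);
        int pred;
        cbp |= val << (5 - i);
        if (i < 4) {
            pred         = ff_msmpeg4_coded_block_pred(s, i, &coded_block);
            *coded_block = val;
            val         ^= pred;
        }
        coded_cbp |= val << (5 - i);
    }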
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
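A sketch of where this flag typically ends up for this encoder: the codec declaration at the bottom of wmv2enc.c. The field layout below follows older libavcodec trees (newer trees wrap several of these fields in FFCodec/CODEC_* macros), so treat the exact member names as an assumption:

    const AVCodec ff_wmv2_encoder = {
        .name           = "wmv2",
        .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = AV_CODEC_ID_WMV2,
        .priv_data_size = sizeof(Wmv2Context),
        .priv_class     = &wmv2_class,
        .init           = wmv2_encode_init,
        .encode2        = ff_mpv_encode_picture,
        .close          = ff_mpv_encode_end,
        .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP, /* close() may run after a failed init() */
        .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
                                                         AV_PIX_FMT_NONE },
    };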
static av_always_inline int wmv2_get_cbp_table_index(MpegEncContext *s, int cbp_index)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val to the output n times.
#define LIBAVUTIL_VERSION_INT
static int encode_ext_header(Wmv2Context *w)
static av_cold int init(AVCodecContext *avctx)
const char * av_default_item_name(void *ptr)
Return the context name.
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
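A minimal, self-contained usage sketch; ExtState and alloc_state are hypothetical names used only for illustration:

    #include <stdint.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    typedef struct ExtState { int width, height; uint8_t *buf; } ExtState;

    static int alloc_state(ExtState **out)
    {
        ExtState *st = av_mallocz(sizeof(*st)); /* every field starts zeroed */
        if (!st)
            return AVERROR(ENOMEM);
        *out = st;
        return 0;
    }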
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
static const AVClass wmv2_class
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass is associated.
#define av_assert0(cond)
assert() equivalent, that is always enabled.
const AVOption ff_mpv_generic_options[]
The exact code depends on how similar the blocks are and how related they are to the block
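The entries wmv2_class, class_name, av_default_item_name, ff_mpv_generic_options and LIBAVUTIL_VERSION_INT fit together in the encoder's private class. A plausible definition, following the usual pattern for mpegvideo-based encoders; the exact class_name string is an assumption:

    static const AVClass wmv2_class = {
        .class_name = "wmv2 encoder",
        .item_name  = av_default_item_name,
        .option     = ff_mpv_generic_options,
        .version    = LIBAVUTIL_VERSION_INT,
    };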
static av_cold int wmv2_encode_init(AVCodecContext *avctx)
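A sketch of the usual init flow for this encoder, based on the functions listed on this page; propagating the return value of encode_ext_header() is an assumption and may differ in the real source:

    static av_cold int wmv2_encode_init(AVCodecContext *avctx)
    {
        Wmv2Context *const w = avctx->priv_data;
        int ret;

        ret = ff_mpv_encode_init(avctx);   /* generic mpegvideo encoder setup */
        if (ret < 0)
            return ret;

        ff_wmv2_common_init(w);            /* WMV2 tables/state shared with the decoder */

        return encode_ext_header(w);       /* extra header stored in avctx->extradata */
    }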
int misc_bits
cbp, mb_type
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...); for B-frames the rounding mode is always 0
int mb_height
number of MBs horizontally & vertically
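For reference, the macroblock counts follow from the frame dimensions by rounding up to whole 16x16 macroblocks, roughly (field names as in MpegEncContext):

    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;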
static int get_bits_diff(MpegEncContext *s)
const uint16_t ff_msmp4_mb_i_table[64][2]
int rl_chroma_table_index
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
const char * name
Name of the codec implementation.
int block_last_index[12]
last non-zero coefficient in the block
void ff_msmpeg4_encode_motion(MpegEncContext *s, int mx, int my)
static const float pred[4]
Libavcodec external API header.
const uint32_t (*const ff_wmv2_inter_table[WMV2_INTER_CBP_TABLE_COUNT])[2]
main external API structure.
int ff_mpv_encode_init(AVCodecContext *avctx)
Describe the class of an AVClass context structure.
int slice_height
in macroblocks
void ff_msmpeg4_handle_slices(MpegEncContext *s)
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static enum AVPixelFormat pix_fmts[]
struct AVCodecContext * avctx
PutBitContext pb
bit output
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void ff_msmpeg4_code012(PutBitContext *pb, int n)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
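A minimal usage sketch of the generic libavcodec bit writer (distinct from the Jpeg2000EncoderContext put_bits helper listed above); write_header_bits, buf and size are hypothetical:

    #include "libavcodec/put_bits.h"

    static int write_header_bits(uint8_t *buf, int size)
    {
        PutBitContext pb;
        init_put_bits(&pb, buf, size);
        put_bits(&pb, 5, 0x1f);         /* write 5 bits with value 0x1f */
        put_bits(&pb, 1, 1);            /* write a single 1 bit */
        flush_put_bits(&pb);            /* pad the final byte with zeros */
        return put_bits_count(&pb) / 8; /* bytes written */
    }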
int ff_mpv_encode_end(AVCodecContext *avctx)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding. This is mainly needed because some optimized bitstream readers read 32 or 64 bits at once and could read over the end.
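A small sketch of honoring the padding requirement when copying a bitstream into a freshly allocated buffer; dup_with_padding is a hypothetical helper:

    #include <string.h>
    #include "libavcodec/avcodec.h"
    #include "libavutil/mem.h"

    static uint8_t *dup_with_padding(const uint8_t *src, int size)
    {
        /* extra zeroed bytes so optimized readers that fetch 32/64 bits at a
         * time never read past valid memory */
        uint8_t *dst = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!dst)
            return NULL;
        memcpy(dst, src, size);
        return dst;
    }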
av_cold void ff_wmv2_common_init(Wmv2Context *w)
int64_t bit_rate
wanted bit rate
int ff_msmpeg4_coded_block_pred(MpegEncContext *s, int n, uint8_t **coded_block_ptr)
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int h263_aic_dir
AIC direction: 0 = left, 1 = top.
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
AVPixelFormat
Pixel format.
static double val(void *priv, double ch)
const uint8_t ff_table_inter_intra[4][2]
void ff_msmpeg4_encode_block(MpegEncContext *s, int16_t *block, int n)
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)