/* Index into the unified run/level length table (see uni_h261_rl_len below). */
#define UNI_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))

/* ff_h261_get_picture_format(): H.261 only knows two picture sizes. */
if (width == 176 && height == 144)
    return 0;   // QCIF
else if (width == 352 && height == 288)
    return 1;   // CIF
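Since only these two sizes are valid, anything else has to be rejected before encoding starts. A minimal standalone sketch of the same check (the helper name and the -1 error value are illustrative; FFmpeg itself returns an AVERROR code):

#include <stdio.h>

/* Illustrative re-implementation of the check above: 0 = QCIF, 1 = CIF,
 * -1 = unsupported (FFmpeg returns an AVERROR value in that case). */
static int h261_picture_format(int width, int height)
{
    if (width == 176 && height == 144)
        return 0;                       /* QCIF */
    else if (width == 352 && height == 288)
        return 1;                       /* CIF */
    return -1;                          /* anything else cannot be coded as H.261 */
}

int main(void)
{
    printf("176x144 -> %d\n", h261_picture_format(176, 144));
    printf("352x288 -> %d\n", h261_picture_format(352, 288));
    printf("640x480 -> %d\n", h261_picture_format(640, 480));
    return 0;
}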
/* ff_h261_reorder_mb_index(): at the start of each GOB row
 * (every 11 macroblocks) the MV predictor is reset. */
if (index % 11 == 0) {
    ...
}

/* For CIF the GOBs are fragmented in the middle of a scanline, so the
 * sequential index has to be remapped to the macroblock's real position: */
s->mb_x  = index % 11;
...
s->mb_x += 11 * (index % 2);
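The remap above decomposes a GOB-ordered macroblock counter into frame coordinates: a CIF picture is 22x18 macroblocks, each GOB covers 11x3 of them, and the GOBs sit two abreast. A standalone sketch of the full arithmetic (plain ints instead of MpegEncContext fields; not FFmpeg code):

#include <stdio.h>

/* Map a GOB-ordered macroblock index (0..395 for CIF) to its x/y
 * position in the 22x18 macroblock grid. */
static void cif_remap(int index, int *mb_x, int *mb_y)
{
    *mb_x  = index % 11;        /* column inside the GOB (GOBs are 11 MBs wide) */
    index /= 11;
    *mb_y  = index % 3;         /* row inside the GOB (GOBs are 3 MBs high) */
    index /= 3;
    *mb_x += 11 * (index % 2);  /* left or right GOB of the pair */
    index /= 2;
    *mb_y += 3 * index;         /* which GOB pair, counted from the top */
}

int main(void)
{
    int x, y;
    /* index 32 is the last MB of the first GOB, index 33 the first MB of the next */
    int samples[] = { 0, 10, 11, 32, 33, 66, 395 };
    for (int i = 0; i < 7; i++) {
        cif_remap(samples[i], &x, &y);
        printf("index %3d -> mb_x=%2d mb_y=%2d\n", samples[i], x, y);
    }
    return 0;
}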
/* h261_encode_motion(): the MV difference is sent as a magnitude
 * (which selects the VLC) plus a separate sign bit. */
code = sign ? -val : val;
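Before this line, h261_encode_motion() wraps the difference into the range -16..15 (only the value modulo 32 matters) and derives the sign. A minimal sketch of that preparation step, with a hypothetical emit() standing in for the put_bits() calls into ff_h261_mv_tab:

#include <stdio.h>

/* Hypothetical stand-in for put_bits(): just print what would be written. */
static void emit(const char *what, int value)
{
    printf("  %s = %d\n", what, value);
}

/* Prepare one H.261 motion vector difference for coding: wrap it into
 * -16..15, then split it into magnitude and sign. */
static void encode_mv_component(int val)
{
    printf("mvd %d:\n", val);
    if (val == 0) {
        emit("vlc index", 0);            /* single code for "no change" */
        return;
    }
    if (val > 15)
        val -= 32;                       /* wrap: only val mod 32 matters */
    if (val < -16)
        val += 32;
    int sign = val < 0;
    int code = sign ? -val : val;        /* magnitude selects the VLC entry */
    emit("vlc index", code);             /* ff_h261_mv_tab[code] in the real encoder */
    emit("sign bit ", sign);
}

int main(void)
{
    encode_mv_component(0);
    encode_mv_component(-3);
    encode_mv_component(20);   /* wraps to -12 */
    return 0;
}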
/* get_cbp(): one coded-block-pattern bit per block that has coefficients. */
for (i = 0; i < 6; i++)
    if (s->block_last_index[i] >= 0)
        cbp |= 1 << (5 - i);
/* h261_encode_block(): */
int level, run, i, j, last_index, last_non_zero, sign, slevel, code;
...
/* the intra DC level is clamped to 1..254 (0 and 255 cannot be represented) */
else if (level < 1) {
    ...
}
/* special short code for a first coefficient of +1/-1 in an inter block */
else if ((block[0] == 1 || block[0] == -1) &&
         ...
/* AC coefficients: run/level coding along the scan order */
last_non_zero = i - 1;
for (; i <= last_index; i++) {
    ...
        run = i - last_non_zero - 1;
        ...
        if (run == 0 && level < 16)
            code += 1;
    ...
}
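The AC loop above turns the scanned coefficients into (run, level) pairs: run counts the zeros since the previous non-zero coefficient, level is the magnitude, and the sign travels as a separate bit (or the whole triple is escaped when no VLC entry covers it). A self-contained sketch of just that bookkeeping, with print_pair() as a stand-in for the VLC/escape write:

#include <stdio.h>

/* Stand-in for the VLC/escape write done in h261_encode_block(). */
static void print_pair(int run, int level, int sign)
{
    printf("  run=%d level=%d sign=%d\n", run, level, sign);
}

/* Extract (run, level, sign) triples from an already-scanned 8x8 block. */
static void run_level_scan(const int *coeffs, int first, int last_index)
{
    int last_non_zero = first - 1;
    for (int i = first; i <= last_index; i++) {
        int level = coeffs[i];
        if (level) {
            int run  = i - last_non_zero - 1;  /* zeros since the last coded coeff */
            int sign = level < 0;
            if (sign)
                level = -level;
            print_pair(run, level, sign);
            last_non_zero = i;
        }
    }
}

int main(void)
{
    /* a few coefficients in scan order; the rest of the block is zero */
    int coeffs[64] = { 12, 0, 0, -3, 1, 0, 0, 0, 2 };
    run_level_scan(coeffs, 1, 8);   /* index 0 (DC) is coded separately */
    return 0;
}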
/* ff_h261_encode_mb(): */
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64],
                       int motion_x, int motion_y)
{
    int mvd, mv_diff_x, mv_diff_y, i, cbp;
    ...
    /* a non-zero mvd means the macroblock is motion compensated */
    mvd = motion_x | motion_y;
    ...
    /* no coded blocks and no motion: the macroblock is skipped */
    if ((cbp | mvd) == 0) {
        ...
    }
    ...
    /* motion vectors are reduced to full-pel and coded as differences
     * to the previously transmitted vector */
    mv_diff_x = (motion_x >> 1) - s->last_mv[0][0][0];
    mv_diff_y = (motion_y >> 1) - s->last_mv[0][0][1];
    s->last_mv[0][0][0] = (motion_x >> 1);
    s->last_mv[0][0][1] = (motion_y >> 1);
    ...
    /* finally, encode the six 8x8 blocks of the macroblock */
    for (i = 0; i < 6; i++)
        h261_encode_block(h, block[i], i);
    ...
}
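Before any bits are written the function decides whether the macroblock can be skipped: nothing is coded when no block has coefficients (cbp == 0) and there is no motion (mvd == 0); otherwise the motion vector goes out as a full-pel difference against the last coded vector. A condensed sketch of that flow with plain ints in place of the MpegEncContext fields (resetting the MV predictor on skip is an assumption about the elided branch):

#include <stdio.h>

/* Previously coded motion vector, full-pel, like s->last_mv[0][0][]. */
static int last_mv_x, last_mv_y;

/* Decide skip vs. code and compute the MV differences. block_coded[i]
 * stands in for s->block_last_index[i] >= 0. */
static void encode_mb_decision(const int block_coded[6], int motion_x, int motion_y)
{
    int cbp = 0;
    for (int i = 0; i < 6; i++)
        if (block_coded[i])
            cbp |= 1 << (5 - i);        /* coded block pattern, MSB = first block */

    int mvd = motion_x | motion_y;      /* non-zero iff the MB is motion compensated */

    if ((cbp | mvd) == 0) {             /* nothing to transmit: skipped macroblock */
        printf("skip (reset MV predictor)\n");
        last_mv_x = last_mv_y = 0;
        return;
    }

    /* half-pel input is reduced to full-pel, then coded differentially */
    int mv_diff_x = (motion_x >> 1) - last_mv_x;
    int mv_diff_y = (motion_y >> 1) - last_mv_y;
    last_mv_x = motion_x >> 1;
    last_mv_y = motion_y >> 1;
    printf("code: cbp=0x%02x mv_diff=(%d,%d)\n", cbp, mv_diff_x, mv_diff_y);
}

int main(void)
{
    int none[6] = { 0, 0, 0, 0, 0, 0 };
    int some[6] = { 1, 0, 0, 1, 0, 0 };
    encode_mb_decision(none, 0, 0);     /* skipped */
    encode_mb_decision(some, 4, -2);    /* coded, MV (2,-1) relative to (0,0) */
    encode_mb_decision(some, 4, -2);    /* same MV again -> difference (0,0) */
    return 0;
}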
/* init_uni_h261_rl_tab(): pre-compute, for every (last, run, level)
 * combination, the smallest number of bits needed to code it. */
int slevel, run, last;
...
for (slevel = -64; slevel < 64; slevel++) {
    if (slevel == 0)
        continue;
    for (run = 0; run < 64; run++) {
        for (last = 0; last <= 1; last++) {
            int level = slevel < 0 ? -slevel : slevel;
            ...
            /* a regular VLC exists for this run/level pair */
            if (code != rl->n && len < len_tab[index]) {
                ...
            }
            ...
            /* escape coding as the fallback */
            if (len < len_tab[index]) {
                ...
            }
        }
    }
}
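The table filled here is what the rate-estimation pointers listed below (intra_ac_vlc_length, inter_ac_vlc_length, ...) presumably point at: instead of re-deriving a VLC for each candidate coefficient, the encoder packs (last, run, level) with UNI_ENC_INDEX and reads a pre-computed bit count. A small illustrative sketch of that packing and lookup; the coeff_bit_cost() helper and the stored lengths are made up for the example:

#include <stdint.h>
#include <stdio.h>

/* Same packing as the macro above: the "last" flag, the zero run and the
 * biased level (slevel + 64, so -64..63 maps to 0..127) select one byte. */
#define UNI_ENC_INDEX(last, run, level) ((last) * 128 * 64 + (run) * 128 + (level))

/* Illustrative length table, same size as uni_h261_rl_len (64*64*2*2 bytes). */
static uint8_t len_tab[64 * 64 * 2 * 2];

/* Hypothetical cost query, as rate-distortion code might use it: how many
 * bits would coding (last, run, slevel) take according to the table? */
static int coeff_bit_cost(int last, int run, int slevel)
{
    return len_tab[UNI_ENC_INDEX(last, run, slevel + 64)];
}

int main(void)
{
    /* pretend the init code stored a few lengths */
    len_tab[UNI_ENC_INDEX(0, 0,  1 + 64)] = 3;   /* short code: run 0, level +1 */
    len_tab[UNI_ENC_INDEX(0, 5, -2 + 64)] = 14;  /* rarer pair: longer code     */

    printf("cost(run=0, level=+1) = %d bits\n", coeff_bit_cost(0, 0,  1));
    printf("cost(run=5, level=-2) = %d bits\n", coeff_bit_cost(0, 5, -2));
    return 0;
}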
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
void ff_init_block_index(MpegEncContext *s)
#define UNI_ENC_INDEX(last, run, level)
static void align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
static void put_sbits(PutBitContext *pb, int n, int32_t value)
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
#define LIBAVUTIL_VERSION_INT
static av_cold int init(AVCodecContext *avctx)
const uint8_t ff_h261_mba_bits[35]
const char * av_default_item_name(void *ptr)
Return the context name.
int min_qcoeff
minimum encodable coefficient
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
uint8_t * intra_ac_vlc_length
const uint8_t ff_h261_mba_code[35]
Macro definitions for various function/variable attributes.
const uint8_t ff_h261_cbp_tab[63][2]
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
const AVOption ff_mpv_generic_options[]
const uint8_t ff_mpeg1_dc_scale_table[128]
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing the init function to be called...
static void h261_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a Group of Blocks (GOB) header.
int max_qcoeff
maximum encodable coefficient
int dquant
qscale difference to prev qscale
static void ff_update_block_index(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
static void h261_encode_block(H261Context *h, int16_t *block, int n)
Encode an 8x8 block.
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
static const AVClass h261_class
uint8_t * inter_ac_vlc_last_length
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
uint8_t * intra_ac_vlc_last_length
int n
number of entries of table_vlc minus 1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
const uint16_t (*table_vlc)[2]
int block_last_index[12]
last non zero coefficient in block
const uint8_t ff_h261_mtype_code[10]
const uint8_t ff_h261_mtype_bits[10]
int ac_esc_length
num of bits needed to encode the longest esc
av_cold void ff_rl_init(RLTable *rl, uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
const uint8_t ff_h261_mv_tab[17][2]
static av_cold void h261_encode_init_static(void)
const int ff_h261_mtype_map[10]
Libavcodec external API header.
void ff_h261_reorder_mb_index(MpegEncContext *s)
ScanTable intra_scantable
int height
picture size. must be a multiple of 16
int ff_h261_get_picture_format(int width, int height)
int ff_mpv_encode_init(AVCodecContext *avctx)
uint8_t * inter_ac_vlc_length
Describe the class of an AVClass context structure.
static int get_cbp(MpegEncContext *s, int16_t block[6][64])
static av_cold void init_uni_h261_rl_tab(const RLTable *rl, uint8_t *len_tab)
static void h261_encode_motion(H261Context *h, int val)
RLTable ff_h261_rl_tcoeff
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static enum AVPixelFormat pix_fmts[]
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
struct AVCodecContext * avctx
PutBitContext pb
bit output
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
int ff_mpv_encode_end(AVCodecContext *avctx)
static int get_rl_index(const RLTable *rl, int last, int run, int level)
static uint8_t uni_h261_rl_len[64 * 64 * 2 * 2]
static int ff_thread_once(char *control, void (*routine)(void))
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
av_cold void ff_h261_encode_init(MpegEncContext *s)
AVPixelFormat
Pixel format.
static double val(void *priv, double ch)
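Several of the entries above (PutBitContext pb, put_sbits(), align_put_bits(), put_bits_ptr()) are the bitstream-writer plumbing every put_bits() call in the excerpts relies on. The following toy MSB-first writer is not FFmpeg's PutBitContext, just a self-contained illustration of what those helpers do: fixed-width fields are appended bit by bit, signed values are written as their low n bits, and alignment pads with zero bits to the next byte boundary.

#include <stdint.h>
#include <stdio.h>

/* Toy MSB-first bit writer, for illustration only. */
typedef struct {
    uint8_t buf[64];
    int     bitpos;       /* number of bits written so far */
} BitWriter;

static void bw_put_bits(BitWriter *w, int n, uint32_t value)
{
    /* write the n low bits of value, most significant bit first */
    for (int i = n - 1; i >= 0; i--) {
        if ((value >> i) & 1)
            w->buf[w->bitpos >> 3] |= 0x80 >> (w->bitpos & 7);
        w->bitpos++;
    }
}

/* put_sbits()-style write: a signed value is just its low n bits. */
static void bw_put_sbits(BitWriter *w, int n, int32_t value)
{
    bw_put_bits(w, n, (uint32_t)value & ((1u << n) - 1));
}

/* align_put_bits()-style padding: zero bits up to the next byte boundary. */
static void bw_align(BitWriter *w)
{
    w->bitpos = (w->bitpos + 7) & ~7;
}

int main(void)
{
    BitWriter w = { {0}, 0 };
    bw_put_bits(&w, 20, 0x10);   /* e.g. a 20-bit start-code-like field       */
    bw_put_bits(&w, 5, 3);       /* a 5-bit field                             */
    bw_put_sbits(&w, 8, -5);     /* an 8-bit signed field (two's complement)  */
    bw_align(&w);                /* zero-pad to a byte boundary               */
    for (int i = 0; i < (w.bitpos >> 3); i++)
        printf("%02x ", w.buf[i]);
    printf("(%d bits)\n", w.bitpos);
    return 0;
}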