static void arith2_normalise(ArithCoder *c)
{
    /* renormalise until the top bytes of low and high differ again,
       pulling one more coded byte into value on every iteration */
    while ((c->high >> 15) - (c->low >> 15) < 2) {
        if ((c->low ^ c->high) & 0x10000) {
            c->high  ^= 0x8000;
            c->value ^= 0x8000;
            c->low   ^= 0x8000;
        }
        c->high  = (uint16_t)c->high  << 8 | 0xFF;
        c->value = (uint16_t)c->value << 8 | bytestream2_get_byte(c->gbc.gB);
        c->low   = (uint16_t)c->low   << 8;
    }
}
static int arith2_get_scaled_value(int value, int n, int range)
{
    int split = (n << 1) - range;

    return value > split ? split + (value - split >> 1) : value;
}

static void arith2_rescale_interval(ArithCoder *c, int range,
                                    int low, int high, int n)
{
    int split = (n << 1) - range;

    c->high  = high > split ? split + (high - split << 1) : high;
    c->high += c->low - 1;
    c->low  += low  > split ? split + (low  - split << 1) : low;
}
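/* Worked example (standalone, illustration only -- not part of mss2.c):
 * with n <= range < 2n the decoder stretches n symbol slots over range
 * positions; slots below split = 2n - range keep width 1, the rest get
 * width 2, and arith2_get_scaled_value() maps a position back to its slot. */
#include <stdio.h>

static int scaled_value(int value, int n, int range)
{
    int split = (n << 1) - range;
    return value > split ? split + ((value - split) >> 1) : value;
}

int main(void)
{
    int n = 200, range = 300;                    /* split = 100            */
    printf("%d\n", scaled_value(99,  n, range)); /* 99:  1-to-1 region     */
    printf("%d\n", scaled_value(101, n, range)); /* 100: 2-to-1 region     */
    printf("%d\n", scaled_value(299, n, range)); /* 199: last symbol slot  */
    return 0;
}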
static int arith2_get_number(ArithCoder *c, int n)
{
    int range = c->high - c->low + 1;
    /* ... scale n so that n <= range < 2 * n ... */
    if (n << scale > range)
        scale--;
    /* ... */
}

static int arith2_get_prob(ArithCoder *c, int16_t *probs)
{
    int range = c->high - c->low + 1, n = *probs;
    /* ... same scaling as above ... */
    if (n << scale > range)
        scale--;
    /* ... find the symbol whose cumulative frequencies bracket val ... */
    while (probs[++i] > val)
        ;

    arith2_rescale_interval(c, range,
                            probs[i] << scale, probs[i - 1] << scale, n);
    /* ... */
}
static int arith2_get_consumed_bytes(ArithCoder *c)
{
    int diff = (c->high >> 16) - (c->low >> 16);
    /* ... bp: bits already consumed from the bytestream, bits: 1 ... */

    while (!(diff & 0x80)) {
        bits++;
        diff <<= 1;
    }

    return (bits + bp + 7 >> 3) + ((c->low >> 16) + 1 == c->high >> 16);
}

static void arith2_init(ArithCoder *c, GetByteContext *gB)
{
    c->low           = 0;
    c->high          = 0xFFFFFF;
    c->value         = bytestream2_get_be24(gB);
    c->gbc.gB        = gB;
    c->get_model_sym = arith2_get_model_sym;
    c->get_number    = arith2_get_number;
}
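/* Minimal usage sketch (illustration only, not part of mss2.c): once
 * initialised, the coder is driven through its function pointers.  This
 * assumes the ArithCoder/GetByteContext declarations from mss12.h and
 * bytestream.h are in scope, as they are in this file, and that the buffer
 * holds at least the three bytes consumed by arith2_init(). */
static int read_one_number(const uint8_t *buf, int buf_size, int n)
{
    GetByteContext gB;
    ArithCoder acoder;

    bytestream2_init(&gB, buf, buf_size);  /* wrap the packet payload      */
    arith2_init(&acoder, &gB);             /* seeds value with three bytes */
    return acoder.get_number(&acoder, n);  /* decode a value in [0, n)     */
}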
static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
{
    /* ... */
    if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < ncol; i++)
        *pal++ = AV_RB24(buf + 3 * i);
    /* ... */
}
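/* Standalone illustration (not decoder code) of the palette entry packing:
 * AV_RB24() reads three big-endian bytes, so an R,G,B triple becomes the
 * 0x00RRGGBB value stored in the palette. */
#include <stdio.h>
#include <stdint.h>

static uint32_t rb24(const uint8_t *p)
{
    return (uint32_t)p[0] << 16 | p[1] << 8 | p[2];
}

int main(void)
{
    const uint8_t entry[3] = { 0x12, 0x34, 0x56 };      /* R, G, B         */
    printf("0x%06X\n", (unsigned)rb24(entry));          /* prints 0x123456 */
    return 0;
}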
static int decode_555(AVCodecContext *avctx, GetByteContext *gB,
                      uint16_t *dst, ptrdiff_t stride,
                      int keyframe, int w, int h)
{
    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, endx, endy, t;

        /* each coordinate pair is packed into three bytes as two 12-bit values */
#define READ_PAIR(a, b)                 \
    a  = bytestream2_get_byte(gB) << 4; \
    t  = bytestream2_get_byte(gB);      \
    a |= t >> 4;                        \
    b  = (t & 0xF) << 8;                \
    b |= bytestream2_get_byte(gB);      \

        READ_PAIR(x, endx);
        READ_PAIR(y, endy);

        if (endx >= w || endy >= h || x > endx || y > endy)
            return AVERROR_INVALIDDATA;
        dst += x + stride * y;
        /* ... clip w and h to the update rectangle ... */
    }

    do {
        uint16_t *p = dst;
        do {
            if (repeat-- < 1) {
                int b = bytestream2_get_byte(gB);
                if (b < 128)    /* literal RGB555 pixel, high byte first */
                    last_symbol = b << 8 | bytestream2_get_byte(gB);
                else if (b > 129) {
                    /* accumulate a multi-byte run length */
                    repeat = 0;
                    while (b-- > 130) {
                        if (repeat >= (INT_MAX >> 8) - 1) {
                            /* ... report the overflow ... */
                            return AVERROR_INVALIDDATA;
                        }
                        repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
                    }
                    if (last_symbol == -2) {
                        int skip = FFMIN((unsigned)repeat, dst + w - p);
                        repeat -= skip;
                        p      += skip;
                    }
                } else
                    last_symbol = 127 - b;  /* -1: copy the pixel above, -2: skip */
            }
            if (last_symbol >= 0)
                *p = last_symbol;
            else if (last_symbol == -1 && prev_avail)
                *p = *(p - stride);
        } while (++p < dst + w);
        dst += stride;
        prev_avail = 1;
    } while (--h);

    return 0;
}
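/* Standalone illustration (not decoder code) of how the run-length bytes in
 * the loop above compose: each additional byte shifts the count up by 8 bits
 * and adds byte+1, so one byte yields runs of 1..256 and two bytes yield
 * 257..65792 without overlapping the one-byte range. */
#include <stdio.h>

static unsigned run_length(const unsigned char *len_bytes, int count)
{
    unsigned repeat = 0;
    for (int i = 0; i < count; i++)
        repeat = (repeat << 8) + len_bytes[i] + 1;
    return repeat;
}

int main(void)
{
    const unsigned char one[]  = { 0x00 };       /* -> 1   */
    const unsigned char max1[] = { 0xFF };       /* -> 256 */
    const unsigned char two[]  = { 0x00, 0x00 }; /* -> 257 */
    printf("%u %u %u\n", run_length(one, 1), run_length(max1, 1),
           run_length(two, 2));
    return 0;
}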
static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride,
                      uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal,
                      int keyframe, int kf_slipt, int slice, int w, int h)
{
    int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
    int remaining_codes, surplus_codes, i;

    const int alphabet_size = 270 - keyframe;
    /* ... bits[], codes[] and the VLC built below ... */
    int last_symbol = 0, repeat = 0, prev_avail = 0;

    if (!keyframe) {
        int x, y, clipw, cliph;
        /* ... read the update rectangle ... */
        if (x + clipw > w || y + cliph > h)
            return AVERROR_INVALIDDATA;
        pal_dst += pal_stride * y + x;
        rgb_dst += rgb_stride * y + x * 3;
        /* ... */
    } else {
        /* ... the second slice of a keyframe starts below the split row: ... */
        pal_dst += pal_stride * kf_slipt;
        rgb_dst += rgb_stride * kf_slipt;
        /* ... */
    }

    /* read the explicitly coded code lengths, one length class at a time */
    do {
        /* ... fetch current_length and current_codes for this class ... */
        while (current_codes--) {
            int symbol; /* ... read from the bitstream ... */
            if (symbol >= 204 - keyframe)
                symbol += 14 - keyframe;
            else if (symbol > 189)
                symbol = get_bits1(gb) + (symbol << 1) - 190;
            /* ... */
            bits[symbol]  = current_length;
            codes[symbol] = next_code++;
        }
        /* ... */
        remaining_codes = (1 << current_length) - next_code;
        /* ... */
        if (current_length > 22 || current_codes > remaining_codes)
            return AVERROR_INVALIDDATA;
    } while (current_codes != remaining_codes);

    remaining_codes = alphabet_size - read_codes;

    /* find the length at which the implicitly coded symbols fit */
    while ((surplus_codes = (2 << current_length) -
                            (next_code << 1) - remaining_codes) < 0) {
        current_length++;
        next_code <<= 1;
        /* ... */
    }

    /* hand the remaining codes to the symbols that have no length yet */
    for (i = 0; i < alphabet_size; i++)
        if (!bits[i]) {
            if (surplus_codes-- == 0) {
                current_length++;
                next_code <<= 1;
            }
            bits[i]  = current_length;
            codes[i] = next_code++;
        }

    if (next_code != 1 << current_length)
        return AVERROR_INVALIDDATA;

    if ((i = init_vlc(&vlc, 9, alphabet_size, bits, 1, 1, codes, 4, 4, 0)) < 0)
        return i;
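/* Standalone sketch (illustration only) of the canonical code assignment used
 * above: symbols taken in order each get the next code value at their length,
 * and moving to a longer length doubles the code value, exactly like the
 * `next_code <<= 1` steps.  A complete code ends with next_code == 1 << length,
 * which is the check performed just before init_vlc(). */
#include <stdio.h>

int main(void)
{
    const int lengths[] = { 2, 2, 2, 3, 3 };   /* hypothetical code lengths */
    int n = sizeof(lengths) / sizeof(lengths[0]);
    int next_code = 0, cur_len = lengths[0];

    for (int i = 0; i < n; i++) {
        while (cur_len < lengths[i]) {         /* descend to a longer length */
            next_code <<= 1;
            cur_len++;
        }
        printf("symbol %d: length %d code 0x%X\n", i, lengths[i], next_code++);
    }
    printf("complete: %s\n", next_code == 1 << cur_len ? "yes" : "no");
    return 0;
}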
    /* the pixel loop mirrors decode_555, but symbols come from the VLC and
       every pixel is written both as a palette index and as packed RGB24 */
    do {
        uint8_t *pp = pal_dst, *rp = rgb_dst;
        do {
            if (repeat-- < 1) {
                /* ... fetch the next VLC symbol; long runs extend repeat: ... */
                    repeat += (1 << b) - 1;
                /* ... */
                if (last_symbol == -2) {
                    int skip = FFMIN(repeat, pal_dst + w - pp);
                    /* ... advance pp and rp past the skipped pixels ... */
                }
                /* ... */
                    last_symbol = 267 - b;
            }
            if (last_symbol >= 0) {
                /* ... write the palette index to *pp and its RGB24 value to rp ... */
            } else if (last_symbol == -1 && prev_avail) {
                *pp = *(pp - pal_stride);
                memcpy(rp, rp - rgb_stride, 3);
            }
            /* ... */
        } while (++pp < pal_dst + w);
        pal_dst += pal_stride;
        rgb_dst += rgb_stride;
        prev_avail = 1;
    } while (--h);
    /* ... free the VLC and return ... */
}
static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
                       int x, int y, int w, int h, int wmv9_mask)
{
    /* ... set up the embedded VC-1/WMV9 decoder and parse the frame header;
       on a block count mismatch, error correction is switched off: ... */
        av_log(avctx, AV_LOG_WARNING,
               "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
               /* ... */);
    /* ... only symmetric subsampling of the target rectangle is handled: ... */
        avpriv_request_sample(avctx, "Asymmetric WMV9 rectangle subsampling");
    /* ... decode the blocks and blit the result into the output picture ... */
}
#define MAX_WMV9_RECTANGLES 20
#define ARITH2_PADDING      2

static int mss2_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    /* ... decoder contexts, arithmetic coder and bit/byte readers ... */
    int keyframe, has_wmv9, has_mv, is_rle, is_555, ret;
    /* ... */
    struct Rectangle wmv9rects[MAX_WMV9_RECTANGLES], *r;
    int used_rects = 0, i, implicit_rect = 0, av_uninit(wmv9_mask);

    /* ... read the header flags (keyframe, has_wmv9, has_mv, is_rle, is_555)
       with the arithmetic decoder ... */

    if (has_wmv9) {
        /* ... */
        implicit_rect = !arith2_get_bit(&acoder);

        while (arith2_get_bit(&acoder)) {
            if (used_rects == MAX_WMV9_RECTANGLES)
                return AVERROR_INVALIDDATA;
            r = &wmv9rects[used_rects];
            if (!used_rects)
                r->x = arith2_get_number(&acoder, avctx->width);
            else
                r->x = arith2_get_number(&acoder, avctx->width -
                                         wmv9rects[used_rects - 1].x) +
                       wmv9rects[used_rects - 1].x;
            /* ... y, w and h follow the same absolute/relative scheme ... */
            used_rects++;
        }

        if (implicit_rect && used_rects) {
            /* ... an implicit rectangle plus explicit ones is invalid ... */
            return AVERROR_INVALIDDATA;
        }

        if (implicit_rect) {
            /* ... the implicit rectangle covers the whole frame ... */
            wmv9rects[0].w = avctx->width;
            wmv9rects[0].h = avctx->height;
            /* ... */
        }
        for (i = 0; i < used_rects; i++) {
            if (!implicit_rect && arith2_get_bit(&acoder)) {
                /* ... */
            }
            /* ... */
        }
        wmv9_mask = arith2_get_bit(&acoder) - 1;
        /* ... */
    }
    if (keyframe && !is_555) {
        /* ... the palette update is parsed by decode_pal_v2() ... */
    }
    /* ... (re)get the output buffers ... */

    if (c->mvX < 0 || c->mvY < 0) {
        /* ... */
    }

    /* ... (start of the screen-layer branch elided) ... */
    } else if (!implicit_rect || wmv9_mask != -1) {
        /* ... */
    }
    /* ... */

    if (has_wmv9) {
        for (i = 0; i < used_rects; i++) {
            int x = wmv9rects[i].x;
            int y = wmv9rects[i].y;
            int w = wmv9rects[i].w;
            int h = wmv9rects[i].h;

            if (wmv9rects[i].coded) {
                int WMV9codedFrameSize;
                if (buf_size < 4 || !(WMV9codedFrameSize = AV_RL24(buf)))
                    return AVERROR_INVALIDDATA;
                if (ret = decode_wmv9(avctx, buf + 3, buf_size - 3,
                                      x, y, w, h, wmv9_mask))
                    return ret;
                buf      += WMV9codedFrameSize + 3;
                buf_size -= WMV9codedFrameSize + 3;
            } else {
                /* uncoded rectangle: fill with mid grey */
                if (wmv9_mask != -1) {
                    /* ... masked fill via the DSP helper ... */
                } else {
                    /* ... per output row: ... */
                    memset(dst, 0x80, w * 3);
                    /* ... */
                }
            }
        }
    }

    if (c->mvX < 0 || c->mvY < 0) {
        /* ... */
    }
    /* ... compose the final picture and report it via *got_frame ... */
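/* For context, a minimal sketch (illustration only, not part of mss2.c) of
 * driving this decoder through the public libavcodec API; in a real program
 * the stream's codec parameters (extradata, width, height) must be copied
 * into the context first, e.g. with avcodec_parameters_to_context(), and all
 * return values checked. */
#include <libavcodec/avcodec.h>

static AVFrame *decode_one_mss2_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame *frame = av_frame_alloc();

    avcodec_send_packet(avctx, pkt);               /* hands the packet to the decoder */
    if (avcodec_receive_frame(avctx, frame) < 0) { /* no frame produced               */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;                                  /* RGB24 or RGB555 output          */
}

/* Opening the decoder:
 *     const AVCodec *codec  = avcodec_find_decoder(AV_CODEC_ID_MSS2);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     avcodec_parameters_to_context(avctx, stream->codecpar);
 *     avcodec_open2(avctx, codec, NULL);
 */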