57 for (i = e - 1; i >= 0; i--)
60 e = -(is_signed && get_rac(c, state + 11 + FFMIN(e, 10)));
77 while (i < state->error_sum) {
83 ff_dlog(NULL, "v:%d bias:%d error:%d drift:%d count:%d k:%d",
86 v ^= ((2 * state->drift + state->count) >> 31);
109 #define RENAME(name) name
115 #define RENAME(name) name ## 32
119 int w, int h, int stride, int plane_index,
131 for (y = 0; y < h; y++) {
132 int16_t *temp = sample[0];
134 sample[0] = sample[1];
137 sample[1][-1] = sample[0][0];
138 sample[0][w] = sample[0][w - 1];
141 int ret = decode_line(s, w, sample, plane_index, 8);
144 for (x = 0; x < w; x++)
145 src[x*pixel_stride + stride * y] = sample[1][x];
151 for (x = 0; x < w; x++) {
152 ((uint16_t*)(src + stride*y))[x*pixel_stride] = sample[1][x];
155 for (x = 0; x < w; x++) {
169 memset(state, 128, sizeof(state));
210 } else if (ps == 2) {
213 } else if (ps == 3) {
273 memcpy(pdst, psrc, sizeof(*pdst));
340 decode_rgb_frame32(fs, planes, width, height, p->linesize);
346 decode_rgb_frame(fs, planes, width, height, p->linesize);
371 memset(state, 128, sizeof(state));
373 for (v = 0; i < 128; v++) { /* the condition tests i, which advances inside the body as run lengths are written */
376 if (len > 128 - i || !len)
380 quant_table[i] = scale * v;
385 for (i = 1; i < 128; i++)
386 quant_table[256 - i] = -quant_table[i];
387 quant_table[128] = -quant_table[127];
398 for (i = 0; i < 5; i++) {
402 context_count *= ret;
403 if (context_count > 32768U) {
407 return (context_count + 1) / 2;
418 memset(state2, 128, sizeof(state2));
419 memset(state, 128, sizeof(state));
438 for (i = 1; i < 256; i++)
511 "global: ver:%d.%d, coder:%d, colorspace: %d bpr:%d chroma:%d(%d:%d), alpha:%d slices:%dx%d qtabs:%d ec:%d intra:%d CRC:0x%08X\n",
533 memset(state, 128, sizeof(state));
546 for (i = 1; i < 256; i++) {
548 if (st < 1 || st > 255) {
558 chroma_planes = get_rac(c, state);
561 transparency = get_rac(c, state);
577 if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
579 chroma_h_shift, chroma_v_shift);
695 "chroma subsampling not supported in this colorspace\n");
735 if (context_count < 0) {
747 int trailer = 3 + 5*!!f->ec;
791 "quant_table_index out of range\n");
835 int buf_size = avpkt->size;
870 "Cannot decode non-keyframe without valid keyframe\n");
885 buf_p = buf + buf_size;
888 int trailer = 3 + 5*!!f->ec;
989 fsdst->ac = fsrc->ac;
992 fsdst->ec = fsrc->ec;
1025 memcpy(fdst, fsrc, sizeof(*fdst));
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static av_always_inline int fold(int diff, int bits)
#define AV_PIX_FMT_YUVA422P16
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define AV_PIX_FMT_YUV440P10
#define AV_PIX_FMT_YUVA422P9
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
This structure describes decoded (raw) audio or video data.
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV444P14
8 bits gray, 8 bits alpha
#define AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_YUVA422P10
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
#define AV_LOG_WARNING
Something somehow does not look correct.
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
static av_cold int init(AVCodecContext *avctx)
static int decode_slice(AVCodecContext *c, void *arg)
#define MAX_CONTEXT_INPUTS
#define AV_PIX_FMT_GBRP10
static int read_quant_table(RangeCoder *c, int16_t *quant_table, int scale)
static av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state, int is_signed)
The reader does not expect b to carry a signed value here, and if the code is later changed, for example by adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
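A minimal standalone sketch of the idea; SUINT is defined locally here as a stand-in (in FFmpeg it is an internal definition, so the exact header and checked-build behaviour are assumptions):

#include <stdio.h>

/* Local stand-in for FFmpeg's SUINT: an unsigned type used to hold a value
 * that is conceptually signed, so arithmetic on it cannot hit signed-overflow
 * Undefined Behavior. */
typedef unsigned SUINT;

int main(void)
{
    int   b = -5;
    SUINT a = b;              /* int -> unsigned conversion is well defined */

    /* 4 * a is evaluated in unsigned arithmetic (wraps, never UB); converting
     * the result back to int recovers -20 on two's-complement targets. */
    int r = (int)(4 * a);

    printf("%d\n", r);        /* prints -20 */
    return 0;
}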
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
#define AV_PIX_FMT_YUV420P12
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
FF Video Codec 1 (a lossless codec)
The pkt_dts and pkt_pts fields in AVFrame will work as usual. Restrictions: codecs whose streams don't reset across frames will not work because their bitstreams cannot be decoded in parallel. The contents of buffers must not be read before ff_thread_await_progress() has been called on them; reget_buffer() and buffer age optimizations no longer work. The contents of buffers must not be written to after ff_thread_report_progress() has been called on them; this includes draw_edges(). (Porting codecs to frame threading.)
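As a hedged illustration of that protocol (not code from ffv1dec.c; it assumes FFmpeg's internal ThreadFrame API from libavcodec/thread.h, i.e. ff_thread_await_progress() and ff_thread_report_progress()):

#include "libavcodec/thread.h"

/* Sketch of how a frame-threaded decoder consumes and publishes progress.
 * 'ref' is a reference picture being decoded by another thread; 'cur' is the
 * picture this thread is producing. Row numbers are illustrative. */
static void progress_sketch(ThreadFrame *cur, ThreadFrame *ref, int y)
{
    /* Do not read ref's buffers before the other thread has reported that
     * rows up to y are decoded. */
    ff_thread_await_progress(ref, y, 0);

    /* ... decode rows of 'cur' that depend on 'ref' ... */

    /* Tell later threads that rows up to y of 'cur' may now be read. */
    ff_thread_report_progress(cur, y, 0);
}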
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
enum AVPictureType last_picture
#define AV_PIX_FMT_GRAY10
static int is_input_end(FFV1Context *s)
#define AV_PIX_FMT_GRAY12
#define av_assert0(cond)
assert() equivalent, that is always enabled.
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
static int read_quant_tables(RangeCoder *c, int16_t quant_table[MAX_CONTEXT_INPUTS][256])
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
static int get_rac(RangeCoder *c, uint8_t *const state)
#define fs(width, name, subs,...)
#define FF_DEBUG_PICT_INFO
Undefined Behavior: in C, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated arrays. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs, and optimizing based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c…
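A small illustration (not from the FFmpeg sources) of how the optimizer exploits the no-UB assumption: because signed overflow is undefined, a compiler may fold the comparison below to a constant, so the "overflow check" never fires.

#include <limits.h>

/* Broken overflow check: for x == INT_MAX, x + 1 overflows, which is UB, so
 * GCC/Clang at -O2 are entitled to (and typically do) reduce this to 'return 1'. */
int will_not_overflow(int x)
{
    return x + 1 > x;
}

/* Well-defined alternative: test against the limit before adding. */
int will_not_overflow_safe(int x)
{
    return x < INT_MAX;
}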
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define AV_PIX_FMT_YUVA420P9
uint8_t (*initial_states[MAX_QUANT_TABLES])[32]
Public header for CRC hash function implementation.
av_cold int ff_ffv1_close(AVCodecContext *avctx)
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
static double av_q2d(AVRational a)
Convert an AVRational to a double.
bitstream reader API header.
#define AV_PIX_FMT_YUV444P16
int interlaced_frame
The content of the picture is interlaced.
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUVA420P16
#define FF_CODEC_CAP_ALLOCATE_PROGRESS
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
static int get_bits_left(GetBitContext *gb)
planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int decode_slice_header(FFV1Context *f, FFV1Context *fs)
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
Wrapper around release_buffer() for frame-multithreaded codecs.
int16_t quant_tables[MAX_QUANT_TABLES][MAX_CONTEXT_INPUTS][256]
int skip_alpha
Skip processing alpha if supported by codec.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
static const int16_t quant_table[64]
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
#define AV_PIX_FMT_YUVA444P16
#define AV_PIX_FMT_GBRAP12
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
#define AV_PIX_FMT_YUV444P10
int ff_ffv1_allocate_initial_states(FFV1Context *f)
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height)
Copy image in src_data to dst_data.
#define AV_PIX_FMT_GBRAP16
static int get_vlc_symbol(GetBitContext *gb, VlcState *const state, int bits)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
int ac
1=range coder <-> 0=golomb rice
int16_t quant_table[MAX_CONTEXT_INPUTS][256]
#define AC_RANGE_CUSTOM_TAB
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
#define AV_PIX_FMT_YUV422P9
static av_noinline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed)
uint8_t state_transition[256]
uint8_t nb_components
The number of components each pixel has, (1-4)
static void copy_fields(FFV1Context *fsdst, FFV1Context *fssrc, FFV1Context *fsrc)
enum AVPictureType pict_type
Picture type of the frame.
#define AV_PIX_FMT_GBRP16
#define AV_PIX_FMT_GRAY16
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
static int decode_plane(FFV1Context *s, uint8_t *src, int w, int h, int stride, int plane_index, int pixel_stride)
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
static void update_vlc_state(VlcState *const state, const int v)
uint32_t av_crc(const AVCRC *ctx, uint32_t crc, const uint8_t *buffer, size_t length)
Calculate the CRC of a block.
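A usage sketch of these two CRC helpers; the "CRC over data plus stored CRC equals 0" convention mirrors how the FFV1 decoder validates slices when ec is enabled, but treat the polynomial and initial value here as assumptions rather than facts from this listing.

#include <stddef.h>
#include <stdint.h>
#include "libavutil/crc.h"

/* Returns 1 if the buffer (payload followed by its stored CRC) checks out,
 * i.e. the running CRC over the whole buffer comes out as 0. */
static int crc_ok(const uint8_t *buf, size_t len)
{
    const AVCRC *tab = av_crc_get_table(AV_CRC_32_IEEE);
    return av_crc(tab, 0, buf, len) == 0;
}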
void ff_build_rac_states(RangeCoder *c, int factor, int max_p)
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
av_cold int ff_ffv1_init_slice_state(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_YUVA444P10
int ac_byte_count
number of bytes used for AC coding
#define AV_PIX_FMT_YUV444P9
#define AV_PIX_FMT_GBRP14
int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
static int read_header(FFV1Context *f)
static const float pred[4]
#define AV_PIX_FMT_YUV420P16
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
#define AV_PIX_FMT_YUV420P14
int context_count[MAX_QUANT_TABLES]
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
main external API structure.
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
static const struct @324 planes[]
#define AV_PIX_FMT_YUV420P10
planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
Rational number (pair of numerator and denominator).
av_cold void ff_init_range_decoder(RangeCoder *c, const uint8_t *buf, int buf_size)
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV420P9
#define AV_PIX_FMT_YUV422P14
#define AV_PIX_FMT_GBRP12
#define AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV444P12
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const AVCRC * av_crc_get_table(AVCRCId crc_id)
Get an initialized standard CRC table.
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
void ff_ffv1_clear_slice_state(FFV1Context *f, FFV1Context *fs)
#define AV_PIX_FMT_YUVA444P9
uint8_t(* state)[CONTEXT_SIZE]
planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
uint8_t * bytestream_start
static av_cold int decode_init(AVCodecContext *avctx)
PlaneContext plane[MAX_PLANES]
struct FFV1Context * fsrc
int top_field_first
If the content is interlaced, is top field displayed first.
int key_frame
1 -> keyframe, 0 -> not
struct FFV1Context * slice_context[MAX_SLICES]
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
enum AVFieldOrder field_order
Field order.
#define av_malloc_array(a, b)
#define FFSWAP(type, a, b)
int depth
Number of bits in the component.
static int get_sr_golomb(GetBitContext *gb, int k, int limit, int esc_len)
read signed golomb rice code (ffv1).
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
This structure stores compressed data.
static int read_extra_header(FFV1Context *f)
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
#define AV_PIX_FMT_YUV422P16
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
#define AV_NOPTS_VALUE
Undefined timestamp value.
int step
Number of elements between 2 horizontally consecutive pixels.
#define AV_PIX_FMT_0RGB32
#define AV_CEIL_RSHIFT(a, b)