Go to the documentation of this file.
59 fs->plane_count =
f->plane_count;
60 fs->transparency =
f->transparency;
61 for (j = 0; j <
f->plane_count; j++) {
85 for (j = 1; j < 256; j++) {
86 fs->c. one_state[ j] =
f->state_transition[j];
87 fs->c.zero_state[256 - j] = 256 -
fs->c.one_state[j];
97 for (
i = 0;
i <
f->max_slice_count;
i++) {
107 int i, max_slice_count =
f->num_h_slices *
f->num_v_slices;
111 for (
i = 0;
i < max_slice_count;) {
112 int sx =
i %
f->num_h_slices;
113 int sy =
i /
f->num_h_slices;
114 int sxs =
f->avctx->width * sx /
f->num_h_slices;
115 int sxe =
f->avctx->width * (sx + 1) /
f->num_h_slices;
116 int sys =
f->avctx->height * sy /
f->num_v_slices;
117 int sye =
f->avctx->height * (sy + 1) /
f->num_v_slices;
123 f->slice_context[
i++] =
fs;
124 memcpy(
fs,
f,
sizeof(*
fs));
125 memset(
fs->rc_stat2, 0,
sizeof(
fs->rc_stat2));
127 fs->slice_width = sxe - sxs;
128 fs->slice_height = sye - sys;
133 sizeof(*
fs->sample_buffer));
135 sizeof(*
fs->sample_buffer32));
136 if (!
fs->sample_buffer || !
fs->sample_buffer32)
139 f->max_slice_count = max_slice_count;
143 f->max_slice_count =
i;
151 for (
i = 0;
i <
f->quant_table_count;
i++) {
153 sizeof(*
f->initial_states[
i]));
154 if (!
f->initial_states[
i])
156 memset(
f->initial_states[
i], 128,
157 f->context_count[
i] *
sizeof(*
f->initial_states[
i]));
166 for (
i = 0;
i <
f->plane_count;
i++) {
195 for (j = 0; j <
s->max_slice_count; j++) {
197 for (
i = 0;
i <
s->plane_count;
i++) {
208 for (j = 0; j <
s->quant_table_count; j++) {
210 for (
i = 0;
i <
s->max_slice_count;
i++) {
217 for (
i = 0;
i <
s->max_slice_count;
i++)
Filter. The word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video that means pixel format; for audio that means channel layout and sample format. They are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references, ownership and permissions.
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
uint8_t(* state)[CONTEXT_SIZE]
#define AC_RANGE_CUSTOM_TAB
av_cold int ff_ffv1_init_slices_state(FFV1Context *f)
int flags
AV_CODEC_FLAG_*.
void ff_ffv1_clear_slice_state(const FFV1Context *f, FFV1Context *fs)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
uint8_t interlace_bit_state[2]
#define fs(width, name, subs,...)
char * stats_out
pass1 encoding statistics output buffer
av_cold int ff_ffv1_close(AVCodecContext *avctx)
av_cold int ff_ffv1_init_slice_state(const FFV1Context *f, FFV1Context *fs)
#define i(width, name, range_min, range_max)
#define av_malloc_array(a, b)
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
void * av_calloc(size_t nmemb, size_t size)
uint64_t(*[MAX_QUANT_TABLES] rc_stat2)[32][2]
int ff_ffv1_allocate_initial_states(FFV1Context *f)
main external API structure.
int width
picture width / height.
av_cold int ff_ffv1_init_slice_contexts(FFV1Context *f)
#define AVERROR_INVALIDDATA
Invalid data found when processing input.