36 if (s->quantizer != quantizer)
38 s->quantizer = quantizer;
47 VP56mv vect[2] = {{0,0}, {0,0}};
51 for (pos=0; pos<12; pos++) {
53 mvp.
y = row + ff_vp56_candidate_predictor_pos[
pos][1];
54 if (mvp.
x < 0 || mvp.
x >= s->mb_width ||
55 mvp.
y < 0 || mvp.
y >= s->mb_height)
57 offset = mvp.
x + s->mb_width*mvp.
y;
61 if ((s->macroblocks[offset].mv.x == vect[0].
x &&
62 s->macroblocks[offset].mv.y == vect[0].
y) ||
63 (s->macroblocks[offset].mv.x == 0 &&
64 s->macroblocks[offset].mv.y == 0))
67 vect[nb_pred++] = s->macroblocks[
offset].mv;
72 s->vector_candidate_pos =
pos;
75 s->vector_candidate[0] = vect[0];
76 s->vector_candidate[1] = vect[1];
87 for (ctx=0; ctx<3; ctx++) {
95 for (type=0; type<10; type++) {
112 for (ctx=0; ctx<3; ctx++) {
115 for (type=0; type<10; type++)
118 for (type=0; type<10; type++) {
119 int p02, p34, p0234, p17, p56, p89, p5689, p156789;
134 p156789 = p17 + p5689;
155 uint8_t *mb_type_model = s->modelp->mb_type[
ctx][prev_type];
171 for (b=0; b<4; b++) {
178 for (b=0; b<4; b++) {
184 s->parse_vector_adjustment(s, &s->mv[b]);
187 s->mv[
b] = s->vector_candidate[0];
190 s->mv[
b] = s->vector_candidate[1];
198 s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
201 s->mv[4].x = s->mv[5].x =
RSHIFT(mv.
x,2);
202 s->mv[4].y = s->mv[5].y =
RSHIFT(mv.
y,2);
212 s->macroblocks[row * s->mb_width + col].type = s->mb_type;
214 switch (s->mb_type) {
216 mv = &s->vector_candidate[0];
220 mv = &s->vector_candidate[1];
225 mv = &s->vector_candidate[0];
230 mv = &s->vector_candidate[1];
234 s->parse_vector_adjustment(s, &vect);
240 s->parse_vector_adjustment(s, &vect);
253 s->macroblocks[row*s->mb_width + col].mv = *
mv;
268 s->macroblocks[row * s->mb_width + col].type = s->mb_type;
272 s->macroblocks[row*s->mb_width + col].mv = *
mv;
283 int idx = s->idct_scantable[0];
286 for (b=0; b<6; b++) {
287 VP56RefDc *ab = &s->above_blocks[s->above_block_idx[
b]];
303 if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
312 s->block_coeff[
b][idx] +=
dc;
313 s->prev_dc[
ff_vp56_b2p[
b]][ref_frame] = s->block_coeff[
b][idx];
318 s->block_coeff[
b][idx] *= s->dequant_dc;
323 ptrdiff_t
stride,
int dx,
int dy)
327 if (dx) s->vp56dsp.edge_filter_hor(yuv + 10-dx , stride, t);
328 if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
330 int * bounding_values = s->bounding_values_array + 127;
339 ptrdiff_t
stride,
int x,
int y)
344 int overlap_offset = 0;
345 int mask = s->vp56_coord_div[
b] - 1;
346 int deblock_filtering = s->deblock_filtering;
353 deblock_filtering = 0;
355 dx = s->mv[
b].x / s->vp56_coord_div[
b];
356 dy = s->mv[
b].y / s->vp56_coord_div[
b];
365 if (x<0 || x+12>=s->plane_width[plane] ||
366 y<0 || y+12>=s->plane_height[plane]) {
367 s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
368 src + s->block_offset[b] + (dy-2)*stride + (dx-2),
371 s->plane_width[plane],
372 s->plane_height[plane]);
373 src_block = s->edge_emu_buffer;
374 src_offset = 2 + 2*
stride;
375 }
else if (deblock_filtering) {
378 s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
379 src + s->block_offset[
b] + (dy-2)*stride + (dx-2),
381 src_block = s->edge_emu_buffer;
382 src_offset = 2 + 2*
stride;
385 src_offset = s->block_offset[
b] + dy*stride + dx;
388 if (deblock_filtering)
391 if (s->mv[b].x & mask)
392 overlap_offset += (s->mv[
b].x > 0) ? 1 : -1;
393 if (s->mv[b].y & mask)
394 overlap_offset += (s->mv[
b].y > 0) ? stride : -stride;
396 if (overlap_offset) {
398 s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
399 stride, s->mv[b], mask, s->filter_selection, b<4);
401 s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
402 src_block+src_offset+overlap_offset,
405 s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset,
stride, 8);
411 if (selector > 10 || selector == 1)
412 s->vp3dsp.idct_put(dest, stride, block);
420 s->vp3dsp.idct_add(dest, stride, block);
421 else if (selector > 1)
424 s->vp3dsp.idct_dc_add(dest, stride, block);
429 int b, ab, b_max, plane, off;
430 AVFrame *frame_current, *frame_ref;
436 frame_ref = s->frames[ref_frame];
441 b_max = 6 - 2*is_alpha;
445 for (b=0; b<b_max; b++) {
448 s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
454 for (b=0; b<b_max; b++) {
456 off = s->block_offset[
b];
457 s->hdsp.put_pixels_tab[1][0](frame_current->
data[plane] + off,
458 frame_ref->
data[plane] + off,
459 s->stride[plane], 8);
461 s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
472 for (b=0; b<b_max; b++) {
473 int x_off = b==1 || b==3 ? 8 : 0;
474 int y_off = b==2 || b==3 ? 8 : 0;
476 vp56_mc(s, b, plane, frame_ref->
data[plane], s->stride[plane],
477 16*col+x_off, 16*row+y_off);
479 s->stride[plane], s->block_coeff[b], s->idct_selector[b]);
485 s->block_coeff[4][0] = 0;
486 s->block_coeff[5][0] = 0;
500 ret = s->parse_coeff(s);
529 s->plane_width[0] = s->plane_width[3] = avctx->
coded_width;
530 s->plane_width[1] = s->plane_width[2] = avctx->
coded_width/2;
531 s->plane_height[0] = s->plane_height[3] = avctx->
coded_height;
532 s->plane_height[1] = s->plane_height[2] = avctx->
coded_height/2;
534 s->have_undamaged_frame = 0;
542 if (s->mb_width > 1000 || s->mb_height > 1000) {
549 sizeof(*s->above_blocks));
551 sizeof(*s->macroblocks));
552 av_free(s->edge_emu_buffer_alloc);
553 s->edge_emu_buffer_alloc =
av_malloc(16*stride);
554 s->edge_emu_buffer = s->edge_emu_buffer_alloc;
555 if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc)
558 s->edge_emu_buffer += 15 *
stride;
560 if (s->alpha_context)
574 int remaining_buf_size = avpkt->
size;
575 int alpha_offset = remaining_buf_size;
580 if (remaining_buf_size < 3)
582 alpha_offset = bytestream_get_be24(&buf);
583 remaining_buf_size -= 3;
584 if (remaining_buf_size < alpha_offset)
588 res = s->parse_header(s, buf, alpha_offset);
593 for (i = 0; i < 4; i++) {
595 if (s->alpha_context)
625 int bak_w = avctx->
width;
626 int bak_h = avctx->
height;
630 remaining_buf_size -= alpha_offset;
632 res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
636 avctx->
width = bak_w;
646 s->discard_frame = 0;
649 if (s->discard_frame)
660 int jobnr,
int threadnr)
663 int is_alpha = (jobnr == 1);
664 VP56Context *
s = is_alpha ? s0->alpha_context :
s0;
666 int mb_row, mb_col, mb_row_flip, mb_offset = 0;
668 ptrdiff_t stride_y, stride_uv;
674 s->default_models_init(s);
675 for (block=0; block<s->mb_height*s->mb_width; block++)
680 s->parse_vector_models(s);
684 if (s->parse_coeff_models(s))
687 memset(s->prev_dc, 0,
sizeof(s->prev_dc));
691 for (block=0; block < 4*s->mb_width+6; block++) {
693 s->above_blocks[
block].dc_coeff = 0;
694 s->above_blocks[
block].not_null_dc = 0;
706 for (mb_row=0; mb_row<s->mb_height; mb_row++) {
708 mb_row_flip = s->mb_height - mb_row - 1;
710 mb_row_flip = mb_row;
712 for (block=0; block<4; block++) {
714 s->left_block[
block].dc_coeff = 0;
715 s->left_block[
block].not_null_dc = 0;
717 memset(s->coeff_ctx, 0,
sizeof(s->coeff_ctx));
718 memset(s->coeff_ctx_last, 24,
sizeof(s->coeff_ctx_last));
720 s->above_block_idx[0] = 1;
721 s->above_block_idx[1] = 2;
722 s->above_block_idx[2] = 1;
723 s->above_block_idx[3] = 2;
724 s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
725 s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
727 s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
728 s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
729 s->block_offset[1] = s->block_offset[0] + 8;
730 s->block_offset[3] = s->block_offset[2] + 8;
731 s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
732 s->block_offset[5] = s->block_offset[4];
734 for (mb_col=0; mb_col<s->mb_width; mb_col++) {
740 s->discard_frame = 1;
748 for (y=0; y<4; y++) {
749 s->above_block_idx[y] += 2;
750 s->block_offset[y] += 16;
753 for (uv=4; uv<6; uv++) {
754 s->above_block_idx[uv] += 1;
755 s->block_offset[uv] += 8;
761 s->have_undamaged_frame = 1;
783 int flip,
int has_alpha)
795 for (i = 0; i < 64; i++) {
796 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3)) 808 s->edge_emu_buffer_alloc =
NULL;
810 s->above_blocks =
NULL;
811 s->macroblocks =
NULL;
813 s->deblock_filtering = 1;
818 s->has_alpha = has_alpha;
820 s->modelp = &s->model;
847 av_freep(&s->edge_emu_buffer_alloc);
av_cold int ff_vp56_free(AVCodecContext *avctx)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
discard all frames except keyframes
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
const uint8_t ff_vp56_ac_dequant[64]
This structure describes decoded (raw) audio or video data.
ptrdiff_t const GLvoid * data
VP5 and VP6 compatible video decoder (common features)
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col, VP56Frame ref_frame)
int coded_width
Bitstream width / height, may be different from width/height e.g.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
uint8_t mb_types_stats[3][10][2]
static void vp56_idct_put(VP56Context *s, uint8_t *dest, ptrdiff_t stride, int16_t *block, int selector)
const VP56Tree ff_vp56_pmbtm_tree[]
Inter MB, no vector, from previous frame.
static VP56mb vp56_parse_mb_type(VP56Context *s, VP56mb prev_type, int ctx)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
#define FF_ARRAY_ELEMS(a)
av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
void ff_vp3dsp_idct10_add(uint8_t *dest, ptrdiff_t stride, int16_t *block)
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
The exact code depends on how similar the blocks are and how related they are to the block
void ff_vp3dsp_idct10_put(uint8_t *dest, ptrdiff_t stride, int16_t *block)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Undefined Behavior: In the C language, some operations are undefined, like signed integer overflow, dereferencing freed pointers, or accessing outside allocated space. Undefined Behavior must not occur in a C program; it is not safe even if the output of the undefined operations is unused. The unsafety may seem like nit-picking, but optimizing compilers have in fact optimized code on the assumption that no Undefined Behavior occurs. Optimizing code based on wrong assumptions can and has in some cases led to effects beyond the output of computations. The signed integer overflow problem in speed-critical code: Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s, int flip, int has_alpha)
static int vp56_conceal_mb(VP56Context *s, int row, int col, int is_alpha)
const uint8_t ff_vp56_dc_dequant[64]
const uint8_t ff_vp56_filter_threshold[]
const int8_t ff_vp56_candidate_predictor_pos[12][2]
static av_always_inline int vp56_rac_get_tree(VP56RangeCoder *c, const VP56Tree *tree, const uint8_t *probs)
Inter MB, second vector, from golden frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const uint16_t mask[17]
static void flip(AVCodecContext *avctx, AVFrame *frame)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int skip_alpha
Skip processing alpha if supported by codec.
static av_always_inline int vp56_rac_get(VP56RangeCoder *c)
int error_concealment
error concealment flags
static void vp56_parse_mb_type_models(VP56Context *s)
int flags
AV_CODEC_FLAG_*.
void ff_vp56_init_dequant(VP56Context *s, int quantizer)
const uint8_t ff_vp56_b6to4[]
static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv, ptrdiff_t stride, int dx, int dy)
Inter MB, first vector, from previous frame.
static int vp56_size_changed(VP56Context *s)
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array through a pointer to a pointer.
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
VP5 and VP6 compatible video decoder (common data)
const uint8_t ff_vp56_pre_def_mb_type_stats[16][3][10][2]
Inter MB, first vector, from golden frame.
enum AVPictureType pict_type
Picture type of the frame.
int width
picture width / height.
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static int vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
void ff_vp3dsp_h_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
const uint8_t ff_vp56_b2p[]
static const int8_t mv[256][2]
const VP56Tree ff_vp56_pmbt_tree[]
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
Inter MB, 4 vectors, from previous frame.
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things.
Inter MB, no vector, from golden frame.
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Inter MB, second vector, from previous frame.
main external API structure.
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff)*mv_scale Intra DC Prediction block[y][x] dc[1]
uint8_t mb_type[3][10][10]
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int)
static VP56mb vp56_conceal_mv(VP56Context *s, int row, int col)
Inter MB, above/left vector + delta, from previous frame.
const uint8_t ff_zigzag_direct[64]
static void vp56_decode_4mv(VP56Context *s, int row, int col)
const uint8_t ff_vp56_mb_type_model_model[]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
static av_always_inline void vp56_render_mb(VP56Context *s, int row, int col, int is_alpha, VP56mb mb_type)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
av_cold int ff_vp56_free_context(VP56Context *s)
void ff_vp3dsp_v_loop_filter_12(uint8_t *first_pixel, ptrdiff_t stride, int *bounding_values)
GLint GLenum GLboolean GLsizei stride
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
const VP56Frame ff_vp56_reference_frame[]
void ff_vp3dsp_set_bounding_values(int *bounding_values_array, int filter_limit)
static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
Inter MB, above/left vector + delta, from golden frame.
int key_frame
1 -> keyframe, 0 -> not a keyframe
static void vp56_idct_add(VP56Context *s, uint8_t *dest, ptrdiff_t stride, int16_t *block, int selector)
#define FFSWAP(type, a, b)
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src, ptrdiff_t stride, int x, int y)
This structure stores compressed data.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
static int vp56_rac_gets(VP56RangeCoder *c, int bits)