   36     s->quantizer = quantizer;
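
      /* vp56_get_vectors_predictors(): scan the 12 candidate predictor
       * positions around (row, col) and keep the first two distinct,
       * non-zero vectors of already decoded macroblocks that use the same
       * reference frame; they become s->vector_candidate[0..1]. */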
 
   45     VP56mv vect[2] = {{0,0}, {0,0}};
 
   49     for (pos=0; pos<12; pos++) {
 
   51         mvp.y = row + ff_vp56_candidate_predictor_pos[pos][1];
 
   52         if (mvp.x < 0 || mvp.x >= s->mb_width ||

   53             mvp.y < 0 || mvp.y >= s->mb_height)

   55         offset = mvp.x + s->mb_width*mvp.y;
 
   59         if ((s->macroblocks[offset].mv.x == vect[0].x &&

   60              s->macroblocks[offset].mv.y == vect[0].y) ||
 
   61             (s->macroblocks[offset].mv.x == 0 &&
 
   62              s->macroblocks[offset].mv.y == 0))
 
   65         vect[nb_pred++] = s->macroblocks[offset].mv;
 
   70         s->vector_candidate_pos = pos;
 
   73     s->vector_candidate[0] = vect[0];
 
   74     s->vector_candidate[1] = vect[1];
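
      /* vp56_parse_mb_type_models(): per context (0..2), optionally reload a
       * predefined macroblock-type statistics table
       * (ff_vp56_pre_def_mb_type_stats) and read per-type adjustments from
       * the range coder. */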
 
   85     for (ctx=0; ctx<3; ctx++) {
 
   93             for (type=0; type<10; type++) {
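
      /* Turn the per-context statistics into binary-tree parsing
       * probabilities over the 10 macroblock types (ff_vp56_pmbt_tree);
       * each node value is scaled into the 1..255 range used by the
       * range coder. */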
 
  110     for (ctx=0; ctx<3; ctx++) {
 
  113         for (type=0; type<10; type++)
 
  116         for (type=0; type<10; type++) {
 
  117             int p02, p34, p0234, p17, p56, p89, p5689, p156789;
 
  132             p156789 = p17 + p5689;
 
  134             model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);

  135             model->mb_type[ctx][type][2] = 1 + 255 * p02  / (1+p0234);

  136             model->mb_type[ctx][type][3] = 1 + 255 * p17  / (1+p156789);

  137             model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);

  138             model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);

  139             model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);

  140             model->mb_type[ctx][type][7] = 1 + 255 * p56  / (1+p5689);

  141             model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);

  142             model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);
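
      /* vp56_parse_mb_type(): either keep prev_type or walk
       * ff_vp56_pmbt_tree with the probabilities selected by
       * mb_type[ctx][prev_type]. */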
 
  151                                  VP56mb prev_type, int ctx)
 
  153     uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
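
      /* vp56_decode_4mv(): one vector per luma block; each block either uses
       * the null vector, reads a delta, or reuses one of the two candidates.
       * The chroma vectors (s->mv[4], s->mv[5]) are the average of the four
       * luma vectors: rounded with RSHIFT for VP5, truncated with /4 for
       * VP6. */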
 
  169     for (b=0; b<4; b++) {
 
  176     for (b=0; b<4; b++) {
 
  182                 s->parse_vector_adjustment(s, &s->mv[b]);
 
  185                 s->mv[b] = s->vector_candidate[0];

  188                 s->mv[b] = s->vector_candidate[1];
 
  196     s->macroblocks[row * s->mb_width + col].mv = s->mv[3];
 
  200         s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);

  201         s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);

  203         s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
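
      /* vp56_decode_mv(): parse the macroblock type for this position, pick
       * or adjust a vector accordingly (candidate 0, candidate 1, a parsed
       * delta, or the null vector), and store it back into s->macroblocks[]
       * for later prediction. */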
 
  214     s->macroblocks[row * s->mb_width + col].type = s->mb_type;
 
  216     switch (s->mb_type) {
 
  218             mv = &s->vector_candidate[0];
 
  222             mv = &s->vector_candidate[1];
 
  227             mv = &s->vector_candidate[0];
 
  232             mv = &s->vector_candidate[1];
 
  236             s->parse_vector_adjustment(s, &vect);
 
  242             s->parse_vector_adjustment(s, &vect);
 
  255     s->macroblocks[row*s->mb_width + col].mv = *mv;
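
      /* vp56_add_predictors_dc(): predict the DC coefficient of each of the
       * six blocks from the left/above neighbours that used the same
       * reference frame (VP5 also looks at the above-left/above-right
       * entries), add the prediction and multiply by dequant_dc. */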
 
  266     int idx = s->idct_scantable[0];
 
  269     for (b=0; b<6; b++) {
 
  270         VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
 
  286                 if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
 
  295         s->block_coeff[b][idx] += dc;

  301         s->block_coeff[b][idx] *= s->dequant_dc;
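
      /* vp56_deblock_filter(): run the horizontal/vertical edge filters on
       * the copied reference block, with a threshold taken from
       * ff_vp56_filter_threshold[s->quantizer]. */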
 
  306                                 ptrdiff_t stride, int dx, int dy)
 
  309     if (dx)  s->vp56dsp.edge_filter_hor(yuv +         10-dx , stride, t);
 
  310     if (dy)  s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
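
      /* vp56_mc(): fetch the 8x8 reference block for block b.  The source is
       * copied through edge_emu_buffer when it crosses the plane borders or
       * when deblock filtering is enabled; if the vector has a fractional
       * part (mv & mask), the two overlapping source positions are blended
       * with s->filter() or put_no_rnd_pixels_l2(). */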
 
  314                     ptrdiff_t stride, int x, int y)
 
  319     int overlap_offset = 0;
 
  320     int mask = s->vp56_coord_div[b] - 1;
 
  321     int deblock_filtering = s->deblock_filtering;
 
  328         deblock_filtering = 0;
 
  330     dx = s->mv[b].x / s->vp56_coord_div[b];

  331     dy = s->mv[b].y / s->vp56_coord_div[b];
 
  340     if (x<0 || x+12>=s->plane_width[plane] ||
 
  341         y<0 || y+12>=s->plane_height[plane]) {
 
  342         s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
 
  343                                  src + s->block_offset[b] + (dy-2)*stride + (dx-2),
 
  346                                  s->plane_width[plane],
 
  347                                  s->plane_height[plane]);
 
  348         src_block = s->edge_emu_buffer;
 
  349         src_offset = 2 + 2*stride;

  350     } else if (deblock_filtering) {
 
  353         s->hdsp.put_pixels_tab[0][0](s->edge_emu_buffer,
 
  354                                      src + s->block_offset[b] + (dy-2)*stride + (dx-2),
 
  356         src_block = s->edge_emu_buffer;
 
  357         src_offset = 2 + 2*stride;

  360         src_offset = s->block_offset[b] + dy*stride + dx;
 
  363     if (deblock_filtering)
 
  366     if (s->mv[b].x & mask)
 
  367         overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
 
  368     if (s->mv[b].y & mask)
 
  369         overlap_offset += (s->mv[b].y > 0) ? stride : -stride;
 
  371     if (overlap_offset) {
 
  373             s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
 
  374                       stride, s->mv[b], mask, s->filter_selection, b<4);
 
  376             s->vp3dsp.put_no_rnd_pixels_l2(dst, src_block+src_offset,
 
  377                                            src_block+src_offset+overlap_offset,
 
  380         s->hdsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
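
      /* vp56_decode_mb(): decode one macroblock.  Intra blocks go straight
       * through idct_put; inter blocks without a vector are copied from the
       * reference frame and get idct_add; the remaining types are motion
       * compensated block by block with vp56_mc() before idct_add. */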
 
  386     AVFrame *frame_current, *frame_ref;
 
  389     int b, ab, b_max, plane, off;
 
  407     b_max = 6 - 2*is_alpha;
 
  411             for (b=0; b<b_max; b++) {
 
  413                 s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
 
  414                                 s->stride[plane], s->block_coeff[b]);
 
  420             for (b=0; b<b_max; b++) {
 
  422                 off = s->block_offset[b];

  423                 s->hdsp.put_pixels_tab[1][0](frame_current->data[plane] + off,

  425                                              s->stride[plane], 8);

  426                 s->vp3dsp.idct_add(frame_current->data[plane] + off,
 
  427                                 s->stride[plane], s->block_coeff[b]);
 
  438             for (b=0; b<b_max; b++) {
 
  439                 int x_off = b==1 || b==3 ? 8 : 0;
 
  440                 int y_off = b==2 || b==3 ? 8 : 0;
 
  442                 vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
 
  443                         16*col+x_off, 16*row+y_off);
 
  444                 s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
 
  445                                 s->stride[plane], s->block_coeff[b]);
 
  451         s->block_coeff[4][0] = 0;
 
  452         s->block_coeff[5][0] = 0;
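
      /* vp56_size_changed(): derive the plane and macroblock dimensions from
       * the coded width/height, sanity-check them, and (re)allocate the
       * above_blocks, macroblocks and edge_emu_buffer arrays. */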
 
  462     s->plane_width[0]  = s->plane_width[3]  = avctx->coded_width;

  463     s->plane_width[1]  = s->plane_width[2]  = avctx->coded_width/2;

  464     s->plane_height[0] = s->plane_height[3] = avctx->coded_height;

  465     s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;
 
  473     if (s->mb_width > 1000 || s->mb_height > 1000) {
 
  480                       sizeof(*s->above_blocks));
 
  482                       sizeof(*s->macroblocks));
 
  483     av_free(s->edge_emu_buffer_alloc);
 
  484     s->edge_emu_buffer_alloc = av_malloc(16*stride);
 
  485     s->edge_emu_buffer = s->edge_emu_buffer_alloc;
 
  486     if (!s->above_blocks || !s->macroblocks || !s->edge_emu_buffer_alloc)
 
  489         s->edge_emu_buffer += 15 * stride;
 
  491     if (s->alpha_context)
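
      /* ff_vp56_decode_frame(): for streams with alpha, the packet starts
       * with a 3-byte big-endian offset to the alpha plane data; the main
       * header (and, when present, the alpha header) is parsed before the
       * macroblocks are decoded. */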
 
  505     int remaining_buf_size = avpkt->size;
 
  511         if (remaining_buf_size < 3)
 
  513         alpha_offset = bytestream_get_be24(&buf);
 
  514         remaining_buf_size -= 3;
 
  515         if (remaining_buf_size < alpha_offset)
 
  519     res = s->parse_header(s, buf, remaining_buf_size);
 
  524         for (i = 0; i < 4; i++) {
 
  526             if (s->alpha_context)
 
  551         int bak_w = avctx->width;

  552         int bak_h = avctx->height;
 
  556         remaining_buf_size -= alpha_offset;
 
  558         res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
 
  562                 avctx->width  = bak_w;
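
      /* ff_vp56_decode_mbs(): one job per plane set (jobnr 1 selects the
       * alpha context).  Resets the DC and coefficient-context state, then
       * walks the macroblocks row by row, advancing block_offset[] and
       * above_block_idx[] for every column. */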
 
  582                               int jobnr, int threadnr)
 
  585     int is_alpha = (jobnr == 1);
 
  586     VP56Context *s = is_alpha ? s0->alpha_context : s0;
 
  588     int mb_row, mb_col, mb_row_flip, mb_offset = 0;
 
  590     ptrdiff_t stride_y, stride_uv;
 
  595         s->default_models_init(s);
 
  596         for (block=0; block<s->mb_height*s->mb_width; block++)
 
  601         s->parse_vector_models(s);
 
  605     if (s->parse_coeff_models(s))
 
  608     memset(s->prev_dc, 0, sizeof(s->prev_dc));
 
  612     for (block=0; block < 4*s->mb_width+6; block++) {
 
  614         s->above_blocks[block].dc_coeff = 0;

  615         s->above_blocks[block].not_null_dc = 0;
 
  627     for (mb_row=0; mb_row<s->mb_height; mb_row++) {
 
  629             mb_row_flip = s->mb_height - mb_row - 1;
 
  631             mb_row_flip = mb_row;
 
  633         for (block=0; block<4; block++) {
 
  635             s->left_block[block].dc_coeff = 0;

  636             s->left_block[block].not_null_dc = 0;

  638         memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));

  639         memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));
 
  641         s->above_block_idx[0] = 1;
 
  642         s->above_block_idx[1] = 2;
 
  643         s->above_block_idx[2] = 1;
 
  644         s->above_block_idx[3] = 2;
 
  645         s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
 
  646         s->above_block_idx[5] = 3*s->mb_width + 4 + 1;
 
  648         s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
 
  649         s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
 
  650         s->block_offset[1] = s->block_offset[0] + 8;
 
  651         s->block_offset[3] = s->block_offset[2] + 8;
 
  652         s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
 
  653         s->block_offset[5] = s->block_offset[4];
 
  655         for (mb_col=0; mb_col<s->mb_width; mb_col++) {
 
  658             for (y=0; y<4; y++) {
 
  659                 s->above_block_idx[y] += 2;

  660                 s->block_offset[y] += 16;
 
  663             for (uv=4; uv<6; uv++) {
 
  664                 s->above_block_idx[uv] += 1;
 
  665                 s->block_offset[uv] += 8;
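
      /* ff_vp56_init_context(): set up the DSP helpers, build the transposed
       * scan table from ff_zigzag_direct, and initialise the default context
       * fields (deblock filtering on, model pointer, alpha flag). */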
 
  690                                   int flip, int has_alpha)
 
  703     for (i = 0; i < 64; i++) {
 
  704 #define TRANSPOSE(x) (((x) >> 3) | (((x) & 7) << 3))

  716     s->edge_emu_buffer_alloc = NULL;

  718     s->above_blocks = NULL;

  719     s->macroblocks = NULL;
 
  721     s->deblock_filtering = 1;
 
  726     s->has_alpha = has_alpha;
 
  728     s->modelp = &s->model;
 
  755     av_freep(&s->edge_emu_buffer_alloc);
 
av_cold int ff_vp56_free(AVCodecContext *avctx)
 
const struct AVCodec * codec
 
discard all frames except keyframes 
 
#define AVERROR_INVALIDDATA
Invalid data found when processing input. 
 
const uint8_t ff_vp56_ac_dequant[64]
 
This structure describes decoded (raw) audio or video data. 
 
VP5 and VP6 compatible video decoder (common features) 
 
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col, VP56Frame ref_frame)
 
int coded_width
Bitstream width / height, may be different from width/height, e.g. when the decoded frame is cropped before being output. 
 
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context. 
 
uint8_t mb_types_stats[3][10][2]
 
const VP56Tree ff_vp56_pmbtm_tree[]
 
Inter MB, no vector, from previous frame. 
 
static VP56mb vp56_parse_mb_type(VP56Context *s, VP56mb prev_type, int ctx)
 
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx. 
 
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
 
#define FF_ARRAY_ELEMS(a)
 
av_cold int ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
 
planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) 
 
static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
 
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values. 
 
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame. 
 
av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s, int flip, int has_alpha)
 
const uint8_t ff_vp56_dc_dequant[64]
 
const uint8_t ff_vp56_filter_threshold[]
 
const int8_t ff_vp56_candidate_predictor_pos[12][2]
 
static av_always_inline int vp56_rac_get_tree(VP56RangeCoder *c, const VP56Tree *tree, const uint8_t *probs)
 
Inter MB, second vector, from golden frame. 
 
int av_reallocp_array(void *ptr, size_t nmemb, size_t size)
 
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered. 
 
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data. 
 
int skip_alpha
Skip processing alpha if supported by codec. 
 
static av_always_inline int vp56_rac_get(VP56RangeCoder *c)
 
static void vp56_parse_mb_type_models(VP56Context *s)
 
void ff_vp56_init_dequant(VP56Context *s, int quantizer)
 
const uint8_t ff_vp56_b6to4[]
 
static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv, ptrdiff_t stride, int dx, int dy)
 
Libavcodec external API header. 
 
Inter MB, first vector, from previous frame. 
 
static int vp56_size_changed(VP56Context *s)
 
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
 
VP5 and VP6 compatible video decoder (common data) 
 
const uint8_t ff_vp56_pre_def_mb_type_stats[16][3][10][2]
 
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
 
Inter MB, first vector, from golden frame. 
 
enum AVPictureType pict_type
Picture type of the frame. 
 
int width
picture width / height. 
 
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 
const uint8_t ff_vp56_b2p[]
 
const VP56Tree ff_vp56_pmbt_tree[]
 
static av_always_inline int vp56_rac_get_prob_branchy(VP56RangeCoder *c, int prob)
 
Inter MB, 4 vectors, from previous frame. 
 
Inter MB, no vector, from golden frame. 
 
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line. 
 
Inter MB, second vector, from previous frame. 
 
main external API structure. 
 
uint8_t mb_type[3][10][10]
 
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame. 
 
static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int)
 
static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
 
Inter MB, above/left vector + delta, from previous frame. 
 
const uint8_t ff_zigzag_direct[64]
 
static void vp56_decode_4mv(VP56Context *s, int row, int col)
 
const uint8_t ff_vp56_mb_type_model_model[]
 
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields. 
 
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes. 
 
av_cold int ff_vp56_free_context(VP56Context *s)
 
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) 
 
common internal api header. 
 
const VP56Frame ff_vp56_reference_frame[]
 
static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
 
int(* execute2)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count)
The codec may call this to execute several independent things. 
 
Inter MB, above/left vector + delta, from golden frame. 
 
int key_frame
1 -> keyframe, 0-> not 
 
#define FFSWAP(type, a, b)
 
av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags)
 
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src, ptrdiff_t stride, int x, int y)
 
av_cold void ff_vp56dsp_init(VP56DSPContext *s, enum AVCodecID codec)
 
This structure stores compressed data. 
 
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later. 
 
static int vp56_rac_gets(VP56RangeCoder *c, int bits)