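/**
 * @file
 * VP5 and VP6 compatible video decoder (common features)
 */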
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

#include "vp56.h"
#include "vp56data.h"

void ff_vp56_init_dequant(VP56Context *s, int quantizer)
{
    s->quantizer = quantizer;
    s->dequant_dc = vp56_dc_dequant[quantizer] << 2;
    s->dequant_ac = vp56_ac_dequant[quantizer] << 2;
    memset(s->qscale_table, quantizer, s->mb_width);
}

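/**
 * Scan neighbouring macroblocks that used the given reference frame for
 * distinct, non-zero motion vectors. Up to two are stored in
 * s->vector_candidate[], the position of the first accepted candidate is
 * recorded in s->vector_candidate_pos, and the return value (0, 1 or 2)
 * is used as the context for macroblock type parsing.
 */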
static int vp56_get_vectors_predictors(VP56Context *s, int row, int col,
                                        VP56Frame ref_frame)
{
    int nb_pred = 0;
    VP56mv vect[2] = {{0,0}, {0,0}};
    int pos, offset;
    VP56mv mvp;

    for (pos=0; pos<12; pos++) {
        mvp.x = col + vp56_candidate_predictor_pos[pos][0];
        mvp.y = row + vp56_candidate_predictor_pos[pos][1];
        if (mvp.x < 0 || mvp.x >= s->mb_width ||
            mvp.y < 0 || mvp.y >= s->mb_height)
            continue;
        offset = mvp.x + s->mb_width*mvp.y;

        if (vp56_reference_frame[s->macroblocks[offset].type] != ref_frame)
            continue;
        if ((s->macroblocks[offset].mv.x == vect[0].x &&
             s->macroblocks[offset].mv.y == vect[0].y) ||
            (s->macroblocks[offset].mv.x == 0 &&
             s->macroblocks[offset].mv.y == 0))
            continue;

        vect[nb_pred++] = s->macroblocks[offset].mv;
        if (nb_pred > 1) {
            nb_pred = -1;
            break;
        }
        s->vector_candidate_pos = pos;
    }

    s->vector_candidate[0] = vect[0];
    s->vector_candidate[1] = vect[1];

    return nb_pred+1;
}

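/**
 * Update the macroblock type statistics from the bitstream for each of the
 * three prediction contexts, then derive the binary-tree probabilities
 * (model->mb_type[][][]) used by vp56_parse_mb_type().
 */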
static void vp56_parse_mb_type_models(VP56Context *s)
{
    VP56RangeCoder *c = &s->c;
    VP56Model *model = s->modelp;
    int i, ctx, type;

    for (ctx=0; ctx<3; ctx++) {
        if (vp56_rac_get_prob(c, 174)) {
            int idx = vp56_rac_gets(c, 4);
            memcpy(model->mb_types_stats[ctx],
                   vp56_pre_def_mb_type_stats[idx][ctx],
                   sizeof(model->mb_types_stats[ctx]));
        }
        if (vp56_rac_get_prob(c, 254)) {
            for (type=0; type<10; type++) {
                for (i=0; i<2; i++) {
                    if (vp56_rac_get_prob(c, 205)) {
                        int delta, sign = vp56_rac_get(c);

                        delta = vp56_rac_get_tree(c, vp56_pmbtm_tree,
                                                  vp56_mb_type_model_model);
                        if (!delta)
                            delta = 4 * vp56_rac_gets(c, 7);
                        model->mb_types_stats[ctx][type][i] += (delta ^ -sign) + sign;
                    }
                }
            }
        }
    }

    /* compute MB type probability tables based on previous MB type */
    for (ctx=0; ctx<3; ctx++) {
        int p[10];

        for (type=0; type<10; type++)
            p[type] = 100 * model->mb_types_stats[ctx][type][1];

        for (type=0; type<10; type++) {
            int p02, p34, p0234, p17, p56, p89, p5689, p156789;

            /* conservative MB type probability */
            model->mb_type[ctx][type][0] = 255 - (255 * model->mb_types_stats[ctx][type][0]) /
                (1 + model->mb_types_stats[ctx][type][0] + model->mb_types_stats[ctx][type][1]);

            p[type] = 0;    /* same MB type => weight is null */

            /* binary tree parsing probabilities */
            p02 = p[0] + p[2];
            p34 = p[3] + p[4];
            p0234 = p02 + p34;
            p17 = p[1] + p[7];
            p56 = p[5] + p[6];
            p89 = p[8] + p[9];
            p5689 = p56 + p89;
            p156789 = p17 + p5689;

            model->mb_type[ctx][type][1] = 1 + 255 * p0234/(1+p0234+p156789);
            model->mb_type[ctx][type][2] = 1 + 255 * p02 / (1+p0234);
            model->mb_type[ctx][type][3] = 1 + 255 * p17 / (1+p156789);
            model->mb_type[ctx][type][4] = 1 + 255 * p[0] / (1+p02);
            model->mb_type[ctx][type][5] = 1 + 255 * p[3] / (1+p34);
            model->mb_type[ctx][type][6] = 1 + 255 * p[1] / (1+p17);
            model->mb_type[ctx][type][7] = 1 + 255 * p56 / (1+p5689);
            model->mb_type[ctx][type][8] = 1 + 255 * p[5] / (1+p56);
            model->mb_type[ctx][type][9] = 1 + 255 * p[8] / (1+p89);

            /* restore initial value */
            p[type] = 100 * model->mb_types_stats[ctx][type][1];
        }
    }
}

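/**
 * Either keep the previous macroblock type or decode a new one with the
 * probability tree built in vp56_parse_mb_type_models().
 */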
static VP56mb vp56_parse_mb_type(VP56Context *s,
                                 VP56mb prev_type, int ctx)
{
    uint8_t *mb_type_model = s->modelp->mb_type[ctx][prev_type];
    VP56RangeCoder *c = &s->c;

    if (vp56_rac_get_prob(c, mb_type_model[0]))
        return prev_type;
    else
        return vp56_rac_get_tree(c, vp56_pmbt_tree, mb_type_model);
}

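/**
 * Decode one motion vector per 8x8 luma block for a 4V macroblock and derive
 * the two chroma vectors from the average of the four luma vectors.
 */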
static void vp56_decode_4mv(VP56Context *s, int row, int col)
{
    VP56mv mv = {0,0};
    int type[4];
    int b;

    /* parse the motion vector type of each luma block */
    for (b=0; b<4; b++) {
        type[b] = vp56_rac_gets(&s->c, 2);
        if (type[b])
            type[b]++;  /* only returns 0, 2, 3 or 4 (all INTER_PF) */
    }

    /* get vectors */
    for (b=0; b<4; b++) {
        switch (type[b]) {
        case VP56_MB_INTER_NOVEC_PF:
            s->mv[b] = (VP56mv) {0,0};
            break;
        case VP56_MB_INTER_DELTA_PF:
            s->parse_vector_adjustment(s, &s->mv[b]);
            break;
        case VP56_MB_INTER_V1_PF:
            s->mv[b] = s->vector_candidate[0];
            break;
        case VP56_MB_INTER_V2_PF:
            s->mv[b] = s->vector_candidate[1];
            break;
        }
        mv.x += s->mv[b].x;
        mv.y += s->mv[b].y;
    }

    /* the last luma vector is stored as the macroblock vector (used as predictor) */
    s->macroblocks[row * s->mb_width + col].mv = s->mv[3];

    /* chroma vectors are the average of the luma vectors */
    if (s->avctx->codec->id == AV_CODEC_ID_VP5) {
        s->mv[4].x = s->mv[5].x = RSHIFT(mv.x,2);
        s->mv[4].y = s->mv[5].y = RSHIFT(mv.y,2);
    } else {
        s->mv[4] = s->mv[5] = (VP56mv) {mv.x/4, mv.y/4};
    }
}

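/**
 * Decode the macroblock type and its motion vector(s) for the macroblock at
 * (row, col), filling s->mv[] and the per-macroblock bookkeeping.
 */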
static VP56mb vp56_decode_mv(VP56Context *s, int row, int col)
{
    VP56mv *mv, vect = {0,0};
    int ctx, b;

    ctx = vp56_get_vectors_predictors(s, row, col, VP56_FRAME_PREVIOUS);
    s->mb_type = vp56_parse_mb_type(s, s->mb_type, ctx);
    s->macroblocks[row * s->mb_width + col].type = s->mb_type;

    switch (s->mb_type) {
    case VP56_MB_INTER_V1_PF:
        mv = &s->vector_candidate[0];
        break;

    case VP56_MB_INTER_V2_PF:
        mv = &s->vector_candidate[1];
        break;

    case VP56_MB_INTER_V1_GF:
        vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
        mv = &s->vector_candidate[0];
        break;

    case VP56_MB_INTER_V2_GF:
        vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
        mv = &s->vector_candidate[1];
        break;

    case VP56_MB_INTER_DELTA_PF:
        s->parse_vector_adjustment(s, &vect);
        mv = &vect;
        break;

    case VP56_MB_INTER_DELTA_GF:
        vp56_get_vectors_predictors(s, row, col, VP56_FRAME_GOLDEN);
        s->parse_vector_adjustment(s, &vect);
        mv = &vect;
        break;

    case VP56_MB_INTER_4V:
        vp56_decode_4mv(s, row, col);
        return s->mb_type;

    default:
        mv = &vect;
        break;
    }

    s->macroblocks[row*s->mb_width + col].mv = *mv;

    /* same vector for all blocks */
    for (b=0; b<6; b++)
        s->mv[b] = *mv;

    return s->mb_type;
}

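/**
 * Predict the DC coefficient of each block from the left and above neighbours
 * that used the same reference frame (falling back to the previous DC of that
 * plane when none match), then dequantize it.
 */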
static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
{
    int idx = s->scantable.permutated[0];
    int b;

    for (b=0; b<6; b++) {
        VP56RefDc *ab = &s->above_blocks[s->above_block_idx[b]];
        VP56RefDc *lb = &s->left_block[ff_vp56_b6to4[b]];
        int count = 0;
        int dc = 0;
        int i;

        if (ref_frame == lb->ref_frame) {
            dc += lb->dc_coeff;
            count++;
        }
        if (ref_frame == ab->ref_frame) {
            dc += ab->dc_coeff;
            count++;
        }
        if (s->avctx->codec->id == AV_CODEC_ID_VP5)
            for (i=0; i<2; i++)
                if (count < 2 && ref_frame == ab[-1+2*i].ref_frame) {
                    dc += ab[-1+2*i].dc_coeff;
                    count++;
                }
        if (count == 0)
            dc = s->prev_dc[ff_vp56_b2p[b]][ref_frame];
        else if (count == 2)
            dc /= 2;

        s->block_coeff[b][idx] += dc;
        s->prev_dc[ff_vp56_b2p[b]][ref_frame] = s->block_coeff[b][idx];
        ab->dc_coeff = s->block_coeff[b][idx];
        ab->ref_frame = ref_frame;
        lb->dc_coeff = s->block_coeff[b][idx];
        lb->ref_frame = ref_frame;
        s->block_coeff[b][idx] *= s->dequant_dc;
    }
}

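/* Filter the block edges of the motion compensation source, using the
 * quantizer-dependent threshold. */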
static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
                                int stride, int dx, int dy)
{
    int t = vp56_filter_threshold[s->quantizer];
    if (dx) s->vp56dsp.edge_filter_hor(yuv +         10-dx , stride, t);
    if (dy) s->vp56dsp.edge_filter_ver(yuv + stride*(10-dy), stride, t);
}

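/**
 * Motion compensation for one 8x8 block: handles edge emulation when the
 * reference block falls outside the plane, optional deblocking of the source,
 * and the overlap/bilinear filtering required for sub-pel motion vectors.
 */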
static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
                    int stride, int x, int y)
{
    uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
    uint8_t *src_block;
    int src_offset;
    int overlap_offset = 0;
    int mask = s->vp56_coord_div[b] - 1;
    int deblock_filtering = s->deblock_filtering;
    int dx;
    int dy;

    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY
         && !s->framep[VP56_FRAME_CURRENT]->key_frame))
        deblock_filtering = 0;

    dx = s->mv[b].x / s->vp56_coord_div[b];
    dy = s->mv[b].y / s->vp56_coord_div[b];

    if (b >= 4) {
        x /= 2;
        y /= 2;
    }
    x += dx - 2;
    y += dy - 2;

    if (x<0 || x+12>=s->plane_width[plane] ||
        y<0 || y+12>=s->plane_height[plane]) {
        s->dsp.emulated_edge_mc(s->edge_emu_buffer,
                                src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                stride, 12, 12, x, y,
                                s->plane_width[plane],
                                s->plane_height[plane]);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else if (deblock_filtering) {
        /* only need a 12x12 block, but there is no such dsp function, */
        /* so copy a 16x12 block */
        s->dsp.put_pixels_tab[0][0](s->edge_emu_buffer,
                                    src + s->block_offset[b] + (dy-2)*stride + (dx-2),
                                    stride, 12);
        src_block = s->edge_emu_buffer;
        src_offset = 2 + 2*stride;
    } else {
        src_block = src;
        src_offset = s->block_offset[b] + dy*stride + dx;
    }

    if (deblock_filtering)
        vp56_deblock_filter(s, src_block, stride, dx&7, dy&7);

    if (s->mv[b].x & mask)
        overlap_offset += (s->mv[b].x > 0) ? 1 : -1;
    if (s->mv[b].y & mask)
        overlap_offset += (s->mv[b].y > 0) ? stride : -stride;

    if (overlap_offset) {
        if (s->filter)
            s->filter(s, dst, src_block, src_offset, src_offset+overlap_offset,
                      stride, s->mv[b], mask, s->filter_selection, b<4);
        else
            s->dsp.put_no_rnd_pixels_l2[1](dst, src_block+src_offset,
                                           src_block+src_offset+overlap_offset,
                                           stride, 8);
    } else {
        s->dsp.put_pixels_tab[1][0](dst, src_block+src_offset, stride, 8);
    }
}

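/**
 * Decode and reconstruct one macroblock: parse its coefficients, apply DC
 * prediction, then either an intra IDCT, a copy from the reference frame plus
 * IDCT add, or motion compensation plus IDCT add, depending on the type.
 */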
static void vp56_decode_mb(VP56Context *s, int row, int col, int is_alpha)
{
    AVFrame *frame_current, *frame_ref;
    VP56mb mb_type;
    VP56Frame ref_frame;
    int b, ab, b_max, plane, off;

    if (s->framep[VP56_FRAME_CURRENT]->key_frame)
        mb_type = VP56_MB_INTRA;
    else
        mb_type = vp56_decode_mv(s, row, col);
    ref_frame = vp56_reference_frame[mb_type];

    s->dsp.clear_blocks(*s->block_coeff);

    s->parse_coeff(s);

    vp56_add_predictors_dc(s, ref_frame);

    frame_current = s->framep[VP56_FRAME_CURRENT];
    frame_ref = s->framep[ref_frame];
    if (mb_type != VP56_MB_INTRA && !frame_ref->data[0])
        return;

    ab = 6*is_alpha;
    b_max = 6 - 2*is_alpha;

    switch (mb_type) {
    case VP56_MB_INTRA:
        for (b=0; b<b_max; b++) {
            plane = ff_vp56_b2p[b+ab];
            s->vp3dsp.idct_put(frame_current->data[plane] + s->block_offset[b],
                               s->stride[plane], s->block_coeff[b]);
        }
        break;

    case VP56_MB_INTER_NOVEC_PF:
    case VP56_MB_INTER_NOVEC_GF:
        for (b=0; b<b_max; b++) {
            plane = ff_vp56_b2p[b+ab];
            off = s->block_offset[b];
            s->dsp.put_pixels_tab[1][0](frame_current->data[plane] + off,
                                        frame_ref->data[plane] + off,
                                        s->stride[plane], 8);
            s->vp3dsp.idct_add(frame_current->data[plane] + off,
                               s->stride[plane], s->block_coeff[b]);
        }
        break;

    case VP56_MB_INTER_DELTA_PF:
    case VP56_MB_INTER_V1_PF:
    case VP56_MB_INTER_V2_PF:
    case VP56_MB_INTER_DELTA_GF:
    case VP56_MB_INTER_4V:
    case VP56_MB_INTER_V1_GF:
    case VP56_MB_INTER_V2_GF:
        for (b=0; b<b_max; b++) {
            int x_off = b==1 || b==3 ? 8 : 0;
            int y_off = b==2 || b==3 ? 8 : 0;
            plane = ff_vp56_b2p[b+ab];
            vp56_mc(s, b, plane, frame_ref->data[plane], s->stride[plane],
                    16*col+x_off, 16*row+y_off);
            s->vp3dsp.idct_add(frame_current->data[plane] + s->block_offset[b],
                               s->stride[plane], s->block_coeff[b]);
        }
        break;
    }
}

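/**
 * Recompute plane and macroblock dimensions after a resolution change and
 * reallocate the per-frame tables and the edge emulation buffer.
 */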
static int vp56_size_changed(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int stride = s->framep[VP56_FRAME_CURRENT]->linesize[0];
    int i;

    s->plane_width[0]  = s->plane_width[3]  = avctx->coded_width;
    s->plane_width[1]  = s->plane_width[2]  = avctx->coded_width/2;
    s->plane_height[0] = s->plane_height[3] = avctx->coded_height;
    s->plane_height[1] = s->plane_height[2] = avctx->coded_height/2;

    for (i=0; i<4; i++)
        s->stride[i] = s->flip * s->framep[VP56_FRAME_CURRENT]->linesize[i];

    s->mb_width  = (avctx->coded_width +15) / 16;
    s->mb_height = (avctx->coded_height+15) / 16;

    if (s->mb_width > 1000 || s->mb_height > 1000) {
        avcodec_set_dimensions(avctx, 0, 0);
        av_log(avctx, AV_LOG_ERROR, "picture too big\n");
        return -1;
    }

    s->qscale_table = av_realloc(s->qscale_table, s->mb_width);
    s->above_blocks = av_realloc(s->above_blocks,
                                 (4*s->mb_width+6) * sizeof(*s->above_blocks));
    s->macroblocks = av_realloc(s->macroblocks,
                                s->mb_width*s->mb_height*sizeof(*s->macroblocks));
    av_free(s->edge_emu_buffer_alloc);
    s->edge_emu_buffer_alloc = av_malloc(16*stride);
    s->edge_emu_buffer = s->edge_emu_buffer_alloc;
    if (s->flip < 0)
        s->edge_emu_buffer += 15 * stride;

    if (s->alpha_context)
        return vp56_size_changed(s->alpha_context);

    return 0;
}

static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *, int, int);

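/**
 * Common VP5/VP6 frame decoding entry point: picks an unused frame buffer,
 * parses the main (and optional alpha) header, decodes the macroblocks via
 * avctx->execute2() and releases the frame buffers that are no longer
 * referenced.
 */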
int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                         AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    VP56Context *s = avctx->priv_data;
    AVFrame *p = 0;
    int remaining_buf_size = avpkt->size;
    int av_uninit(alpha_offset);
    int i, res;

    /* select a current frame from the unused frames */
    for (i = 0; i < 4; ++i) {
        if (!s->frames[i].data[0]) {
            p = &s->frames[i];
            break;
        }
    }
    av_assert0(p != 0);
    s->framep[VP56_FRAME_CURRENT] = p;
    if (s->alpha_context)
        s->alpha_context->framep[VP56_FRAME_CURRENT] = p;

    if (s->has_alpha) {
        if (remaining_buf_size < 3)
            return -1;
        alpha_offset = bytestream_get_be24(&buf);
        remaining_buf_size -= 3;
        if (remaining_buf_size < alpha_offset)
            return -1;
    }

    res = s->parse_header(s, buf, remaining_buf_size);
    if (res < 0)
        return res;

    if (res == VP56_SIZE_CHANGE) {
        for (i = 0; i < 4; i++) {
            if (s->frames[i].data[0])
                avctx->release_buffer(avctx, &s->frames[i]);
        }
    }

    p->reference = 3;
    if (ff_get_buffer(avctx, p) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if (res == VP56_SIZE_CHANGE) {
        if (vp56_size_changed(s)) {
            avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    if (s->has_alpha) {
        int bak_w = avctx->width;
        int bak_h = avctx->height;
        int bak_cw = avctx->coded_width;
        int bak_ch = avctx->coded_height;
        buf += alpha_offset;
        remaining_buf_size -= alpha_offset;

        res = s->alpha_context->parse_header(s->alpha_context, buf, remaining_buf_size);
        if (res != 0) {
            if (res == VP56_SIZE_CHANGE) {
                av_log(avctx, AV_LOG_ERROR, "Alpha reconfiguration\n");
                avctx->width  = bak_w;
                avctx->height = bak_h;
                avctx->coded_width  = bak_cw;
                avctx->coded_height = bak_ch;
            }
            avctx->release_buffer(avctx, p);
            return -1;
        }
    }

    avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, s->has_alpha + 1);

    /* release frames that are not referenced anymore */
    for (i = 0; i < 4; ++i) {
        AVFrame *victim = &s->frames[i];
        if (!victim->data[0])
            continue;
        if (victim != s->framep[VP56_FRAME_PREVIOUS] &&
            victim != s->framep[VP56_FRAME_GOLDEN] &&
            (!s->has_alpha || victim != s->alpha_context->framep[VP56_FRAME_GOLDEN]))
            avctx->release_buffer(avctx, victim);
    }

    p->qstride = 0;
    p->qscale_table = s->qscale_table;
    p->qscale_type = FF_QSCALE_TYPE_VP56;
    *(AVFrame*)data = *p;
    *got_frame = 1;

    return avpkt->size;
}

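/**
 * Per-plane decoding job (jobnr 0 = YUV planes, jobnr 1 = alpha plane):
 * initializes the models and prediction state, then decodes every macroblock
 * of the current frame row by row.
 */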
static int ff_vp56_decode_mbs(AVCodecContext *avctx, void *data,
                              int jobnr, int threadnr)
{
    VP56Context *s0 = avctx->priv_data;
    int is_alpha = (jobnr == 1);
    VP56Context *s = is_alpha ? s0->alpha_context : s0;
    AVFrame *const p = s->framep[VP56_FRAME_CURRENT];
    int mb_row, mb_col, mb_row_flip, mb_offset = 0;
    int block, y, uv, stride_y, stride_uv;

    if (p->key_frame) {
        p->pict_type = AV_PICTURE_TYPE_I;
        s->default_models_init(s);
        for (block=0; block<s->mb_height*s->mb_width; block++)
            s->macroblocks[block].type = VP56_MB_INTRA;
    } else {
        p->pict_type = AV_PICTURE_TYPE_P;
        vp56_parse_mb_type_models(s);
        s->parse_vector_models(s);
        s->mb_type = VP56_MB_INTER_NOVEC_PF;
    }

    if (s->parse_coeff_models(s))
        goto next;

    memset(s->prev_dc, 0, sizeof(s->prev_dc));
    s->prev_dc[1][VP56_FRAME_CURRENT] = 128;
    s->prev_dc[2][VP56_FRAME_CURRENT] = 128;

    for (block=0; block < 4*s->mb_width+6; block++) {
        s->above_blocks[block].ref_frame = VP56_FRAME_NONE;
        s->above_blocks[block].dc_coeff = 0;
        s->above_blocks[block].not_null_dc = 0;
    }
    s->above_blocks[2*s->mb_width + 2].ref_frame = VP56_FRAME_CURRENT;
    s->above_blocks[3*s->mb_width + 4].ref_frame = VP56_FRAME_CURRENT;

    stride_y  = p->linesize[0];
    stride_uv = p->linesize[1];

    if (s->flip < 0)
        mb_offset = 7;

    /* main macroblocks loop */
    for (mb_row=0; mb_row<s->mb_height; mb_row++) {
        if (s->flip < 0)
            mb_row_flip = s->mb_height - mb_row - 1;
        else
            mb_row_flip = mb_row;

        for (block=0; block<4; block++) {
            s->left_block[block].ref_frame = VP56_FRAME_NONE;
            s->left_block[block].dc_coeff = 0;
            s->left_block[block].not_null_dc = 0;
        }
        memset(s->coeff_ctx, 0, sizeof(s->coeff_ctx));
        memset(s->coeff_ctx_last, 24, sizeof(s->coeff_ctx_last));

        s->above_block_idx[0] = 1;
        s->above_block_idx[1] = 2;
        s->above_block_idx[2] = 1;
        s->above_block_idx[3] = 2;
        s->above_block_idx[4] = 2*s->mb_width + 2 + 1;
        s->above_block_idx[5] = 3*s->mb_width + 4 + 1;

        s->block_offset[s->frbi] = (mb_row_flip*16 + mb_offset) * stride_y;
        s->block_offset[s->srbi] = s->block_offset[s->frbi] + 8*stride_y;
        s->block_offset[1] = s->block_offset[0] + 8;
        s->block_offset[3] = s->block_offset[2] + 8;
        s->block_offset[4] = (mb_row_flip*8 + mb_offset) * stride_uv;
        s->block_offset[5] = s->block_offset[4];

        for (mb_col=0; mb_col<s->mb_width; mb_col++) {
            vp56_decode_mb(s, mb_row, mb_col, is_alpha);

            for (y=0; y<4; y++) {
                s->above_block_idx[y] += 2;
                s->block_offset[y] += 16;
            }

            for (uv=4; uv<6; uv++) {
                s->above_block_idx[uv] += 1;
                s->block_offset[uv] += 8;
            }
        }
    }

next:
    if (p->key_frame || s->golden_frame) {
        s->framep[VP56_FRAME_GOLDEN] = p;
    }

    FFSWAP(AVFrame *, s->framep[VP56_FRAME_CURRENT],
                      s->framep[VP56_FRAME_PREVIOUS]);
    return 0;
}

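/* Decoder construction: ff_vp56_init() sets up the context embedded in
 * avctx->priv_data; ff_vp56_init_context() does the actual work so that an
 * additional context (such as the alpha-plane context used elsewhere) can be
 * initialized the same way. */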
av_cold void ff_vp56_init(AVCodecContext *avctx, int flip, int has_alpha)
{
    VP56Context *s = avctx->priv_data;
    ff_vp56_init_context(avctx, s, flip, has_alpha);
}

av_cold void ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
                                  int flip, int has_alpha)
{
    int i;

    s->avctx = avctx;
    avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;

    ff_dsputil_init(&s->dsp, avctx);
    ff_vp3dsp_init(&s->vp3dsp, avctx->flags);
    ff_vp56dsp_init(&s->vp56dsp, avctx->codec->id);
    ff_init_scantable_permutation(s->dsp.idct_permutation, s->vp3dsp.idct_perm);
    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);

    for (i=0; i<4; i++) {
        s->framep[i] = &s->frames[i];
        avcodec_get_frame_defaults(&s->frames[i]);
    }
    s->framep[VP56_FRAME_UNUSED]  = s->framep[VP56_FRAME_GOLDEN];
    s->framep[VP56_FRAME_UNUSED2] = s->framep[VP56_FRAME_GOLDEN2];
    s->edge_emu_buffer_alloc = NULL;

    s->above_blocks = NULL;
    s->macroblocks = NULL;
    s->quantizer = -1;
    s->deblock_filtering = 1;
    s->golden_frame = 0;

    s->filter = NULL;

    s->has_alpha = has_alpha;

    s->modelp = &s->model;

    if (flip) {
        s->flip = -1;
        s->frbi = 2;
        s->srbi = 0;
    } else {
        s->flip = 1;
        s->frbi = 0;
        s->srbi = 2;
    }
}

av_cold int ff_vp56_free(AVCodecContext *avctx)
{
    VP56Context *s = avctx->priv_data;
    return ff_vp56_free_context(s);
}

av_cold int ff_vp56_free_context(VP56Context *s)
{
    AVCodecContext *avctx = s->avctx;
    int i;

    av_freep(&s->qscale_table);
    av_freep(&s->above_blocks);
    av_freep(&s->macroblocks);
    av_freep(&s->edge_emu_buffer_alloc);
    for (i = 0; i < 4; ++i) {
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    }
    return 0;
}