00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/mathematics.h"
00032 #include "libavutil/pixdesc.h"
00033 #include "libavutil/opt.h"
00034 #include "avcodec.h"
00035 #include "dsputil.h"
00036 #include "mpegvideo.h"
00037 #include "h263.h"
00038 #include "mathops.h"
00039 #include "mjpegenc.h"
00040 #include "msmpeg4.h"
00041 #include "faandct.h"
00042 #include "thread.h"
00043 #include "aandcttab.h"
00044 #include "flv.h"
00045 #include "mpeg4video.h"
00046 #include "internal.h"
00047 #include "bytestream.h"
00048 #include <limits.h>
00049 #include "sp5x.h"
00050
00051
00052
00053
00054 static int encode_picture(MpegEncContext *s, int picture_number);
00055 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
00056 static int sse_mb(MpegEncContext *s);
00057 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
00058 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
00059
00060
00061
00062 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
00063 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
00064
00065 const AVOption ff_mpv_generic_options[] = {
00066 FF_MPV_COMMON_OPTS
00067 { NULL },
00068 };
00069
/**
 * Build the per-qscale quantizer multiplier tables used by the encoder's
 * dct_quantize implementations.
 *
 * For every qscale in [qmin, qmax], each entry of quant_matrix is converted
 * into a fixed-point reciprocal so that quantization becomes a multiply +
 * shift instead of a division.  The layout of the output depends on which
 * forward DCT is installed in dsp->fdct:
 *   - islow / faandct: plain QMAT_SHIFT reciprocals in qmat.
 *   - ifast (AAN):     reciprocals folded with ff_aanscales, since the fast
 *                      DCT leaves the AAN post-scale factors in its output.
 *   - anything else:   both qmat and the 16-bit MMX-style pair table qmat16
 *                      (multiplier in [0], rounding bias in [1]).
 *
 * @param dsp          provides fdct (selects the table layout) and
 *                     idct_permutation (maps scan order to coefficient order)
 * @param qmat         output 32-bit multipliers, indexed [qscale][coef]
 * @param qmat16       output 16-bit multiplier/bias pairs (non-ifast path only)
 * @param quant_matrix input quantization matrix in permuted order
 * @param bias         rounding bias, in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale to fill
 * @param qmax         last qscale to fill (inclusive)
 * @param intra        1 to skip the DC coefficient in the overflow scan
 */
void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
            dsp->fdct == ff_jpeg_fdct_islow_10 ||
            dsp->fdct == ff_faandct) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];

                /* reciprocal in QMAT_SHIFT fixed point; the 64-bit
                 * numerator avoids overflow before the division */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));
            }
        } else if (dsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];

                /* the AAN fast DCT output is scaled by ff_aanscales/2^14,
                 * so fold that factor into the reciprocal here */
                qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
                                        (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = dsp->idct_permutation[i];

                /* 32-bit table, same layout as the islow case */
                qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
                                        (qscale * quant_matrix[j]));

                /* 16-bit multiplier for the SIMD quantizers */
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
                                       (qscale * quant_matrix[j]);

                /* clamp values that do not fit a signed 16-bit multiply */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                /* matching rounding bias, rescaled to the 16-bit multiplier */
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
                                qmat16[qscale][0][i]);
            }
        }

        /* find the smallest extra shift that keeps max_coef * qmat within
         * INT_MAX; intra==1 skips the DC term */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (dsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(NULL, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
00146
00147 static inline void update_qscale(MpegEncContext *s)
00148 {
00149 s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
00150 (FF_LAMBDA_SHIFT + 7);
00151 s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
00152
00153 s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
00154 FF_LAMBDA_SHIFT;
00155 }
00156
00157 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
00158 {
00159 int i;
00160
00161 if (matrix) {
00162 put_bits(pb, 1, 1);
00163 for (i = 0; i < 64; i++) {
00164 put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
00165 }
00166 } else
00167 put_bits(pb, 1, 0);
00168 }
00169
00173 void ff_init_qscale_tab(MpegEncContext *s)
00174 {
00175 int8_t * const qscale_table = s->current_picture.f.qscale_table;
00176 int i;
00177
00178 for (i = 0; i < s->mb_num; i++) {
00179 unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
00180 int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
00181 qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
00182 s->avctx->qmax);
00183 }
00184 }
00185
00186 static void copy_picture_attributes(MpegEncContext *s,
00187 AVFrame *dst,
00188 AVFrame *src)
00189 {
00190 int i;
00191
00192 dst->pict_type = src->pict_type;
00193 dst->quality = src->quality;
00194 dst->coded_picture_number = src->coded_picture_number;
00195 dst->display_picture_number = src->display_picture_number;
00196
00197 dst->pts = src->pts;
00198 dst->interlaced_frame = src->interlaced_frame;
00199 dst->top_field_first = src->top_field_first;
00200
00201 if (s->avctx->me_threshold) {
00202 if (!src->motion_val[0])
00203 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
00204 if (!src->mb_type)
00205 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
00206 if (!src->ref_index[0])
00207 av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
00208 if (src->motion_subsample_log2 != dst->motion_subsample_log2)
00209 av_log(s->avctx, AV_LOG_ERROR,
00210 "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
00211 src->motion_subsample_log2, dst->motion_subsample_log2);
00212
00213 memcpy(dst->mb_type, src->mb_type,
00214 s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
00215
00216 for (i = 0; i < 2; i++) {
00217 int stride = ((16 * s->mb_width ) >>
00218 src->motion_subsample_log2) + 1;
00219 int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
00220
00221 if (src->motion_val[i] &&
00222 src->motion_val[i] != dst->motion_val[i]) {
00223 memcpy(dst->motion_val[i], src->motion_val[i],
00224 2 * stride * height * sizeof(int16_t));
00225 }
00226 if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
00227 memcpy(dst->ref_index[i], src->ref_index[i],
00228 s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
00229 }
00230 }
00231 }
00232 }
00233
00234 static void update_duplicate_context_after_me(MpegEncContext *dst,
00235 MpegEncContext *src)
00236 {
00237 #define COPY(a) dst->a= src->a
00238 COPY(pict_type);
00239 COPY(current_picture);
00240 COPY(f_code);
00241 COPY(b_code);
00242 COPY(qscale);
00243 COPY(lambda);
00244 COPY(lambda2);
00245 COPY(picture_in_gop_number);
00246 COPY(gop_picture_number);
00247 COPY(frame_pred_frame_dct);
00248 COPY(progressive_frame);
00249 COPY(partitioned_frame);
00250 #undef COPY
00251 }
00252
00257 static void MPV_encode_defaults(MpegEncContext *s)
00258 {
00259 int i;
00260 ff_MPV_common_defaults(s);
00261
00262 for (i = -16; i < 16; i++) {
00263 default_fcode_tab[i + MAX_MV] = 1;
00264 }
00265 s->me.mv_penalty = default_mv_penalty;
00266 s->fcode_tab = default_fcode_tab;
00267 }
00268
00269 av_cold int ff_dct_encode_init(MpegEncContext *s) {
00270 if (ARCH_X86)
00271 ff_dct_encode_init_x86(s);
00272
00273 if (!s->dct_quantize)
00274 s->dct_quantize = ff_dct_quantize_c;
00275 if (!s->denoise_dct)
00276 s->denoise_dct = denoise_dct_c;
00277 s->fast_dct_quantize = s->dct_quantize;
00278 if (s->avctx->trellis)
00279 s->dct_quantize = dct_quantize_trellis_c;
00280
00281 return 0;
00282 }
00283
00284
/**
 * Initialize the MPEG-video family encoder (MPEG-1/2/4, H.261, H.263(+),
 * MJPEG/LJPEG/AMV, MSMPEG4 v1-3, WMV1/2, RV10/20, FLV1).
 *
 * Validates the user-supplied AVCodecContext settings against the selected
 * codec's capabilities, derives the internal MpegEncContext state, runs the
 * common and per-codec init helpers and builds the quantization tables.
 *
 * @return 0 on success, a negative value on invalid configuration.
 */
av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int chroma_h_shift, chroma_v_shift;

    MPV_encode_defaults(s);

    /* reject pixel formats the selected codec cannot encode */
    switch (avctx->codec_id) {
    case AV_CODEC_ID_MPEG2VIDEO:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
            av_log(avctx, AV_LOG_ERROR,
                   "only YUV420 and YUV422 are supported\n");
            return -1;
        }
        break;
    case AV_CODEC_ID_LJPEG:
        /* the non-J YUV formats are only accepted in unofficial-compliance
         * mode, the RGB variants always */
        if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
            avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
            avctx->pix_fmt != AV_PIX_FMT_BGR0 &&
            avctx->pix_fmt != AV_PIX_FMT_BGRA &&
            avctx->pix_fmt != AV_PIX_FMT_BGR24 &&
            ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
              avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
              avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
            return -1;
        }
        break;
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
            avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
            avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
            ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
              avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
              avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
            return -1;
        }
        break;
    default:
        if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
            return -1;
        }
    }

    /* derive the chroma format from the pixel format */
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUV444P:
        s->chroma_format = CHROMA_444;
        break;
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUV422P:
        s->chroma_format = CHROMA_422;
        break;
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUV420P:
    default:
        s->chroma_format = CHROMA_420;
        break;
    }

    /* copy the user settings into the internal context */
    s->bit_rate = avctx->bit_rate;
    s->width    = avctx->width;
    s->height   = avctx->height;
    if (avctx->gop_size > 600 &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_WARNING,
               "keyframe interval too large!, reducing it from %d to %d\n",
               avctx->gop_size, 600);
        avctx->gop_size = 600;
    }
    s->gop_size     = avctx->gop_size;
    s->avctx        = avctx;
    s->flags        = avctx->flags;
    s->flags2       = avctx->flags2;
    s->max_b_frames = avctx->max_b_frames;
    s->codec_id     = avctx->codec->id;
#if FF_API_MPV_GLOBAL_OPTS
    /* deprecated global options, kept for API compatibility */
    if (avctx->luma_elim_threshold)
        s->luma_elim_threshold = avctx->luma_elim_threshold;
    if (avctx->chroma_elim_threshold)
        s->chroma_elim_threshold = avctx->chroma_elim_threshold;
#endif
    s->strict_std_compliance = avctx->strict_std_compliance;
    s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
    s->mpeg_quant = avctx->mpeg_quant;
    s->rtp_mode = !!avctx->rtp_payload_size;
    s->intra_dc_precision = avctx->intra_dc_precision;
    s->user_specified_pts = AV_NOPTS_VALUE;

    if (s->gop_size <= 1) {
        /* every frame is a keyframe; gop_size is still given a sane value */
        s->intra_only = 1;
        s->gop_size   = 12;
    } else {
        s->intra_only = 0;
    }

    s->me_method = avctx->me_method;

    /* fixed qscale == constant quantizer == no rate control */
    s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);

#if FF_API_MPV_GLOBAL_OPTS
    if (s->flags & CODEC_FLAG_QP_RD)
        s->mpv_flags |= FF_MPV_FLAG_QP_RD;
#endif

    /* adaptive quantization is needed whenever any masking/RD option that
     * varies the quantizer per macroblock is active */
    s->adaptive_quant = (s->avctx->lumi_masking ||
                         s->avctx->dark_masking ||
                         s->avctx->temporal_cplx_masking ||
                         s->avctx->spatial_cplx_masking ||
                         s->avctx->p_masking ||
                         s->avctx->border_masking ||
                         (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
                        !s->fixed_qscale;

    s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);

    /* pick a VBV buffer size when only the max rate was given */
    if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
        switch(avctx->codec_id) {
        case AV_CODEC_ID_MPEG1VIDEO:
        case AV_CODEC_ID_MPEG2VIDEO:
            avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
            break;
        case AV_CODEC_ID_MPEG4:
        case AV_CODEC_ID_MSMPEG4V1:
        case AV_CODEC_ID_MSMPEG4V2:
        case AV_CODEC_ID_MSMPEG4V3:
            /* piecewise-linear interpolation between VBV sizes at the
             * MPEG-4 profile rate breakpoints */
            if (avctx->rc_max_rate >= 15000000) {
                avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
            } else if(avctx->rc_max_rate >= 2000000) {
                avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
            } else if(avctx->rc_max_rate >= 384000) {
                avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
            } else
                avctx->rc_buffer_size = 40;
            avctx->rc_buffer_size *= 16384;
            break;
        }
        if (avctx->rc_buffer_size) {
            av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
        }
    }

    /* rate-control sanity checks */
    if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
        av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
        /* only the buffer-size-without-max-rate case is tolerated */
        if (avctx->rc_max_rate && !avctx->rc_buffer_size)
            return -1;
    }

    if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
    }

    if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
        av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
        return -1;
    }

    if (avctx->rc_max_rate &&
        avctx->rc_max_rate == avctx->bit_rate &&
        avctx->rc_max_rate != avctx->rc_min_rate) {
        av_log(avctx, AV_LOG_INFO,
               "impossible bitrate constraints, this will fail\n");
    }

    /* buffer must hold at least one frame's worth of bits */
    if (avctx->rc_buffer_size &&
        avctx->bit_rate * (int64_t)avctx->time_base.num >
        avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
        av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
        return -1;
    }

    if (!s->fixed_qscale &&
        avctx->bit_rate * av_q2d(avctx->time_base) >
        avctx->bit_rate_tolerance) {
        av_log(avctx, AV_LOG_ERROR,
               "bitrate tolerance too small for bitrate\n");
        return -1;
    }

    /* vbv_delay is a 16-bit field in units of 1/90000 s */
    if (s->avctx->rc_max_rate &&
        s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
        (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
         s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
        90000LL * (avctx->rc_buffer_size - 1) >
            s->avctx->rc_max_rate * 0xFFFFLL) {
        av_log(avctx, AV_LOG_INFO,
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
    }

    /* feature-vs-codec capability checks */
    if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
        s->codec_id != AV_CODEC_ID_FLV1) {
        av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
        return -1;
    }

    if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
        av_log(avctx, AV_LOG_ERROR,
               "OBMC is only supported with simple mb decision\n");
        return -1;
    }

    if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
        return -1;
    }

    if (s->max_b_frames &&
        s->codec_id != AV_CODEC_ID_MPEG4 &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
        return -1;
    }

    /* these codecs store the pixel aspect ratio in 8-bit fields */
    if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
         s->codec_id == AV_CODEC_ID_H263  ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->sample_aspect_ratio.num > 255 ||
         avctx->sample_aspect_ratio.den > 255)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
               avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
        av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
    }

    /* per-codec resolution / alignment limits */
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        (avctx->width  > 2048 ||
         avctx->height > 1152 )) {
        av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
        return -1;
    }
    if ((s->codec_id == AV_CODEC_ID_H263 ||
         s->codec_id == AV_CODEC_ID_H263P) &&
        ((avctx->width &3) ||
         (avctx->height&3) )) {
        av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
        return -1;
    }

    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
        (avctx->width  > 4095 ||
         avctx->height > 4095 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
        return -1;
    }

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
        (avctx->width  > 16383 ||
         avctx->height > 16383 )) {
        av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
        return -1;
    }

    if (s->codec_id == AV_CODEC_ID_RV10 &&
        (avctx->width &15 ||
         avctx->height&15 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
        return AVERROR(EINVAL);
    }

    if (s->codec_id == AV_CODEC_ID_RV20 &&
        (avctx->width &3 ||
         avctx->height&3 )) {
        av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
        return AVERROR(EINVAL);
    }

    if ((s->codec_id == AV_CODEC_ID_WMV1 ||
         s->codec_id == AV_CODEC_ID_WMV2) &&
        avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
        return -1;
    }

    if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
        s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
        av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
        return -1;
    }

    if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
        av_log(avctx, AV_LOG_ERROR,
               "mpeg2 style quantization not supported by codec\n");
        return -1;
    }

#if FF_API_MPV_GLOBAL_OPTS
    if (s->flags & CODEC_FLAG_CBP_RD)
        s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
#endif

    if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
        av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
        return -1;
    }

    if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
        s->avctx->mb_decision != FF_MB_DECISION_RD) {
        av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
        return -1;
    }

    if (s->avctx->scenechange_threshold < 1000000000 &&
        (s->flags & CODEC_FLAG_CLOSED_GOP)) {
        av_log(avctx, AV_LOG_ERROR,
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
        return -1;
    }

    if (s->flags & CODEC_FLAG_LOW_DELAY) {
        if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
            av_log(avctx, AV_LOG_ERROR,
                   "low delay forcing is only available for mpeg2\n");
            return -1;
        }
        if (s->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "b frames cannot be used with low delay\n");
            return -1;
        }
    }

    if (s->q_scale_type == 1) {
        if (avctx->qmax > 12) {
            av_log(avctx, AV_LOG_ERROR,
                   "non linear quant only supports qmax <= 12 currently\n");
            return -1;
        }
    }

    /* threading is only implemented for a subset of codecs */
    if (s->avctx->thread_count > 1         &&
        s->codec_id != AV_CODEC_ID_MPEG4      &&
        s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
        s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
        s->codec_id != AV_CODEC_ID_MJPEG      &&
        (s->codec_id != AV_CODEC_ID_H263P)) {
        av_log(avctx, AV_LOG_ERROR,
               "multi threaded encoding not supported by codec\n");
        return -1;
    }

    if (s->avctx->thread_count < 1) {
        av_log(avctx, AV_LOG_ERROR,
               "automatic thread number detection not supported by codec, "
               "patch welcome\n");
        return -1;
    }

    /* slice threading implies slice/rtp packetization */
    if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
        s->rtp_mode = 1;

    if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
        s->h263_slice_structured = 1;

    if (!avctx->time_base.den || !avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
        return -1;
    }

    /* limit so that threshold<<8 arithmetic cannot overflow int */
    i = (INT_MAX / 2 + 128) >> 8;
    if (avctx->me_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }
    if (avctx->mb_threshold >= i) {
        av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
               i - 1);
        return -1;
    }

    if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
        av_log(avctx, AV_LOG_INFO,
               "notice: b_frame_strategy only affects the first pass\n");
        avctx->b_frame_strategy = 0;
    }

    /* reduce the time base to lowest terms */
    i = av_gcd(avctx->time_base.den, avctx->time_base.num);
    if (i > 1) {
        av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
        avctx->time_base.den /= i;
        avctx->time_base.num /= i;
    }

    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
        /* (a + x * 3 / 8) / x: rounding bias for MPEG-style quantization */
        s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
        s->inter_quant_bias = 0;
    } else {
        s->intra_quant_bias = 0;
        /* (a - x / 4) / x: H.263-style truncating bias */
        s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
    }

    /* explicit user overrides win over the codec defaults above */
    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->intra_quant_bias = avctx->intra_quant_bias;
    if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
        s->inter_quant_bias = avctx->inter_quant_bias;

    av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);

    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    /* MPEG-4 stores the timebase denominator in a 16-bit field */
    if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
        s->avctx->time_base.den > (1 << 16) - 1) {
        av_log(avctx, AV_LOG_ERROR,
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
               (1 << 16) - 1);
        return -1;
    }
    s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;

#if FF_API_MPV_GLOBAL_OPTS
    if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
        s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
    if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
        s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
    if (avctx->quantizer_noise_shaping)
        s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
#endif

    /* per-codec output format, delay and feature flags */
    switch (avctx->codec->id) {
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay  = !!(s->flags & CODEC_FLAG_LOW_DELAY);
        avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
        s->rtp_mode   = 1;
        break;
    case AV_CODEC_ID_LJPEG:
    case AV_CODEC_ID_MJPEG:
    case AV_CODEC_ID_AMV:
        s->out_format = FMT_MJPEG;
        s->intra_only = 1;
        /* derive the JPEG component sampling factors */
        if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
            (avctx->pix_fmt == AV_PIX_FMT_BGR0
             || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
             || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
            s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
            s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
            s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
        } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
            s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
            s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
        } else {
            s->mjpeg_vsample[0] = 2;
            s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
            s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
            s->mjpeg_hsample[0] = 2;
            s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
            s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
        }
        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
            ff_mjpeg_encode_init(s) < 0)
            return -1;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_H261:
        if (!CONFIG_H261_ENCODER)
            return -1;
        if (ff_h261_get_picture_format(s->width, s->height) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for the "
                   "H.261 codec.\nValid sizes are 176x144, 352x288\n",
                   s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H261;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_H263:
        if (!CONFIG_H263_ENCODER)
            return -1;
        if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
                             s->width, s->height) == 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "The specified picture size of %dx%d is not valid for "
                   "the H.263 codec.\nValid sizes are 128x96, 176x144, "
                   "352x288, 704x576, and 1408x1152. "
                   "Try H.263+.\n", s->width, s->height);
            return -1;
        }
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_H263P:
        s->out_format = FMT_H263;
        s->h263_plus  = 1;
        /* advanced intra coding implies the modified quantizer mode */
        s->h263_aic        = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
        s->modified_quant  = s->h263_aic;
        s->loop_filter     = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
        s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_FLV1:
        s->out_format      = FMT_H263;
        s->h263_flv        = 2; /* format: 0: h263, 1: flv picture type 0, 2: flv picture type 1 */
        s->unrestricted_mv = 1;
        s->rtp_mode  = 0; /* the FLV container has no slice support */
        avctx->delay = 0;
        s->low_delay = 1;
        break;
    case AV_CODEC_ID_RV10:
        s->out_format = FMT_H263;
        avctx->delay  = 0;
        s->low_delay  = 1;
        break;
    case AV_CODEC_ID_RV20:
        s->out_format      = FMT_H263;
        avctx->delay       = 0;
        s->low_delay       = 1;
        s->modified_quant  = 1;
        s->h263_aic        = 1;
        s->h263_plus       = 1;
        s->loop_filter     = 1;
        s->unrestricted_mv = 0;
        break;
    case AV_CODEC_ID_MPEG4:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->low_delay       = s->max_b_frames ? 0 : 1;
        avctx->delay       = s->low_delay ? 0 : (s->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format      = FMT_H263;
        s->h263_pred       = 1;
        s->unrestricted_mv = 1;
        s->msmpeg4_version = 2;
        avctx->delay       = 0;
        s->low_delay       = 1;
        break;
    case AV_CODEC_ID_MSMPEG4V3:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 3;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case AV_CODEC_ID_WMV1:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 4;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    case AV_CODEC_ID_WMV2:
        s->out_format        = FMT_H263;
        s->h263_pred         = 1;
        s->unrestricted_mv   = 1;
        s->msmpeg4_version   = 5;
        s->flipflop_rounding = 1;
        avctx->delay         = 0;
        s->low_delay         = 1;
        break;
    default:
        return -1;
    }

    avctx->has_b_frames = !s->low_delay;

    s->encoding = 1;

    s->progressive_frame    =
    s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
                                                CODEC_FLAG_INTERLACED_ME) ||
                                s->alternate_scan);

    /* allocate the common (decoder+encoder) state */
    if (ff_MPV_common_init(s) < 0)
        return -1;

    ff_dct_encode_init(s);

    if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
        s->chroma_qscale_table = ff_h263_chroma_qscale_table;

    s->quant_precision = 5;

    ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
    ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);

    /* per-codec encoder init */
    if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
        ff_h261_encode_init(s);
    if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
        ff_h263_encode_init(s);
    if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
        ff_msmpeg4_encode_init(s);
    if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
        && s->out_format == FMT_MPEG1)
        ff_mpeg1_encode_init(s);

    /* select the default quant matrices (or the user-supplied ones),
     * stored in IDCT-permuted order */
    for (i = 0; i < 64; i++) {
        int j = s->dsp.idct_permutation[i];
        if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
            s->mpeg_quant) {
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
        } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
            s->intra_matrix[j] =
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        } else {
            /* MPEG-1/2 defaults */
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
        }
        if (s->avctx->intra_matrix)
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
        if (s->avctx->inter_matrix)
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
    }

    /* precompute matrices, MJPEG uses its own quantizer so skip it */
    if (s->out_format != FMT_MJPEG) {
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                          s->intra_matrix, s->intra_quant_bias, avctx->qmin,
                          31, 1);
        ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
                          s->inter_matrix, s->inter_quant_bias, avctx->qmin,
                          31, 0);
    }

    if (ff_rate_control_init(s) < 0)
        return -1;

    return 0;
}
00954
00955 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
00956 {
00957 MpegEncContext *s = avctx->priv_data;
00958
00959 ff_rate_control_uninit(s);
00960
00961 ff_MPV_common_end(s);
00962 if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
00963 s->out_format == FMT_MJPEG)
00964 ff_mjpeg_encode_close(s);
00965
00966 av_freep(&avctx->extradata);
00967
00968 return 0;
00969 }
00970
/* Sum of absolute errors of a 16x16 block against the constant value ref.
 * Used to estimate how well a block is approximated by its mean. */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col, sum = 0;

    for (row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (col = 0; col < 16; col++) {
            const int d = line[col] - ref;
            sum += d < 0 ? -d : d;
        }
    }

    return sum;
}
00984
00985 static int get_intra_count(MpegEncContext *s, uint8_t *src,
00986 uint8_t *ref, int stride)
00987 {
00988 int x, y, w, h;
00989 int acc = 0;
00990
00991 w = s->width & ~15;
00992 h = s->height & ~15;
00993
00994 for (y = 0; y < h; y += 16) {
00995 for (x = 0; x < w; x += 16) {
00996 int offset = x + y * stride;
00997 int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
00998 16);
00999 int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
01000 int sae = get_sae(src + offset, mean, stride);
01001
01002 acc += sae + 500 < sad;
01003 }
01004 }
01005 return acc;
01006 }
01007
01008
/**
 * Queue one user-supplied frame for encoding.
 *
 * Validates/derives the pts, then takes the frame either by reference
 * ("direct": strides match and the caller preserves the buffer) or by
 * copying it into an internally allocated picture, and appends it to
 * s->input_picture[] after shifting the queue by one.  A NULL pic_arg
 * (flush) performs only the queue shift.
 *
 * @return 0 on success, negative on error
 */
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
{
    AVFrame *pic = NULL;
    int64_t pts;
    int i;
    /* number of frames the encoder output lags behind the input */
    const int encoding_delay = s->max_b_frames ? s->max_b_frames :
                               (s->low_delay ? 0 : 1);
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        pic_arg->display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t time = pts;
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly monotonically increasing */
                if (time <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Error, Invalid timestamp=%"PRId64", "
                           "last=%"PRId64"\n", pts, s->user_specified_pts);
                    return -1;
                }

                if (!s->low_delay && pic_arg->display_picture_number == 1)
                    s->dts_delta = time - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or
             * fall back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = pic_arg->display_picture_number;
            }
        }
    }

    if (pic_arg) {
        /* zero-copy ("direct") use of the caller's buffer is possible
         * only when the caller keeps it valid for the whole encoding
         * delay and all strides match ours */
        if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
            direct = 0;
        if (pic_arg->linesize[0] != s->linesize)
            direct = 0;
        if (pic_arg->linesize[1] != s->uvlinesize)
            direct = 0;
        if (pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;

        av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        if (direct) {
            i = ff_find_unused_picture(s, 1);
            if (i < 0)
                return i;

            pic = &s->picture[i].f;
            pic->reference = 3;

            /* share the caller's data pointers instead of copying */
            for (i = 0; i < 4; i++) {
                pic->data[i]     = pic_arg->data[i];
                pic->linesize[i] = pic_arg->linesize[i];
            }
            if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
                return -1;
            }
        } else {
            i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;

            pic = &s->picture[i].f;
            pic->reference = 3;

            if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
                return -1;
            }

            if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                /* data is already in our internal buffer - nothing to copy */
            } else {
                int h_chroma_shift, v_chroma_shift;
                avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);

                /* copy all three planes into the internal buffer */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width  >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->data[i];

                    /* AMV without EMU_EDGE copies the full macroblock-
                     * aligned height */
                    if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
                        h = ((s->height + 15) / 16 * 16) >> v_shift;
                    }

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        while (h--) {
                            memcpy(dst, src, w);
                            dst += dst_stride;
                            src += src_stride;
                        }
                    }
                }
            }
        }
        copy_picture_attributes(s, pic, pic_arg);
        pic->pts = pts; /* set here to avoid modifying pic_arg */
    }

    /* shift the input picture queue and append the new picture
     * (pic stays NULL when flushing) */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->input_picture[i - 1] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
01140
/**
 * Decide whether the current picture is similar enough to the previous
 * reference to be dropped entirely, according to the frame_skip_*
 * options.
 *
 * @return 1 if the frame should be skipped, 0 otherwise
 */
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
{
    int x, y, plane;
    int score = 0;
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f.linesize[plane];
        /* luma spans 2x2 8x8 blocks per MB, chroma 1x1 (4:2:0 layout
         * assumed here - NOTE(review): verify for other formats) */
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                /* shared buffers carry no edge offset */
                int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
                uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
                int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                /* frame_skip_exp selects how block scores accumulate */
                switch (s->avctx->frame_skip_exp) {
                case 0: score    = FFMAX(score, v);           break;
                case 1: score   += FFABS(v);                  break;
                case 2: score   += v * v;                     break;
                case 3: score64 += FFABS(v * v * (int64_t)v); break;
                case 4: score64 += v * v * (int64_t)(v * v);  break;
                }
            }
        }
    }

    /* the int accumulator takes precedence when it was used */
    if (score)
        score64 = score;

    if (score64 < s->avctx->frame_skip_threshold)
        return 1;
    if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
        return 1;
    return 0;
}
01177
01178 static int encode_frame(AVCodecContext *c, AVFrame *frame)
01179 {
01180 AVPacket pkt = { 0 };
01181 int ret, got_output;
01182
01183 av_init_packet(&pkt);
01184 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
01185 if (ret < 0)
01186 return ret;
01187
01188 ret = pkt.size;
01189 av_free_packet(&pkt);
01190 return ret;
01191 }
01192
01193 static int estimate_best_b_count(MpegEncContext *s)
01194 {
01195 AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
01196 AVCodecContext *c = avcodec_alloc_context3(NULL);
01197 AVFrame input[FF_MAX_B_FRAMES + 2];
01198 const int scale = s->avctx->brd_scale;
01199 int i, j, out_size, p_lambda, b_lambda, lambda2;
01200 int64_t best_rd = INT64_MAX;
01201 int best_b_count = -1;
01202
01203 av_assert0(scale >= 0 && scale <= 3);
01204
01205
01206
01207 p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
01208
01209 b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
01210 if (!b_lambda)
01211 b_lambda = p_lambda;
01212 lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
01213 FF_LAMBDA_SHIFT;
01214
01215 c->width = s->width >> scale;
01216 c->height = s->height >> scale;
01217 c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
01218 CODEC_FLAG_INPUT_PRESERVED ;
01219 c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
01220 c->mb_decision = s->avctx->mb_decision;
01221 c->me_cmp = s->avctx->me_cmp;
01222 c->mb_cmp = s->avctx->mb_cmp;
01223 c->me_sub_cmp = s->avctx->me_sub_cmp;
01224 c->pix_fmt = AV_PIX_FMT_YUV420P;
01225 c->time_base = s->avctx->time_base;
01226 c->max_b_frames = s->max_b_frames;
01227
01228 if (avcodec_open2(c, codec, NULL) < 0)
01229 return -1;
01230
01231 for (i = 0; i < s->max_b_frames + 2; i++) {
01232 int ysize = c->width * c->height;
01233 int csize = (c->width / 2) * (c->height / 2);
01234 Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
01235 s->next_picture_ptr;
01236
01237 avcodec_get_frame_defaults(&input[i]);
01238 input[i].data[0] = av_malloc(ysize + 2 * csize);
01239 input[i].data[1] = input[i].data[0] + ysize;
01240 input[i].data[2] = input[i].data[1] + csize;
01241 input[i].linesize[0] = c->width;
01242 input[i].linesize[1] =
01243 input[i].linesize[2] = c->width / 2;
01244
01245 if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
01246 pre_input = *pre_input_ptr;
01247
01248 if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
01249 pre_input.f.data[0] += INPLACE_OFFSET;
01250 pre_input.f.data[1] += INPLACE_OFFSET;
01251 pre_input.f.data[2] += INPLACE_OFFSET;
01252 }
01253
01254 s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
01255 pre_input.f.data[0], pre_input.f.linesize[0],
01256 c->width, c->height);
01257 s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
01258 pre_input.f.data[1], pre_input.f.linesize[1],
01259 c->width >> 1, c->height >> 1);
01260 s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
01261 pre_input.f.data[2], pre_input.f.linesize[2],
01262 c->width >> 1, c->height >> 1);
01263 }
01264 }
01265
01266 for (j = 0; j < s->max_b_frames + 1; j++) {
01267 int64_t rd = 0;
01268
01269 if (!s->input_picture[j])
01270 break;
01271
01272 c->error[0] = c->error[1] = c->error[2] = 0;
01273
01274 input[0].pict_type = AV_PICTURE_TYPE_I;
01275 input[0].quality = 1 * FF_QP2LAMBDA;
01276
01277 out_size = encode_frame(c, &input[0]);
01278
01279
01280
01281 for (i = 0; i < s->max_b_frames + 1; i++) {
01282 int is_p = i % (j + 1) == j || i == s->max_b_frames;
01283
01284 input[i + 1].pict_type = is_p ?
01285 AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
01286 input[i + 1].quality = is_p ? p_lambda : b_lambda;
01287
01288 out_size = encode_frame(c, &input[i + 1]);
01289
01290 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01291 }
01292
01293
01294 while (out_size) {
01295 out_size = encode_frame(c, NULL);
01296 rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
01297 }
01298
01299 rd += c->error[0] + c->error[1] + c->error[2];
01300
01301 if (rd < best_rd) {
01302 best_rd = rd;
01303 best_b_count = j;
01304 }
01305 }
01306
01307 avcodec_close(c);
01308 av_freep(&c);
01309
01310 for (i = 0; i < s->max_b_frames + 2; i++) {
01311 av_freep(&input[i].data[0]);
01312 }
01313
01314 return best_b_count;
01315 }
01316
/**
 * Pick the next picture to encode from the input queue and publish it in
 * s->reordered_input_picture[0] / s->new_picture / s->current_picture.
 *
 * Decides the coding type (I/P/B), optionally drops the frame entirely
 * (frame_skip_* options) and chooses the number of B-frames according to
 * avctx->b_frame_strategy.
 *
 * @return 0 on success, negative on error
 */
static int select_input_picture(MpegEncContext *s)
{
    int i;

    /* shift the reorder queue by one */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
        if (
            s->next_picture_ptr == NULL || s->intra_only) {
            /* no reference yet (or intra-only codec): code an I-frame */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames;

            if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
                if (s->picture_in_gop_number < s->gop_size &&
                    skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                    /* frame is close enough to the reference: drop it
                     * and release its buffer */
                    if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
                        for (i = 0; i < 4; i++)
                            s->input_picture[0]->f.data[i] = NULL;
                        s->input_picture[0]->f.type = 0;
                    } else {
                        assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
                               s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);

                        s->avctx->release_buffer(s->avctx,
                                                 &s->input_picture[0]->f);
                    }

                    emms_c();
                    ff_vbv_update(s, 0);

                    goto no_output_pic;
                }
            }

            if (s->flags & CODEC_FLAG_PASS2) {
                /* second pass: take the picture types recorded in the
                 * first-pass rate-control stats */
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f.display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f.pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->avctx->b_frame_strategy == 0) {
                /* fixed: as many B-frames as available, up to the max */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->avctx->b_frame_strategy == 1) {
                /* heuristic: stop before frames with many intra-like MBs */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        s->input_picture[i]->b_frame_score =
                            get_intra_count(s,
                                            s->input_picture[i    ]->f.data[0],
                                            s->input_picture[i - 1]->f.data[0],
                                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] == NULL ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->avctx->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores so they are recomputed next time */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->avctx->b_frame_strategy == 2) {
                /* exhaustive: encode downscaled frames, compare RD cost */
                b_frames = estimate_best_b_count(s);
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
                b_frames = 0;
            }

            emms_c();

            /* honor user-forced (non-B) picture types */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f.pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many b frames in a row\n");
            }

            /* force an I-frame at GOP boundaries */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->flags & CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* the (b_frames+1)-th queued picture is coded first (P/I);
             * the pictures before it follow as B-frames */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f.coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                s->reordered_input_picture[i + 1]->f.pict_type =
                    AV_PICTURE_TYPE_B;
                s->reordered_input_picture[i + 1]->f.coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    if (s->reordered_input_picture[0]) {
        /* B-frames are never used as references */
        s->reordered_input_picture[0]->f.reference =
            s->reordered_input_picture[0]->f.pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);

        if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
            s->avctx->rc_buffer_size) {
            /* shared input must not be modified, and with a VBV buffer
             * the encoder may re-encode (vbv_retry): allocate a private
             * picture and release the shared one */
            Picture *pic;
            int i = ff_find_unused_picture(s, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->f.reference = s->reordered_input_picture[0]->f.reference;
            if (ff_alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            /* mark the original picture unused / release its buffer */
            if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
                s->avctx->release_buffer(s->avctx,
                                         &s->reordered_input_picture[0]->f);
            for (i = 0; i < 4; i++)
                s->reordered_input_picture[0]->f.data[i] = NULL;
            s->reordered_input_picture[0]->f.type = 0;

            copy_picture_attributes(s, &pic->f,
                                    &s->reordered_input_picture[0]->f);

            s->current_picture_ptr = pic;
        } else {
            /* non-shared input: reuse its buffer as current picture */
            assert(s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_USER ||
                   s->reordered_input_picture[0]->f.type ==
                       FF_BUFFER_TYPE_INTERNAL);

            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                s->new_picture.f.data[i] += INPLACE_OFFSET;
            }
        }
        ff_copy_picture(&s->current_picture, s->current_picture_ptr);

        s->picture_number = s->new_picture.f.display_picture_number;
    } else {
        /* nothing to encode this call */
        memset(&s->new_picture, 0, sizeof(Picture));
    }
    return 0;
}
01509
/**
 * Encode one frame (pic_arg == NULL flushes delayed pictures).
 *
 * Queues and reorders the input, runs the actual encoder - retrying with
 * a larger quantizer when the frame overflows the VBV buffer - appends
 * stuffing bits as demanded by rate control, patches the MPEG-1/2
 * vbv_delay header field for CBR, and fills in the output packet's
 * pts/dts/key flag.
 *
 * @return 0 on success (check *got_packet), negative on error
 */
int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                          AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f.data[0]) {
        if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
            return ret;
        if (s->mb_info) {
            /* reserve side data for H.263 per-macroblock info */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 AV_PKT_DATA_H263_MB_INFO,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* give each slice thread a proportional part of the buffer */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y   = s->thread_context[i]-> end_mb_y;
            int h       = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f.pict_type;

        if (ff_MPV_frame_start(s, avctx) < 0)
            return -1;
vbv_retry:
        if (encode_picture(s, s->picture_number) < 0)
            return -1;

        /* export per-frame statistics */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits     = s->mv_bits;
        avctx->misc_bits   = s->misc_bits;
        avctx->i_tex_bits  = s->i_tex_bits;
        avctx->p_tex_bits  = s->p_tex_bits;
        avctx->i_count     = s->i_count;

        avctx->p_count     = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count  = s->skip_count;

        ff_MPV_frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(s);

        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;

            /* frame overflowed the VBV buffer: raise lambda and retry */
            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->avctx->lmax) {
                s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + 1,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0;

                /* undo per-frame state toggles before re-encoding */
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base       = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                /* rewind all slice-thread bitstream writers */
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                goto vbv_retry;
            }

            assert(s->avctx->rc_max_rate);
        }

        if (s->flags & CODEC_FLAG_PASS1)
            ff_write_pass1_stats(s);

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
            avctx->error[i] += s->current_picture_ptr->f.error[i];
        }

        if (s->flags & CODEC_FLAG_PASS1)
            assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
                   avctx->i_tex_bits + avctx->p_tex_bits ==
                       put_bits_count(&s->pb));
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        /* insert stuffing bits as demanded by rate control */
        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                    stuffing_count + 50) {
                av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            case AV_CODEC_ID_MPEG1VIDEO:
            case AV_CODEC_ID_MPEG2VIDEO:
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0);
                }
                break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: dummy startcode padded with 0xFF */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay header field for CBR streams */
        if (s->avctx->rc_max_rate &&
            s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
                s->avctx->rc_max_rate * 0xFFFFLL) {
            int vbv_delay, min_delay;
            double inbits  = s->avctx->rc_max_rate *
                             av_q2d(s->avctx->time_base);
            int    minbits = s->frame_bits - 8 *
                             (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits    = s->rc_context.buffer_index + minbits - inbits;

            if (bits < 0)
                av_log(s->avctx, AV_LOG_ERROR,
                       "Internal error, negative bits\n");

            assert(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
                        s->avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field already written into the
             * picture header */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1]  = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;
            avctx->vbv_delay     = vbv_delay * 300;
        }
        s->total_bits     += s->frame_bits;
        avctx->frame_bits  = s->frame_bits;

        pkt->pts = s->current_picture.f.pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f.coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        if (s->current_picture.f.key_frame)
            pkt->flags |= AV_PKT_FLAG_KEY;
        if (s->mb_info)
            av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
    } else {
        s->frame_bits = 0;
    }
    assert((s->frame_bits & 7) == 0);

    pkt->size   = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
01713
/**
 * Zero out a block that contains only a few isolated +-1 coefficients
 * when the run-length-weighted cost ("score") of keeping them is below
 * the threshold.  A negative threshold means the DC coefficient must be
 * preserved (its magnitude is negated to get the effective threshold).
 */
static inline void dct_single_coeff_elimination(MpegEncContext *s,
                                                int n, int threshold)
{
    /* penalty per +-1 coefficient, indexed by the zero-run before it */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    DCTELEM *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* nothing beyond the (possibly kept) DC coefficient: nothing to do */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any coefficient with |level| > 1: keep the block as-is */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* clear everything except (possibly) the DC coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
01769
01770 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
01771 int last_index)
01772 {
01773 int i;
01774 const int maxlevel = s->max_qcoeff;
01775 const int minlevel = s->min_qcoeff;
01776 int overflow = 0;
01777
01778 if (s->mb_intra) {
01779 i = 1;
01780 } else
01781 i = 0;
01782
01783 for (; i <= last_index; i++) {
01784 const int j = s->intra_scantable.permutated[i];
01785 int level = block[j];
01786
01787 if (level > maxlevel) {
01788 level = maxlevel;
01789 overflow++;
01790 } else if (level < minlevel) {
01791 level = minlevel;
01792 overflow++;
01793 }
01794
01795 block[j] = level;
01796 }
01797
01798 if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
01799 av_log(s->avctx, AV_LOG_INFO,
01800 "warning, clipping %d dct coefficients to %d..%d\n",
01801 overflow, minlevel, maxlevel);
01802 }
01803
/**
 * Compute a per-pixel visual weight for an 8x8 block from the local
 * activity of each pixel's 3x3 neighbourhood (clipped at the block
 * borders), used by the noise-shaping quantizer.
 *
 * @param weight output array of 64 weights, row-major
 * @param ptr    top-left pixel of the 8x8 block
 * @param stride distance between vertically adjacent pixels
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int bx, by;

    for (by = 0; by < 8; by++) {
        for (bx = 0; bx < 8; bx++) {
            int nx, ny;
            int sum   = 0;
            int sqr   = 0;
            int count = 0;

            /* accumulate over the clipped 3x3 neighbourhood */
            for (ny = FFMAX(by - 1, 0); ny < FFMIN(8, by + 2); ny++) {
                for (nx = FFMAX(bx - 1, 0); nx < FFMIN(8, bx + 2); nx++) {
                    const int v = ptr[nx + ny * stride];

                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }

            weight[bx + 8 * by] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
01827
01828 static av_always_inline void encode_mb_internal(MpegEncContext *s,
01829 int motion_x, int motion_y,
01830 int mb_block_height,
01831 int mb_block_width,
01832 int mb_block_count)
01833 {
01834 int16_t weight[12][64];
01835 DCTELEM orig[12][64];
01836 const int mb_x = s->mb_x;
01837 const int mb_y = s->mb_y;
01838 int i;
01839 int skip_dct[12];
01840 int dct_offset = s->linesize * 8;
01841 int uv_dct_offset = s->uvlinesize * 8;
01842 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
01843 int wrap_y, wrap_c;
01844
01845 for (i = 0; i < mb_block_count; i++)
01846 skip_dct[i] = s->skipdct;
01847
01848 if (s->adaptive_quant) {
01849 const int last_qp = s->qscale;
01850 const int mb_xy = mb_x + mb_y * s->mb_stride;
01851
01852 s->lambda = s->lambda_table[mb_xy];
01853 update_qscale(s);
01854
01855 if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
01856 s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
01857 s->dquant = s->qscale - last_qp;
01858
01859 if (s->out_format == FMT_H263) {
01860 s->dquant = av_clip(s->dquant, -2, 2);
01861
01862 if (s->codec_id == AV_CODEC_ID_MPEG4) {
01863 if (!s->mb_intra) {
01864 if (s->pict_type == AV_PICTURE_TYPE_B) {
01865 if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
01866 s->dquant = 0;
01867 }
01868 if (s->mv_type == MV_TYPE_8X8)
01869 s->dquant = 0;
01870 }
01871 }
01872 }
01873 }
01874 ff_set_qscale(s, last_qp + s->dquant);
01875 } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
01876 ff_set_qscale(s, s->qscale + s->dquant);
01877
01878 wrap_y = s->linesize;
01879 wrap_c = s->uvlinesize;
01880 ptr_y = s->new_picture.f.data[0] +
01881 (mb_y * 16 * wrap_y) + mb_x * 16;
01882 ptr_cb = s->new_picture.f.data[1] +
01883 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
01884 ptr_cr = s->new_picture.f.data[2] +
01885 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
01886
01887 if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
01888 uint8_t *ebuf = s->edge_emu_buffer + 32;
01889 s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
01890 mb_y * 16, s->width, s->height);
01891 ptr_y = ebuf;
01892 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width,
01893 mb_block_height, mb_x * 8, mb_y * 8,
01894 (s->width+1) >> 1, (s->height+1) >> 1);
01895 ptr_cb = ebuf + 18 * wrap_y;
01896 s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, mb_block_width,
01897 mb_block_height, mb_x * 8, mb_y * 8,
01898 (s->width+1) >> 1, (s->height+1) >> 1);
01899 ptr_cr = ebuf + 18 * wrap_y + 8;
01900 }
01901
01902 if (s->mb_intra) {
01903 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01904 int progressive_score, interlaced_score;
01905
01906 s->interlaced_dct = 0;
01907 progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
01908 NULL, wrap_y, 8) +
01909 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
01910 NULL, wrap_y, 8) - 400;
01911
01912 if (progressive_score > 0) {
01913 interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
01914 NULL, wrap_y * 2, 8) +
01915 s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
01916 NULL, wrap_y * 2, 8);
01917 if (progressive_score > interlaced_score) {
01918 s->interlaced_dct = 1;
01919
01920 dct_offset = wrap_y;
01921 uv_dct_offset = wrap_c;
01922 wrap_y <<= 1;
01923 if (s->chroma_format == CHROMA_422 ||
01924 s->chroma_format == CHROMA_444)
01925 wrap_c <<= 1;
01926 }
01927 }
01928 }
01929
01930 s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
01931 s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
01932 s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
01933 s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
01934
01935 if (s->flags & CODEC_FLAG_GRAY) {
01936 skip_dct[4] = 1;
01937 skip_dct[5] = 1;
01938 } else {
01939 s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
01940 s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
01941 if (!s->chroma_y_shift && s->chroma_x_shift) {
01942 s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
01943 s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
01944 } else if (!s->chroma_y_shift && !s->chroma_x_shift) {
01945 s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
01946 s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
01947 s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
01948 s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
01949 s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
01950 s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
01951 }
01952 }
01953 } else {
01954 op_pixels_func (*op_pix)[4];
01955 qpel_mc_func (*op_qpix)[16];
01956 uint8_t *dest_y, *dest_cb, *dest_cr;
01957
01958 dest_y = s->dest[0];
01959 dest_cb = s->dest[1];
01960 dest_cr = s->dest[2];
01961
01962 if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
01963 op_pix = s->dsp.put_pixels_tab;
01964 op_qpix = s->dsp.put_qpel_pixels_tab;
01965 } else {
01966 op_pix = s->dsp.put_no_rnd_pixels_tab;
01967 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
01968 }
01969
01970 if (s->mv_dir & MV_DIR_FORWARD) {
01971 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
01972 s->last_picture.f.data,
01973 op_pix, op_qpix);
01974 op_pix = s->dsp.avg_pixels_tab;
01975 op_qpix = s->dsp.avg_qpel_pixels_tab;
01976 }
01977 if (s->mv_dir & MV_DIR_BACKWARD) {
01978 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
01979 s->next_picture.f.data,
01980 op_pix, op_qpix);
01981 }
01982
01983 if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
01984 int progressive_score, interlaced_score;
01985
01986 s->interlaced_dct = 0;
01987 progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
01988 ptr_y, wrap_y,
01989 8) +
01990 s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
01991 ptr_y + wrap_y * 8, wrap_y,
01992 8) - 400;
01993
01994 if (s->avctx->ildct_cmp == FF_CMP_VSSE)
01995 progressive_score -= 400;
01996
01997 if (progressive_score > 0) {
01998 interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
01999 ptr_y,
02000 wrap_y * 2, 8) +
02001 s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
02002 ptr_y + wrap_y,
02003 wrap_y * 2, 8);
02004
02005 if (progressive_score > interlaced_score) {
02006 s->interlaced_dct = 1;
02007
02008 dct_offset = wrap_y;
02009 uv_dct_offset = wrap_c;
02010 wrap_y <<= 1;
02011 if (s->chroma_format == CHROMA_422)
02012 wrap_c <<= 1;
02013 }
02014 }
02015 }
02016
02017 s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
02018 s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
02019 s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
02020 dest_y + dct_offset, wrap_y);
02021 s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
02022 dest_y + dct_offset + 8, wrap_y);
02023
02024 if (s->flags & CODEC_FLAG_GRAY) {
02025 skip_dct[4] = 1;
02026 skip_dct[5] = 1;
02027 } else {
02028 s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
02029 s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
02030 if (!s->chroma_y_shift) {
02031 s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
02032 dest_cb + uv_dct_offset, wrap_c);
02033 s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
02034 dest_cr + uv_dct_offset, wrap_c);
02035 }
02036 }
02037
02038 if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
02039 2 * s->qscale * s->qscale) {
02040
02041 if (s->dsp.sad[1](NULL, ptr_y , dest_y,
02042 wrap_y, 8) < 20 * s->qscale)
02043 skip_dct[0] = 1;
02044 if (s->dsp.sad[1](NULL, ptr_y + 8,
02045 dest_y + 8, wrap_y, 8) < 20 * s->qscale)
02046 skip_dct[1] = 1;
02047 if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
02048 dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
02049 skip_dct[2] = 1;
02050 if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
02051 dest_y + dct_offset + 8,
02052 wrap_y, 8) < 20 * s->qscale)
02053 skip_dct[3] = 1;
02054 if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
02055 wrap_c, 8) < 20 * s->qscale)
02056 skip_dct[4] = 1;
02057 if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
02058 wrap_c, 8) < 20 * s->qscale)
02059 skip_dct[5] = 1;
02060 if (!s->chroma_y_shift) {
02061 if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
02062 dest_cb + uv_dct_offset,
02063 wrap_c, 8) < 20 * s->qscale)
02064 skip_dct[6] = 1;
02065 if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
02066 dest_cr + uv_dct_offset,
02067 wrap_c, 8) < 20 * s->qscale)
02068 skip_dct[7] = 1;
02069 }
02070 }
02071 }
02072
02073 if (s->quantizer_noise_shaping) {
02074 if (!skip_dct[0])
02075 get_visual_weight(weight[0], ptr_y , wrap_y);
02076 if (!skip_dct[1])
02077 get_visual_weight(weight[1], ptr_y + 8, wrap_y);
02078 if (!skip_dct[2])
02079 get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
02080 if (!skip_dct[3])
02081 get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
02082 if (!skip_dct[4])
02083 get_visual_weight(weight[4], ptr_cb , wrap_c);
02084 if (!skip_dct[5])
02085 get_visual_weight(weight[5], ptr_cr , wrap_c);
02086 if (!s->chroma_y_shift) {
02087 if (!skip_dct[6])
02088 get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
02089 wrap_c);
02090 if (!skip_dct[7])
02091 get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
02092 wrap_c);
02093 }
02094 memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
02095 }
02096
02097
02098 av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
02099 {
02100 for (i = 0; i < mb_block_count; i++) {
02101 if (!skip_dct[i]) {
02102 int overflow;
02103 s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
02104
02105
02106
02107
02108
02109 if (overflow)
02110 clip_coeffs(s, s->block[i], s->block_last_index[i]);
02111 } else
02112 s->block_last_index[i] = -1;
02113 }
02114 if (s->quantizer_noise_shaping) {
02115 for (i = 0; i < mb_block_count; i++) {
02116 if (!skip_dct[i]) {
02117 s->block_last_index[i] =
02118 dct_quantize_refine(s, s->block[i], weight[i],
02119 orig[i], i, s->qscale);
02120 }
02121 }
02122 }
02123
02124 if (s->luma_elim_threshold && !s->mb_intra)
02125 for (i = 0; i < 4; i++)
02126 dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
02127 if (s->chroma_elim_threshold && !s->mb_intra)
02128 for (i = 4; i < mb_block_count; i++)
02129 dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
02130
02131 if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
02132 for (i = 0; i < mb_block_count; i++) {
02133 if (s->block_last_index[i] == -1)
02134 s->coded_score[i] = INT_MAX / 256;
02135 }
02136 }
02137 }
02138
02139 if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
02140 s->block_last_index[4] =
02141 s->block_last_index[5] = 0;
02142 s->block[4][0] =
02143 s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
02144 }
02145
02146
02147 if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
02148 for (i = 0; i < mb_block_count; i++) {
02149 int j;
02150 if (s->block_last_index[i] > 0) {
02151 for (j = 63; j > 0; j--) {
02152 if (s->block[i][s->intra_scantable.permutated[j]])
02153 break;
02154 }
02155 s->block_last_index[i] = j;
02156 }
02157 }
02158 }
02159
02160
02161 switch(s->codec_id){
02162 case AV_CODEC_ID_MPEG1VIDEO:
02163 case AV_CODEC_ID_MPEG2VIDEO:
02164 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
02165 ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
02166 break;
02167 case AV_CODEC_ID_MPEG4:
02168 if (CONFIG_MPEG4_ENCODER)
02169 ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
02170 break;
02171 case AV_CODEC_ID_MSMPEG4V2:
02172 case AV_CODEC_ID_MSMPEG4V3:
02173 case AV_CODEC_ID_WMV1:
02174 if (CONFIG_MSMPEG4_ENCODER)
02175 ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
02176 break;
02177 case AV_CODEC_ID_WMV2:
02178 if (CONFIG_WMV2_ENCODER)
02179 ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
02180 break;
02181 case AV_CODEC_ID_H261:
02182 if (CONFIG_H261_ENCODER)
02183 ff_h261_encode_mb(s, s->block, motion_x, motion_y);
02184 break;
02185 case AV_CODEC_ID_H263:
02186 case AV_CODEC_ID_H263P:
02187 case AV_CODEC_ID_FLV1:
02188 case AV_CODEC_ID_RV10:
02189 case AV_CODEC_ID_RV20:
02190 if (CONFIG_H263_ENCODER)
02191 ff_h263_encode_mb(s, s->block, motion_x, motion_y);
02192 break;
02193 case AV_CODEC_ID_MJPEG:
02194 case AV_CODEC_ID_AMV:
02195 if (CONFIG_MJPEG_ENCODER)
02196 ff_mjpeg_encode_mb(s, s->block);
02197 break;
02198 default:
02199 av_assert1(0);
02200 }
02201 }
02202
02203 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
02204 {
02205 if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
02206 else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
02207 else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
02208 }
02209
02210 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
02211 int i;
02212
02213 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02214
02215
02216 d->mb_skip_run= s->mb_skip_run;
02217 for(i=0; i<3; i++)
02218 d->last_dc[i] = s->last_dc[i];
02219
02220
02221 d->mv_bits= s->mv_bits;
02222 d->i_tex_bits= s->i_tex_bits;
02223 d->p_tex_bits= s->p_tex_bits;
02224 d->i_count= s->i_count;
02225 d->f_count= s->f_count;
02226 d->b_count= s->b_count;
02227 d->skip_count= s->skip_count;
02228 d->misc_bits= s->misc_bits;
02229 d->last_bits= 0;
02230
02231 d->mb_skipped= 0;
02232 d->qscale= s->qscale;
02233 d->dquant= s->dquant;
02234
02235 d->esc3_level_length= s->esc3_level_length;
02236 }
02237
02238 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
02239 int i;
02240
02241 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
02242 memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int));
02243
02244
02245 d->mb_skip_run= s->mb_skip_run;
02246 for(i=0; i<3; i++)
02247 d->last_dc[i] = s->last_dc[i];
02248
02249
02250 d->mv_bits= s->mv_bits;
02251 d->i_tex_bits= s->i_tex_bits;
02252 d->p_tex_bits= s->p_tex_bits;
02253 d->i_count= s->i_count;
02254 d->f_count= s->f_count;
02255 d->b_count= s->b_count;
02256 d->skip_count= s->skip_count;
02257 d->misc_bits= s->misc_bits;
02258
02259 d->mb_intra= s->mb_intra;
02260 d->mb_skipped= s->mb_skipped;
02261 d->mv_type= s->mv_type;
02262 d->mv_dir= s->mv_dir;
02263 d->pb= s->pb;
02264 if(s->data_partitioning){
02265 d->pb2= s->pb2;
02266 d->tex_pb= s->tex_pb;
02267 }
02268 d->block= s->block;
02269 for(i=0; i<8; i++)
02270 d->block_last_index[i]= s->block_last_index[i];
02271 d->interlaced_dct= s->interlaced_dct;
02272 d->qscale= s->qscale;
02273
02274 d->esc3_level_length= s->esc3_level_length;
02275 }
02276
02277 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
02278 PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
02279 int *dmin, int *next_block, int motion_x, int motion_y)
02280 {
02281 int score;
02282 uint8_t *dest_backup[3];
02283
02284 copy_context_before_encode(s, backup, type);
02285
02286 s->block= s->blocks[*next_block];
02287 s->pb= pb[*next_block];
02288 if(s->data_partitioning){
02289 s->pb2 = pb2 [*next_block];
02290 s->tex_pb= tex_pb[*next_block];
02291 }
02292
02293 if(*next_block){
02294 memcpy(dest_backup, s->dest, sizeof(s->dest));
02295 s->dest[0] = s->rd_scratchpad;
02296 s->dest[1] = s->rd_scratchpad + 16*s->linesize;
02297 s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
02298 assert(s->linesize >= 32);
02299 }
02300
02301 encode_mb(s, motion_x, motion_y);
02302
02303 score= put_bits_count(&s->pb);
02304 if(s->data_partitioning){
02305 score+= put_bits_count(&s->pb2);
02306 score+= put_bits_count(&s->tex_pb);
02307 }
02308
02309 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
02310 ff_MPV_decode_mb(s, s->block);
02311
02312 score *= s->lambda2;
02313 score += sse_mb(s) << FF_LAMBDA_SHIFT;
02314 }
02315
02316 if(*next_block){
02317 memcpy(s->dest, dest_backup, sizeof(s->dest));
02318 }
02319
02320 if(score<*dmin){
02321 *dmin= score;
02322 *next_block^=1;
02323
02324 copy_context_after_encode(best, s, type);
02325 }
02326 }
02327
02328 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
02329 uint32_t *sq = ff_squareTbl + 256;
02330 int acc=0;
02331 int x,y;
02332
02333 if(w==16 && h==16)
02334 return s->dsp.sse[0](NULL, src1, src2, stride, 16);
02335 else if(w==8 && h==8)
02336 return s->dsp.sse[1](NULL, src1, src2, stride, 8);
02337
02338 for(y=0; y<h; y++){
02339 for(x=0; x<w; x++){
02340 acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
02341 }
02342 }
02343
02344 av_assert2(acc>=0);
02345
02346 return acc;
02347 }
02348
02349 static int sse_mb(MpegEncContext *s){
02350 int w= 16;
02351 int h= 16;
02352
02353 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
02354 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
02355
02356 if(w==16 && h==16)
02357 if(s->avctx->mb_cmp == FF_CMP_NSSE){
02358 return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02359 +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02360 +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02361 }else{
02362 return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
02363 +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
02364 +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
02365 }
02366 else
02367 return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
02368 +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
02369 +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
02370 }
02371
02372 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
02373 MpegEncContext *s= *(void**)arg;
02374
02375
02376 s->me.pre_pass=1;
02377 s->me.dia_size= s->avctx->pre_dia_size;
02378 s->first_slice_line=1;
02379 for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
02380 for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
02381 ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02382 }
02383 s->first_slice_line=0;
02384 }
02385
02386 s->me.pre_pass=0;
02387
02388 return 0;
02389 }
02390
02391 static int estimate_motion_thread(AVCodecContext *c, void *arg){
02392 MpegEncContext *s= *(void**)arg;
02393
02394 ff_check_alignment();
02395
02396 s->me.dia_size= s->avctx->dia_size;
02397 s->first_slice_line=1;
02398 for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
02399 s->mb_x=0;
02400 ff_init_block_index(s);
02401 for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
02402 s->block_index[0]+=2;
02403 s->block_index[1]+=2;
02404 s->block_index[2]+=2;
02405 s->block_index[3]+=2;
02406
02407
02408 if(s->pict_type==AV_PICTURE_TYPE_B)
02409 ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
02410 else
02411 ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
02412 }
02413 s->first_slice_line=0;
02414 }
02415 return 0;
02416 }
02417
02418 static int mb_var_thread(AVCodecContext *c, void *arg){
02419 MpegEncContext *s= *(void**)arg;
02420 int mb_x, mb_y;
02421
02422 ff_check_alignment();
02423
02424 for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02425 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02426 int xx = mb_x * 16;
02427 int yy = mb_y * 16;
02428 uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
02429 int varc;
02430 int sum = s->dsp.pix_sum(pix, s->linesize);
02431
02432 varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
02433
02434 s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
02435 s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
02436 s->me.mb_var_sum_temp += varc;
02437 }
02438 }
02439 return 0;
02440 }
02441
02442 static void write_slice_end(MpegEncContext *s){
02443 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
02444 if(s->partitioned_frame){
02445 ff_mpeg4_merge_partitions(s);
02446 }
02447
02448 ff_mpeg4_stuffing(&s->pb);
02449 }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
02450 ff_mjpeg_encode_stuffing(s);
02451 }
02452
02453 avpriv_align_put_bits(&s->pb);
02454 flush_put_bits(&s->pb);
02455
02456 if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
02457 s->misc_bits+= get_bits_diff(s);
02458 }
02459
02460 static void write_mb_info(MpegEncContext *s)
02461 {
02462 uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
02463 int offset = put_bits_count(&s->pb);
02464 int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
02465 int gobn = s->mb_y / s->gob_index;
02466 int pred_x, pred_y;
02467 if (CONFIG_H263_ENCODER)
02468 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
02469 bytestream_put_le32(&ptr, offset);
02470 bytestream_put_byte(&ptr, s->qscale);
02471 bytestream_put_byte(&ptr, gobn);
02472 bytestream_put_le16(&ptr, mba);
02473 bytestream_put_byte(&ptr, pred_x);
02474 bytestream_put_byte(&ptr, pred_y);
02475
02476 bytestream_put_byte(&ptr, 0);
02477 bytestream_put_byte(&ptr, 0);
02478 }
02479
/**
 * Maintain the mb_info side-data records while encoding: once at least
 * s->mb_info bytes have been written since the last recorded position,
 * grow the buffer by one 12-byte record and (re)write the record for the
 * current macroblock via write_mb_info().
 *
 * @param startcode non-zero when called at a resync/startcode boundary;
 *                  then only the reference position is advanced and no
 *                  record is written.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* enough bytes emitted since the last record? start a new one */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* at a startcode the MB-position info written previously is still
         * valid for the following macroblock; nothing to rewrite here */
        return;
    }

    /* remember where this macroblock starts and (re)write its record;
     * the very first call must also allocate the initial record */
    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
02502
02503 static int encode_thread(AVCodecContext *c, void *arg){
02504 MpegEncContext *s= *(void**)arg;
02505 int mb_x, mb_y, pdif = 0;
02506 int chr_h= 16>>s->chroma_y_shift;
02507 int i, j;
02508 MpegEncContext best_s, backup_s;
02509 uint8_t bit_buf[2][MAX_MB_BYTES];
02510 uint8_t bit_buf2[2][MAX_MB_BYTES];
02511 uint8_t bit_buf_tex[2][MAX_MB_BYTES];
02512 PutBitContext pb[2], pb2[2], tex_pb[2];
02513
02514 ff_check_alignment();
02515
02516 for(i=0; i<2; i++){
02517 init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
02518 init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
02519 init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
02520 }
02521
02522 s->last_bits= put_bits_count(&s->pb);
02523 s->mv_bits=0;
02524 s->misc_bits=0;
02525 s->i_tex_bits=0;
02526 s->p_tex_bits=0;
02527 s->i_count=0;
02528 s->f_count=0;
02529 s->b_count=0;
02530 s->skip_count=0;
02531
02532 for(i=0; i<3; i++){
02533
02534
02535 s->last_dc[i] = 128 << s->intra_dc_precision;
02536
02537 s->current_picture.f.error[i] = 0;
02538 }
02539 if(s->codec_id==AV_CODEC_ID_AMV){
02540 s->last_dc[0] = 128*8/13;
02541 s->last_dc[1] = 128*8/14;
02542 s->last_dc[2] = 128*8/14;
02543 }
02544 s->mb_skip_run = 0;
02545 memset(s->last_mv, 0, sizeof(s->last_mv));
02546
02547 s->last_mv_dir = 0;
02548
02549 switch(s->codec_id){
02550 case AV_CODEC_ID_H263:
02551 case AV_CODEC_ID_H263P:
02552 case AV_CODEC_ID_FLV1:
02553 if (CONFIG_H263_ENCODER)
02554 s->gob_index = ff_h263_get_gob_height(s);
02555 break;
02556 case AV_CODEC_ID_MPEG4:
02557 if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
02558 ff_mpeg4_init_partitions(s);
02559 break;
02560 }
02561
02562 s->resync_mb_x=0;
02563 s->resync_mb_y=0;
02564 s->first_slice_line = 1;
02565 s->ptr_lastgob = s->pb.buf;
02566 for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
02567 s->mb_x=0;
02568 s->mb_y= mb_y;
02569
02570 ff_set_qscale(s, s->qscale);
02571 ff_init_block_index(s);
02572
02573 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
02574 int xy= mb_y*s->mb_stride + mb_x;
02575 int mb_type= s->mb_type[xy];
02576
02577 int dmin= INT_MAX;
02578 int dir;
02579
02580 if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
02581 av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
02582 return -1;
02583 }
02584 if(s->data_partitioning){
02585 if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
02586 || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
02587 av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
02588 return -1;
02589 }
02590 }
02591
02592 s->mb_x = mb_x;
02593 s->mb_y = mb_y;
02594 ff_update_block_index(s);
02595
02596 if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
02597 ff_h261_reorder_mb_index(s);
02598 xy= s->mb_y*s->mb_stride + s->mb_x;
02599 mb_type= s->mb_type[xy];
02600 }
02601
02602
02603 if(s->rtp_mode){
02604 int current_packet_size, is_gob_start;
02605
02606 current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
02607
02608 is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
02609
02610 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
02611
02612 switch(s->codec_id){
02613 case AV_CODEC_ID_H263:
02614 case AV_CODEC_ID_H263P:
02615 if(!s->h263_slice_structured)
02616 if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
02617 break;
02618 case AV_CODEC_ID_MPEG2VIDEO:
02619 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
02620 case AV_CODEC_ID_MPEG1VIDEO:
02621 if(s->mb_skip_run) is_gob_start=0;
02622 break;
02623 case AV_CODEC_ID_MJPEG:
02624 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
02625 break;
02626 }
02627
02628 if(is_gob_start){
02629 if(s->start_mb_y != mb_y || mb_x!=0){
02630 write_slice_end(s);
02631 if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
02632 ff_mpeg4_init_partitions(s);
02633 }
02634 }
02635
02636 av_assert2((put_bits_count(&s->pb)&7) == 0);
02637 current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
02638
02639 if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
02640 int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
02641 int d= 100 / s->avctx->error_rate;
02642 if(r % d == 0){
02643 current_packet_size=0;
02644 s->pb.buf_ptr= s->ptr_lastgob;
02645 assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
02646 }
02647 }
02648
02649 if (s->avctx->rtp_callback){
02650 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
02651 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
02652 }
02653 update_mb_info(s, 1);
02654
02655 switch(s->codec_id){
02656 case AV_CODEC_ID_MPEG4:
02657 if (CONFIG_MPEG4_ENCODER) {
02658 ff_mpeg4_encode_video_packet_header(s);
02659 ff_mpeg4_clean_buffers(s);
02660 }
02661 break;
02662 case AV_CODEC_ID_MPEG1VIDEO:
02663 case AV_CODEC_ID_MPEG2VIDEO:
02664 if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
02665 ff_mpeg1_encode_slice_header(s);
02666 ff_mpeg1_clean_buffers(s);
02667 }
02668 break;
02669 case AV_CODEC_ID_H263:
02670 case AV_CODEC_ID_H263P:
02671 if (CONFIG_H263_ENCODER)
02672 ff_h263_encode_gob_header(s, mb_y);
02673 break;
02674 }
02675
02676 if(s->flags&CODEC_FLAG_PASS1){
02677 int bits= put_bits_count(&s->pb);
02678 s->misc_bits+= bits - s->last_bits;
02679 s->last_bits= bits;
02680 }
02681
02682 s->ptr_lastgob += current_packet_size;
02683 s->first_slice_line=1;
02684 s->resync_mb_x=mb_x;
02685 s->resync_mb_y=mb_y;
02686 }
02687 }
02688
02689 if( (s->resync_mb_x == s->mb_x)
02690 && s->resync_mb_y+1 == s->mb_y){
02691 s->first_slice_line=0;
02692 }
02693
02694 s->mb_skipped=0;
02695 s->dquant=0;
02696
02697 update_mb_info(s, 0);
02698
02699 if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
02700 int next_block=0;
02701 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
02702
02703 copy_context_before_encode(&backup_s, s, -1);
02704 backup_s.pb= s->pb;
02705 best_s.data_partitioning= s->data_partitioning;
02706 best_s.partitioned_frame= s->partitioned_frame;
02707 if(s->data_partitioning){
02708 backup_s.pb2= s->pb2;
02709 backup_s.tex_pb= s->tex_pb;
02710 }
02711
02712 if(mb_type&CANDIDATE_MB_TYPE_INTER){
02713 s->mv_dir = MV_DIR_FORWARD;
02714 s->mv_type = MV_TYPE_16X16;
02715 s->mb_intra= 0;
02716 s->mv[0][0][0] = s->p_mv_table[xy][0];
02717 s->mv[0][0][1] = s->p_mv_table[xy][1];
02718 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
02719 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02720 }
02721 if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
02722 s->mv_dir = MV_DIR_FORWARD;
02723 s->mv_type = MV_TYPE_FIELD;
02724 s->mb_intra= 0;
02725 for(i=0; i<2; i++){
02726 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02727 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02728 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02729 }
02730 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
02731 &dmin, &next_block, 0, 0);
02732 }
02733 if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
02734 s->mv_dir = MV_DIR_FORWARD;
02735 s->mv_type = MV_TYPE_16X16;
02736 s->mb_intra= 0;
02737 s->mv[0][0][0] = 0;
02738 s->mv[0][0][1] = 0;
02739 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
02740 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02741 }
02742 if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
02743 s->mv_dir = MV_DIR_FORWARD;
02744 s->mv_type = MV_TYPE_8X8;
02745 s->mb_intra= 0;
02746 for(i=0; i<4; i++){
02747 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
02748 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
02749 }
02750 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
02751 &dmin, &next_block, 0, 0);
02752 }
02753 if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
02754 s->mv_dir = MV_DIR_FORWARD;
02755 s->mv_type = MV_TYPE_16X16;
02756 s->mb_intra= 0;
02757 s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
02758 s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
02759 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
02760 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
02761 }
02762 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
02763 s->mv_dir = MV_DIR_BACKWARD;
02764 s->mv_type = MV_TYPE_16X16;
02765 s->mb_intra= 0;
02766 s->mv[1][0][0] = s->b_back_mv_table[xy][0];
02767 s->mv[1][0][1] = s->b_back_mv_table[xy][1];
02768 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
02769 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
02770 }
02771 if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
02772 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02773 s->mv_type = MV_TYPE_16X16;
02774 s->mb_intra= 0;
02775 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
02776 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
02777 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
02778 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
02779 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
02780 &dmin, &next_block, 0, 0);
02781 }
02782 if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
02783 s->mv_dir = MV_DIR_FORWARD;
02784 s->mv_type = MV_TYPE_FIELD;
02785 s->mb_intra= 0;
02786 for(i=0; i<2; i++){
02787 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
02788 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
02789 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
02790 }
02791 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
02792 &dmin, &next_block, 0, 0);
02793 }
02794 if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
02795 s->mv_dir = MV_DIR_BACKWARD;
02796 s->mv_type = MV_TYPE_FIELD;
02797 s->mb_intra= 0;
02798 for(i=0; i<2; i++){
02799 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
02800 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
02801 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
02802 }
02803 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
02804 &dmin, &next_block, 0, 0);
02805 }
02806 if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
02807 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
02808 s->mv_type = MV_TYPE_FIELD;
02809 s->mb_intra= 0;
02810 for(dir=0; dir<2; dir++){
02811 for(i=0; i<2; i++){
02812 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
02813 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
02814 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
02815 }
02816 }
02817 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
02818 &dmin, &next_block, 0, 0);
02819 }
02820 if(mb_type&CANDIDATE_MB_TYPE_INTRA){
02821 s->mv_dir = 0;
02822 s->mv_type = MV_TYPE_16X16;
02823 s->mb_intra= 1;
02824 s->mv[0][0][0] = 0;
02825 s->mv[0][0][1] = 0;
02826 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
02827 &dmin, &next_block, 0, 0);
02828 if(s->h263_pred || s->h263_aic){
02829 if(best_s.mb_intra)
02830 s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
02831 else
02832 ff_clean_intra_table_entries(s);
02833 }
02834 }
02835
02836 if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
02837 if(best_s.mv_type==MV_TYPE_16X16){
02838 const int last_qp= backup_s.qscale;
02839 int qpi, qp, dc[6];
02840 DCTELEM ac[6][16];
02841 const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
02842 static const int dquant_tab[4]={-1,1,-2,2};
02843
02844 av_assert2(backup_s.dquant == 0);
02845
02846
02847 s->mv_dir= best_s.mv_dir;
02848 s->mv_type = MV_TYPE_16X16;
02849 s->mb_intra= best_s.mb_intra;
02850 s->mv[0][0][0] = best_s.mv[0][0][0];
02851 s->mv[0][0][1] = best_s.mv[0][0][1];
02852 s->mv[1][0][0] = best_s.mv[1][0][0];
02853 s->mv[1][0][1] = best_s.mv[1][0][1];
02854
02855 qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
02856 for(; qpi<4; qpi++){
02857 int dquant= dquant_tab[qpi];
02858 qp= last_qp + dquant;
02859 if(qp < s->avctx->qmin || qp > s->avctx->qmax)
02860 continue;
02861 backup_s.dquant= dquant;
02862 if(s->mb_intra && s->dc_val[0]){
02863 for(i=0; i<6; i++){
02864 dc[i]= s->dc_val[0][ s->block_index[i] ];
02865 memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
02866 }
02867 }
02868
02869 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02870 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
02871 if(best_s.qscale != qp){
02872 if(s->mb_intra && s->dc_val[0]){
02873 for(i=0; i<6; i++){
02874 s->dc_val[0][ s->block_index[i] ]= dc[i];
02875 memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
02876 }
02877 }
02878 }
02879 }
02880 }
02881 }
02882 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
02883 int mx= s->b_direct_mv_table[xy][0];
02884 int my= s->b_direct_mv_table[xy][1];
02885
02886 backup_s.dquant = 0;
02887 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02888 s->mb_intra= 0;
02889 ff_mpeg4_set_direct_mv(s, mx, my);
02890 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02891 &dmin, &next_block, mx, my);
02892 }
02893 if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
02894 backup_s.dquant = 0;
02895 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
02896 s->mb_intra= 0;
02897 ff_mpeg4_set_direct_mv(s, 0, 0);
02898 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
02899 &dmin, &next_block, 0, 0);
02900 }
02901 if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
02902 int coded=0;
02903 for(i=0; i<6; i++)
02904 coded |= s->block_last_index[i];
02905 if(coded){
02906 int mx,my;
02907 memcpy(s->mv, best_s.mv, sizeof(s->mv));
02908 if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
02909 mx=my=0;
02910 ff_mpeg4_set_direct_mv(s, mx, my);
02911 }else if(best_s.mv_dir&MV_DIR_BACKWARD){
02912 mx= s->mv[1][0][0];
02913 my= s->mv[1][0][1];
02914 }else{
02915 mx= s->mv[0][0][0];
02916 my= s->mv[0][0][1];
02917 }
02918
02919 s->mv_dir= best_s.mv_dir;
02920 s->mv_type = best_s.mv_type;
02921 s->mb_intra= 0;
02922
02923
02924
02925
02926 backup_s.dquant= 0;
02927 s->skipdct=1;
02928 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
02929 &dmin, &next_block, mx, my);
02930 s->skipdct=0;
02931 }
02932 }
02933
02934 s->current_picture.f.qscale_table[xy] = best_s.qscale;
02935
02936 copy_context_after_encode(s, &best_s, -1);
02937
02938 pb_bits_count= put_bits_count(&s->pb);
02939 flush_put_bits(&s->pb);
02940 avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
02941 s->pb= backup_s.pb;
02942
02943 if(s->data_partitioning){
02944 pb2_bits_count= put_bits_count(&s->pb2);
02945 flush_put_bits(&s->pb2);
02946 avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
02947 s->pb2= backup_s.pb2;
02948
02949 tex_pb_bits_count= put_bits_count(&s->tex_pb);
02950 flush_put_bits(&s->tex_pb);
02951 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
02952 s->tex_pb= backup_s.tex_pb;
02953 }
02954 s->last_bits= put_bits_count(&s->pb);
02955
02956 if (CONFIG_H263_ENCODER &&
02957 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
02958 ff_h263_update_motion_val(s);
02959
02960 if(next_block==0){
02961 s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
02962 s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
02963 s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
02964 }
02965
02966 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
02967 ff_MPV_decode_mb(s, s->block);
02968 } else {
02969 int motion_x = 0, motion_y = 0;
02970 s->mv_type=MV_TYPE_16X16;
02971
02972
02973 switch(mb_type){
02974 case CANDIDATE_MB_TYPE_INTRA:
02975 s->mv_dir = 0;
02976 s->mb_intra= 1;
02977 motion_x= s->mv[0][0][0] = 0;
02978 motion_y= s->mv[0][0][1] = 0;
02979 break;
02980 case CANDIDATE_MB_TYPE_INTER:
02981 s->mv_dir = MV_DIR_FORWARD;
02982 s->mb_intra= 0;
02983 motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
02984 motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
02985 break;
02986 case CANDIDATE_MB_TYPE_INTER_I:
02987 s->mv_dir = MV_DIR_FORWARD;
02988 s->mv_type = MV_TYPE_FIELD;
02989 s->mb_intra= 0;
02990 for(i=0; i<2; i++){
02991 j= s->field_select[0][i] = s->p_field_select_table[i][xy];
02992 s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
02993 s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
02994 }
02995 break;
02996 case CANDIDATE_MB_TYPE_INTER4V:
02997 s->mv_dir = MV_DIR_FORWARD;
02998 s->mv_type = MV_TYPE_8X8;
02999 s->mb_intra= 0;
03000 for(i=0; i<4; i++){
03001 s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
03002 s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
03003 }
03004 break;
03005 case CANDIDATE_MB_TYPE_DIRECT:
03006 if (CONFIG_MPEG4_ENCODER) {
03007 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
03008 s->mb_intra= 0;
03009 motion_x=s->b_direct_mv_table[xy][0];
03010 motion_y=s->b_direct_mv_table[xy][1];
03011 ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
03012 }
03013 break;
03014 case CANDIDATE_MB_TYPE_DIRECT0:
03015 if (CONFIG_MPEG4_ENCODER) {
03016 s->mv_dir = MV_DIR_FORWARD|MV_DIR_BACKWARD|MV_DIRECT;
03017 s->mb_intra= 0;
03018 ff_mpeg4_set_direct_mv(s, 0, 0);
03019 }
03020 break;
03021 case CANDIDATE_MB_TYPE_BIDIR:
03022 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
03023 s->mb_intra= 0;
03024 s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
03025 s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
03026 s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
03027 s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
03028 break;
03029 case CANDIDATE_MB_TYPE_BACKWARD:
03030 s->mv_dir = MV_DIR_BACKWARD;
03031 s->mb_intra= 0;
03032 motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
03033 motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
03034 break;
03035 case CANDIDATE_MB_TYPE_FORWARD:
03036 s->mv_dir = MV_DIR_FORWARD;
03037 s->mb_intra= 0;
03038 motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
03039 motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
03040 break;
03041 case CANDIDATE_MB_TYPE_FORWARD_I:
03042 s->mv_dir = MV_DIR_FORWARD;
03043 s->mv_type = MV_TYPE_FIELD;
03044 s->mb_intra= 0;
03045 for(i=0; i<2; i++){
03046 j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
03047 s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
03048 s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
03049 }
03050 break;
03051 case CANDIDATE_MB_TYPE_BACKWARD_I:
03052 s->mv_dir = MV_DIR_BACKWARD;
03053 s->mv_type = MV_TYPE_FIELD;
03054 s->mb_intra= 0;
03055 for(i=0; i<2; i++){
03056 j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
03057 s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
03058 s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
03059 }
03060 break;
03061 case CANDIDATE_MB_TYPE_BIDIR_I:
03062 s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
03063 s->mv_type = MV_TYPE_FIELD;
03064 s->mb_intra= 0;
03065 for(dir=0; dir<2; dir++){
03066 for(i=0; i<2; i++){
03067 j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
03068 s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
03069 s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
03070 }
03071 }
03072 break;
03073 default:
03074 av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
03075 }
03076
03077 encode_mb(s, motion_x, motion_y);
03078
03079
03080 s->last_mv_dir = s->mv_dir;
03081
03082 if (CONFIG_H263_ENCODER &&
03083 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
03084 ff_h263_update_motion_val(s);
03085
03086 ff_MPV_decode_mb(s, s->block);
03087 }
03088
03089
03090 if(s->mb_intra ){
03091 s->p_mv_table[xy][0]=0;
03092 s->p_mv_table[xy][1]=0;
03093 }
03094
03095 if(s->flags&CODEC_FLAG_PSNR){
03096 int w= 16;
03097 int h= 16;
03098
03099 if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
03100 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
03101
03102 s->current_picture.f.error[0] += sse(
03103 s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
03104 s->dest[0], w, h, s->linesize);
03105 s->current_picture.f.error[1] += sse(
03106 s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
03107 s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
03108 s->current_picture.f.error[2] += sse(
03109 s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
03110 s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
03111 }
03112 if(s->loop_filter){
03113 if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
03114 ff_h263_loop_filter(s);
03115 }
03116 av_dlog(s->avctx, "MB %d %d bits\n",
03117 s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
03118 }
03119 }
03120
03121
03122 if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
03123 ff_msmpeg4_encode_ext_header(s);
03124
03125 write_slice_end(s);
03126
03127
03128 if (s->avctx->rtp_callback) {
03129 int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
03130 pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
03131
03132 emms_c();
03133 s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
03134 }
03135
03136 return 0;
03137 }
03138
03139 #define MERGE(field) dst->field += src->field; src->field=0
03140 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
03141 MERGE(me.scene_change_score);
03142 MERGE(me.mc_mb_var_sum_temp);
03143 MERGE(me.mb_var_sum_temp);
03144 }
03145
/* Merge the per-slice-thread encoding statistics into the main context and
 * append the thread's bitstream to the main bitstream. MERGE(f) adds
 * src->f into dst->f and zeroes src->f. */
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
    int i;

    MERGE(dct_count[0]);
    MERGE(dct_count[1]);
    MERGE(mv_bits);
    MERGE(i_tex_bits);
    MERGE(p_tex_bits);
    MERGE(i_count);
    MERGE(f_count);
    MERGE(b_count);
    MERGE(skip_count);
    MERGE(misc_bits);
    MERGE(error_count);
    MERGE(padding_bug_score);
    MERGE(current_picture.f.error[0]);
    MERGE(current_picture.f.error[1]);
    MERGE(current_picture.f.error[2]);

    /* noise-reduction accumulators only exist when noise_reduction is on */
    if(dst->avctx->noise_reduction){
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* slices are byte aligned, so the thread bitstream can be appended
     * with a plain byte copy */
    assert(put_bits_count(&src->pb) % 8 ==0);
    assert(put_bits_count(&dst->pb) % 8 ==0);
    avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
03177
03178 static int estimate_qp(MpegEncContext *s, int dry_run){
03179 if (s->next_lambda){
03180 s->current_picture_ptr->f.quality =
03181 s->current_picture.f.quality = s->next_lambda;
03182 if(!dry_run) s->next_lambda= 0;
03183 } else if (!s->fixed_qscale) {
03184 s->current_picture_ptr->f.quality =
03185 s->current_picture.f.quality = ff_rate_estimate_qscale(s, dry_run);
03186 if (s->current_picture.f.quality < 0)
03187 return -1;
03188 }
03189
03190 if(s->adaptive_quant){
03191 switch(s->codec_id){
03192 case AV_CODEC_ID_MPEG4:
03193 if (CONFIG_MPEG4_ENCODER)
03194 ff_clean_mpeg4_qscales(s);
03195 break;
03196 case AV_CODEC_ID_H263:
03197 case AV_CODEC_ID_H263P:
03198 case AV_CODEC_ID_FLV1:
03199 if (CONFIG_H263_ENCODER)
03200 ff_clean_h263_qscales(s);
03201 break;
03202 default:
03203 ff_init_qscale_tab(s);
03204 }
03205
03206 s->lambda= s->lambda_table[0];
03207
03208 }else
03209 s->lambda = s->current_picture.f.quality;
03210 update_qscale(s);
03211 return 0;
03212 }
03213
03214
03215 static void set_frame_distances(MpegEncContext * s){
03216 assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
03217 s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
03218
03219 if(s->pict_type==AV_PICTURE_TYPE_B){
03220 s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
03221 assert(s->pb_time > 0 && s->pb_time < s->pp_time);
03222 }else{
03223 s->pp_time= s->time - s->last_non_b_time;
03224 s->last_non_b_time= s->time;
03225 assert(s->picture_number==0 || s->pp_time > 0);
03226 }
03227 }
03228
/**
 * Encode one complete picture: run motion estimation / mode decision on all
 * slice threads, handle scene-change promotion to I, fix fcodes and long MVs,
 * pick the quantizer, write the picture header and run the encode threads.
 *
 * @param s              main encoder context (slice context 0)
 * @param picture_number number written into the picture headers
 * @return 0 on success, -1 on rate-control or ME-init failure
 */
static int encode_picture(MpegEncContext *s, int picture_number)
{
    int i;
    int bits;
    int context_count = s->slice_context_count;

    s->picture_number = picture_number;

    /* reset the ME statistics that the slice threads will accumulate */
    s->me.mb_var_sum_temp =
    s->me.mc_mb_var_sum_temp = 0;

    /* temporal distances are needed before B frames can be encoded */
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
        set_frame_distances(s);
    if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
        ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

    /* rounding control: MSMPEG4 v3+ forces no_rounding on I frames,
     * flip-flopping codecs toggle it on each non-B reference frame */
    if(s->pict_type==AV_PICTURE_TYPE_I){
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
            s->no_rounding ^= 1;
    }

    if(s->flags & CODEC_FLAG_PASS2){
        /* second pass: quality and fcode come from the first-pass stats */
        if (estimate_qp(s,1) < 0)
            return -1;
        ff_get_2pass_fcode(s);
    }else if(!(s->flags & CODEC_FLAG_QSCALE)){
        /* use last lambda of the same picture-type class as an ME estimate */
        if(s->pict_type==AV_PICTURE_TYPE_B)
            s->lambda= s->last_lambda_for[s->pict_type];
        else
            s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
        update_qscale(s);
    }

    /* for all codecs except AMV the chroma intra matrix aliases the luma one;
     * free any separately allocated copy before re-aliasing */
    if(s->codec_id != AV_CODEC_ID_AMV){
        if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
        if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
        s->q_chroma_intra_matrix = s->q_intra_matrix;
        s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
    }

    s->mb_intra=0;
    for(i=1; i<context_count; i++){
        ff_update_duplicate_context(s->thread_context[i], s);
    }

    if(ff_init_me(s)<0)
        return -1;

    /* motion estimation (or MB variance gathering on intra frames) */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else {
        /* I frame: mark every MB intra */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if(!s->fixed_qscale){
            /* adaptive quant still needs per-MB variances */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->thread_context[i]);
    }
    s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
    s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
    emms_c();

    /* scene change: promote the P frame to I and mark all MBs intra */
    if(s->me.scene_change_score > s->avctx->scenechange_threshold && s->pict_type == AV_PICTURE_TYPE_P){
        s->pict_type= AV_PICTURE_TYPE_I;
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
        if(s->msmpeg4_version >= 3)
            s->no_rounding=1;
        av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
                s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
    }

    if(!s->umvplus){
        /* choose fcodes and clip overlong MVs to what the fcode can code */
        if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
            s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int a,b;
                a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I);
                b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
                s->f_code= FFMAX3(s->f_code, a, b);
            }

            ff_fix_long_p_mvs(s);
            ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int j;
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++)
                        ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                        s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, 0);
                }
            }
        }

        if(s->pict_type==AV_PICTURE_TYPE_B){
            int a, b;

            /* forward fcode covers forward and bidir-forward tables */
            a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
            b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->f_code = FFMAX(a, b);

            /* backward fcode covers backward and bidir-backward tables */
            a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
            b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->b_code = FFMAX(a, b);

            ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            if(s->flags & CODEC_FLAG_INTERLACED_ME){
                int dir, j;
                for(dir=0; dir<2; dir++){
                    for(i=0; i<2; i++){
                        for(j=0; j<2; j++){
                            int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
                                          : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
                            ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                            s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                        }
                    }
                }
            }
        }
    }

    if (estimate_qp(s, 0) < 0)
        return -1;

    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
        s->qscale= 3;

    if (s->out_format == FMT_MJPEG) {
        /* MJPEG: bake qscale into the intra matrix, then encode with qscale 8 */
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[i];

            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
        }
        s->y_dc_scale_table=
        s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
        s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }
    if(s->codec_id == AV_CODEC_ID_AMV){
        /* AMV: fixed SP5X quant tables with separate luma/chroma matrices */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        }
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(&s->dsp, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                       s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }

    /* publish the final picture type (it may have changed on scene change) */
    s->current_picture_ptr->f.key_frame =
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
    s->current_picture_ptr->f.pict_type =
    s->current_picture.f.pict_type = s->pict_type;

    if (s->current_picture.f.key_frame)
        s->picture_in_gop_number=0;

    /* write the picture header for the selected output format */
    s->mb_x = s->mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    switch(s->out_format) {
    case FMT_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            ff_mjpeg_encode_picture_header(s);
        break;
    case FMT_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        break;
    case FMT_H263:
        if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            ff_msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
            ff_mpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
            ff_rv10_encode_picture_header(s, picture_number);
        else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
            ff_rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_picture_header(s, picture_number);
        break;
    case FMT_H264:
        break;
    default:
        av_assert0(0);
    }
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    /* run the actual per-slice encoding and merge the results */
    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->thread_context[i], s);
    }
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        merge_context_after_encode(s, s->thread_context[i]);
    }
    emms_c();
    return 0;
}
03477
03478 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
03479 const int intra= s->mb_intra;
03480 int i;
03481
03482 s->dct_count[intra]++;
03483
03484 for(i=0; i<64; i++){
03485 int level= block[i];
03486
03487 if(level){
03488 if(level>0){
03489 s->dct_error_sum[intra][i] += level;
03490 level -= s->dct_offset[intra][i];
03491 if(level<0) level=0;
03492 }else{
03493 s->dct_error_sum[intra][i] -= level;
03494 level += s->dct_offset[intra][i];
03495 if(level>0) level=0;
03496 }
03497 block[i]= level;
03498 }
03499 }
03500 }
03501
/**
 * Rate-distortion optimal (trellis) quantization of one 8x8 block.
 *
 * Runs the forward DCT, then searches over per-coefficient level choices
 * (the two nearest quantized levels per coefficient) with a survivor-based
 * dynamic program that trades VLC bits (via the length tables) against
 * reconstruction distortion, weighted by lambda.
 *
 * @param block    DCT block (input samples, output quantized levels)
 * @param n        block index (0..3 luma, >=4 chroma)
 * @param qscale   quantizer scale
 * @param overflow set to 1 if a level exceeded s->max_qcoeff
 * @return index of the last non-zero coefficient, or -1 if all zero
 */
static int dct_quantize_trellis_c(MpegEncContext *s,
                                  DCTELEM *block, int n,
                                  int qscale, int *overflow){
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];          /* the two candidate levels per position */
    int coeff_count[64];       /* 1 or 2 candidates per position */
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->mb_intra) {
        int q;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* AIC: fixed DC scale, no AC offset */
            q = 1 << 3;
            qadd=0;
        }

        /* DC is quantized separately with rounding; the trellis starts at 1 */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
            bias= 1<<(QMAT_SHIFT-1);
        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* find the last coefficient that can quantize to a non-zero level */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build the 1-2 candidate levels per position (level and level-1) */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* rounds to zero; candidate is +/-1 with the sign of level */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max;

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic program over coefficient positions */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->dsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* dequantize to measure reconstruction distortion */
            if(s->out_format == FMT_H263){
                unquant_coeff= alevel*qmul + qadd;
            }else{
                j= s->dsp.idct_permutation[ scantable[i] ];
                if(s->mb_intra){
                    unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the VLC tables: rate from the length tables */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    /* also track the best "this is the last coeff" choice */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* level needs escape coding: flat escape cost */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        /* prune survivors that can no longer win */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263){
        /* no special last-coeff VLC: pick the best stopping point now */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2;

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* inter block with only a "DC" coefficient: decide zero vs +/-level */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263){
                unquant_coeff= (alevel*qmul + qadd)>>3;
            }else{
                unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                unquant_coeff = (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else return last_non_zero;
    }

    /* backtrack the winning path into the (permuted) block */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
03793
03794
03795 static int16_t basis[64][64];
03796
03797 static void build_basis(uint8_t *perm){
03798 int i, j, x, y;
03799 emms_c();
03800 for(i=0; i<8; i++){
03801 for(j=0; j<8; j++){
03802 for(y=0; y<8; y++){
03803 for(x=0; x<8; x++){
03804 double s= 0.25*(1<<BASIS_SHIFT);
03805 int index= 8*i + j;
03806 int perm_index= perm[index];
03807 if(i==0) s*= sqrt(0.5);
03808 if(j==0) s*= sqrt(0.5);
03809 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
03810 }
03811 }
03812 }
03813 }
03814 }
03815
/**
 * Iteratively refine an already-quantized block for better rate-distortion.
 *
 * Maintains the spatial-domain reconstruction error in rem[] (via the basis
 * functions) and greedily applies +/-1 changes to coefficients as long as a
 * change lowers distortion + lambda * (VLC bit delta).
 *
 * @param block  quantized coefficients (modified in place)
 * @param weight per-position perceptual weights (rescaled in place)
 * @param orig   original spatial-domain samples
 * @param n      block index (0..3 luma, >=4 chroma)
 * @return index of the last non-zero coefficient
 */
static int dct_quantize_refine(MpegEncContext *s,
                        DCTELEM *block, int16_t *weight, DCTELEM *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum;
#ifdef REFINE_STATS
static int count=0;
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int raise=0;
static int lower=0;
static int messed_sign=0;
#endif

    /* lazily build the DCT basis tables on first use */
    if(basis[0][0] == 0)
        build_basis(s->dsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* AIC: DC uses a fixed scale and no offset */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;

        dc= block[0]*q;

        start_i = 1;
        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        dc= 0;
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

#ifdef REFINE_STATS
{START_TIMER
#endif
    /* initialize rem[] with the (negated) original signal plus DC */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i]= dc - (orig[i]<<RECON_SHIFT);
    }
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
#endif
    /* rescale the weights and compute lambda from their energy */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w;

        weight[i] = w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
#ifdef REFINE_STATS
{START_TIMER
#endif
    /* record the run-length structure and add each coded coefficient's
     * contribution to rem[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->dsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("init rem[]")
}
}

{START_TIMER
#endif
    /* greedy refinement loop: try +/-1 on each coefficient, keep the best
     * improving change, repeat until nothing improves */
    for(;;){
        int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
#ifdef REFINE_STATS
{START_TIMER
#endif
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
#ifdef REFINE_STATS
{START_TIMER
#endif
            /* gradient of the error in DCT domain, used to reject changes
             * with the wrong sign */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
#ifdef REFINE_STATS
STOP_TIMER("rem*w*w")}
{START_TIMER
#endif
            s->dsp.fdct(d1);
#ifdef REFINE_STATS
STOP_TIMER("dct")}
#endif
        }

        if(start_i){
            /* intra: also try changing the separately-coded DC */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++];
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                    continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;

                    if(level){
                        /* nonzero -> nonzero: VLC length delta */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* zero -> +/-1: a run is split in two */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* +/-1 -> zero: two runs are merged */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }
#ifdef REFINE_STATS
STOP_TIMER("iterative step")}
#endif

        if(best_change){
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
            }else{
#ifdef REFINE_STATS
if(block[j]){
if(block[j] - best_change){
if(FFABS(block[j]) > FFABS(block[j] - best_change)){
raise++;
}else{
lower++;
}
}else{
from_zero++;
}
}else{
to_zero++;
}
#endif
                /* the change may have zeroed the former last coefficient */
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
            /* rebuild the run table and apply the change to rem[] */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("iterative search")
}
}
#endif

    return last_non_zero;
}
04188
04189 int ff_dct_quantize_c(MpegEncContext *s,
04190 DCTELEM *block, int n,
04191 int qscale, int *overflow)
04192 {
04193 int i, j, level, last_non_zero, q, start_i;
04194 const int *qmat;
04195 const uint8_t *scantable= s->intra_scantable.scantable;
04196 int bias;
04197 int max=0;
04198 unsigned int threshold1, threshold2;
04199
04200 s->dsp.fdct (block);
04201
04202 if(s->dct_error_sum)
04203 s->denoise_dct(s, block);
04204
04205 if (s->mb_intra) {
04206 if (!s->h263_aic) {
04207 if (n < 4)
04208 q = s->y_dc_scale;
04209 else
04210 q = s->c_dc_scale;
04211 q = q << 3;
04212 } else
04213
04214 q = 1 << 3;
04215
04216
04217 block[0] = (block[0] + (q >> 1)) / q;
04218 start_i = 1;
04219 last_non_zero = 0;
04220 qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
04221 bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04222 } else {
04223 start_i = 0;
04224 last_non_zero = -1;
04225 qmat = s->q_inter_matrix[qscale];
04226 bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
04227 }
04228 threshold1= (1<<QMAT_SHIFT) - bias - 1;
04229 threshold2= (threshold1<<1);
04230 for(i=63;i>=start_i;i--) {
04231 j = scantable[i];
04232 level = block[j] * qmat[j];
04233
04234 if(((unsigned)(level+threshold1))>threshold2){
04235 last_non_zero = i;
04236 break;
04237 }else{
04238 block[j]=0;
04239 }
04240 }
04241 for(i=start_i; i<=last_non_zero; i++) {
04242 j = scantable[i];
04243 level = block[j] * qmat[j];
04244
04245
04246
04247 if(((unsigned)(level+threshold1))>threshold2){
04248 if(level>0){
04249 level= (bias + level)>>QMAT_SHIFT;
04250 block[j]= level;
04251 }else{
04252 level= (bias - level)>>QMAT_SHIFT;
04253 block[j]= -level;
04254 }
04255 max |=level;
04256 }else{
04257 block[j]=0;
04258 }
04259 }
04260 *overflow= s->max_qcoeff < max;
04261
04262
04263 if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
04264 ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
04265
04266 return last_non_zero;
04267 }
04268
/* Shorthand for declaring encoder-private AVOptions below. */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the baseline H.263 encoder, on top of the generic
 * mpegvideo options pulled in by FF_MPV_COMMON_OPTS. */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    FF_MPV_COMMON_OPTS
    { NULL },
};
04278
/* AVClass binding the private options above to the H.263 encoder. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
04285
04286 AVCodec ff_h263_encoder = {
04287 .name = "h263",
04288 .type = AVMEDIA_TYPE_VIDEO,
04289 .id = AV_CODEC_ID_H263,
04290 .priv_data_size = sizeof(MpegEncContext),
04291 .init = ff_MPV_encode_init,
04292 .encode2 = ff_MPV_encode_picture,
04293 .close = ff_MPV_encode_end,
04294 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
04295 .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
04296 .priv_class = &h263_class,
04297 };
04298
/* Private options of the H.263+ (H.263-1998) encoder; extends the baseline
 * set with unlimited MVs and alternative inter VLC. */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    FF_MPV_COMMON_OPTS
    { NULL },
};
/* AVClass binding h263p_options to the H.263+ encoder. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
04313
/* H.263+ encoder; unlike baseline H.263 it supports slice threading. */
AVCodec ff_h263p_encoder = {
    .name           = "h263p",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .capabilities   = CODEC_CAP_SLICE_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .priv_class     = &h263p_class,
};
04327
/* Generates msmpeg4v2_class with only the generic mpegvideo options. */
FF_MPV_GENERIC_CLASS(msmpeg4v2)

/* MS-MPEG4 v2 encoder (shared mpegvideo core, no private options). */
AVCodec ff_msmpeg4v2_encoder = {
    .name           = "msmpeg4v2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .priv_class     = &msmpeg4v2_class,
};
04342
/* Generates msmpeg4v3_class with only the generic mpegvideo options. */
FF_MPV_GENERIC_CLASS(msmpeg4v3)

/* MS-MPEG4 v3 encoder; note the codec name is just "msmpeg4". */
AVCodec ff_msmpeg4v3_encoder = {
    .name           = "msmpeg4",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .priv_class     = &msmpeg4v3_class,
};
04357
/* Generates wmv1_class with only the generic mpegvideo options. */
FF_MPV_GENERIC_CLASS(wmv1)

/* WMV1 (Windows Media Video 7) encoder, shared mpegvideo core. */
AVCodec ff_wmv1_encoder = {
    .name           = "wmv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_MPV_encode_init,
    .encode2        = ff_MPV_encode_picture,
    .close          = ff_MPV_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .priv_class     = &wmv1_class,
};