FFmpeg
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/internal.h"
31 #include "libavutil/intmath.h"
32 #include "libavutil/mathematics.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/opt.h"
35 #include "avcodec.h"
36 #include "dct.h"
37 #include "dsputil.h"
38 #include "mpegvideo.h"
39 #include "h263.h"
40 #include "mathops.h"
41 #include "mjpegenc.h"
42 #include "msmpeg4.h"
43 #include "faandct.h"
44 #include "thread.h"
45 #include "aandcttab.h"
46 #include "flv.h"
47 #include "mpeg4video.h"
48 #include "internal.h"
49 #include "bytestream.h"
50 #include <limits.h>
51 #include "sp5x.h"
52 
53 //#undef NDEBUG
54 //#include <assert.h>
55 
56 static int encode_picture(MpegEncContext *s, int picture_number);
57 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
58 static int sse_mb(MpegEncContext *s);
59 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
60 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
61 
62 //#define DEBUG
63 
66 
69  { NULL },
70 };
71 
72 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
73  uint16_t (*qmat16)[2][64],
74  const uint16_t *quant_matrix,
75  int bias, int qmin, int qmax, int intra)
76 {
77  int qscale;
78  int shift = 0;
79 
80  for (qscale = qmin; qscale <= qmax; qscale++) {
81  int i;
82  if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
83  dsp->fdct == ff_jpeg_fdct_islow_10 ||
84  dsp->fdct == ff_faandct) {
85  for (i = 0; i < 64; i++) {
86  const int j = dsp->idct_permutation[i];
87  /* 16 <= qscale * quant_matrix[i] <= 7905
88  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
89  * 19952 <= x <= 249205026
90  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
91  * 3444240 >= (1 << 36) / (x) >= 275 */
92 
93  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
94  (qscale * quant_matrix[j]));
95  }
96  } else if (dsp->fdct == ff_fdct_ifast) {
97  for (i = 0; i < 64; i++) {
98  const int j = dsp->idct_permutation[i];
99  /* 16 <= qscale * quant_matrix[i] <= 7905
100  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
101  * 19952 <= x <= 249205026
102  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
103  * 3444240 >= (1 << 36) / (x) >= 275 */
104 
105  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
106  (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
107  }
108  } else {
109  for (i = 0; i < 64; i++) {
110  const int j = dsp->idct_permutation[i];
111  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
112  * Assume x = qscale * quant_matrix[i]
113  * So 16 <= x <= 7905
114  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
115  * so 32768 >= (1 << 19) / (x) >= 67 */
116  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
117  (qscale * quant_matrix[j]));
118  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
119  // (qscale * quant_matrix[i]);
120  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
121  (qscale * quant_matrix[j]);
122 
123  if (qmat16[qscale][0][i] == 0 ||
124  qmat16[qscale][0][i] == 128 * 256)
125  qmat16[qscale][0][i] = 128 * 256 - 1;
126  qmat16[qscale][1][i] =
127  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
128  qmat16[qscale][0][i]);
129  }
130  }
131 
132  for (i = intra; i < 64; i++) {
133  int64_t max = 8191;
134  if (dsp->fdct == ff_fdct_ifast) {
135  max = (8191LL * ff_aanscales[i]) >> 14;
136  }
137  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
138  shift++;
139  }
140  }
141  }
142  if (shift) {
144  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
145  QMAT_SHIFT - shift);
146  }
147 }
148 
149 static inline void update_qscale(MpegEncContext *s)
150 {
151  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
152  (FF_LAMBDA_SHIFT + 7);
153  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
154 
155  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
157 }
158 
159 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
160 {
161  int i;
162 
163  if (matrix) {
164  put_bits(pb, 1, 1);
165  for (i = 0; i < 64; i++) {
166  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
167  }
168  } else
169  put_bits(pb, 1, 0);
170 }
171 
172 /**
173  * init s->current_picture.qscale_table from s->lambda_table
174  */
176 {
177  int8_t * const qscale_table = s->current_picture.f.qscale_table;
178  int i;
179 
180  for (i = 0; i < s->mb_num; i++) {
181  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
182  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
183  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
184  s->avctx->qmax);
185  }
186 }
187 
189  const AVFrame *src)
190 {
191  int i;
192 
193  dst->pict_type = src->pict_type;
194  dst->quality = src->quality;
197  //dst->reference = src->reference;
198  dst->pts = src->pts;
200  dst->top_field_first = src->top_field_first;
201 
202  if (s->avctx->me_threshold) {
203  if (!src->motion_val[0])
204  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
205  if (!src->mb_type)
206  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
207  if (!src->ref_index[0])
208  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
211  "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
213 
214  memcpy(dst->mb_type, src->mb_type,
215  s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
216 
217  for (i = 0; i < 2; i++) {
218  int stride = ((16 * s->mb_width ) >>
219  src->motion_subsample_log2) + 1;
220  int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
221 
222  if (src->motion_val[i] &&
223  src->motion_val[i] != dst->motion_val[i]) {
224  memcpy(dst->motion_val[i], src->motion_val[i],
225  2 * stride * height * sizeof(int16_t));
226  }
227  if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
228  memcpy(dst->ref_index[i], src->ref_index[i],
229  s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
230  }
231  }
232  }
233 }
234 
236  MpegEncContext *src)
237 {
238 #define COPY(a) dst->a= src->a
239  COPY(pict_type);
241  COPY(f_code);
242  COPY(b_code);
243  COPY(qscale);
244  COPY(lambda);
245  COPY(lambda2);
248  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
249  COPY(progressive_frame); // FIXME don't set in encode_header
250  COPY(partitioned_frame); // FIXME don't set in encode_header
251 #undef COPY
252 }
253 
254 /**
255  * Set the given MpegEncContext to defaults for encoding.
256  * the changed fields will not depend upon the prior state of the MpegEncContext.
257  */
259 {
260  int i;
262 
263  for (i = -16; i < 16; i++) {
264  default_fcode_tab[i + MAX_MV] = 1;
265  }
268 }
269 
271  if (ARCH_X86)
273 
274  if (!s->dct_quantize)
276  if (!s->denoise_dct)
279  if (s->avctx->trellis)
281 
282  return 0;
283 }
284 
285 /* init video encoder */
287 {
288  MpegEncContext *s = avctx->priv_data;
289  int i;
290  int chroma_h_shift, chroma_v_shift;
291 
293 
294  switch (avctx->codec_id) {
296  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
297  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
298  av_log(avctx, AV_LOG_ERROR,
299  "only YUV420 and YUV422 are supported\n");
300  return -1;
301  }
302  break;
303  case AV_CODEC_ID_LJPEG:
304  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
305  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
306  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
307  avctx->pix_fmt != AV_PIX_FMT_BGR0 &&
308  avctx->pix_fmt != AV_PIX_FMT_BGRA &&
309  avctx->pix_fmt != AV_PIX_FMT_BGR24 &&
310  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
311  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
312  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
314  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
315  return -1;
316  }
317  break;
318  case AV_CODEC_ID_MJPEG:
319  case AV_CODEC_ID_AMV:
320  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
321  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
322  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
323  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
324  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
325  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
327  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
328  return -1;
329  }
330  break;
331  default:
332  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
333  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
334  return -1;
335  }
336  }
337 
338  switch (avctx->pix_fmt) {
339  case AV_PIX_FMT_YUVJ444P:
340  case AV_PIX_FMT_YUV444P:
342  break;
343  case AV_PIX_FMT_YUVJ422P:
344  case AV_PIX_FMT_YUV422P:
346  break;
347  case AV_PIX_FMT_YUVJ420P:
348  case AV_PIX_FMT_YUV420P:
349  default:
351  break;
352  }
353 
354  s->bit_rate = avctx->bit_rate;
355  s->width = avctx->width;
356  s->height = avctx->height;
357  if (avctx->gop_size > 600 &&
359  av_log(avctx, AV_LOG_WARNING,
360  "keyframe interval too large!, reducing it from %d to %d\n",
361  avctx->gop_size, 600);
362  avctx->gop_size = 600;
363  }
364  s->gop_size = avctx->gop_size;
365  s->avctx = avctx;
366  s->flags = avctx->flags;
367  s->flags2 = avctx->flags2;
368  s->max_b_frames = avctx->max_b_frames;
369  s->codec_id = avctx->codec->id;
370 #if FF_API_MPV_GLOBAL_OPTS
371  if (avctx->luma_elim_threshold)
372  s->luma_elim_threshold = avctx->luma_elim_threshold;
373  if (avctx->chroma_elim_threshold)
374  s->chroma_elim_threshold = avctx->chroma_elim_threshold;
375 #endif
377  s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
378  s->mpeg_quant = avctx->mpeg_quant;
379  s->rtp_mode = !!avctx->rtp_payload_size;
382 
383  if (s->gop_size <= 1) {
384  s->intra_only = 1;
385  s->gop_size = 12;
386  } else {
387  s->intra_only = 0;
388  }
389 
390  s->me_method = avctx->me_method;
391 
392  /* Fixed QSCALE */
393  s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
394 
395 #if FF_API_MPV_GLOBAL_OPTS
396  if (s->flags & CODEC_FLAG_QP_RD)
398 #endif
399 
400  s->adaptive_quant = (s->avctx->lumi_masking ||
401  s->avctx->dark_masking ||
404  s->avctx->p_masking ||
405  s->avctx->border_masking ||
406  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
407  !s->fixed_qscale;
408 
410 
411  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
412  switch(avctx->codec_id) {
415  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
416  break;
417  case AV_CODEC_ID_MPEG4:
421  if (avctx->rc_max_rate >= 15000000) {
422  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
423  } else if(avctx->rc_max_rate >= 2000000) {
424  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
425  } else if(avctx->rc_max_rate >= 384000) {
426  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
427  } else
428  avctx->rc_buffer_size = 40;
429  avctx->rc_buffer_size *= 16384;
430  break;
431  }
432  if (avctx->rc_buffer_size) {
433  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
434  }
435  }
436 
437  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
438  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
439  if (avctx->rc_max_rate && !avctx->rc_buffer_size)
440  return -1;
441  }
442 
443  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
444  av_log(avctx, AV_LOG_INFO,
445  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
446  }
447 
448  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
449  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
450  return -1;
451  }
452 
453  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
454  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
455  return -1;
456  }
457 
458  if (avctx->rc_max_rate &&
459  avctx->rc_max_rate == avctx->bit_rate &&
460  avctx->rc_max_rate != avctx->rc_min_rate) {
461  av_log(avctx, AV_LOG_INFO,
462  "impossible bitrate constraints, this will fail\n");
463  }
464 
465  if (avctx->rc_buffer_size &&
466  avctx->bit_rate * (int64_t)avctx->time_base.num >
467  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
468  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
469  return -1;
470  }
471 
472  if (!s->fixed_qscale &&
473  avctx->bit_rate * av_q2d(avctx->time_base) >
474  avctx->bit_rate_tolerance) {
475  av_log(avctx, AV_LOG_ERROR,
476  "bitrate tolerance too small for bitrate\n");
477  return -1;
478  }
479 
480  if (s->avctx->rc_max_rate &&
481  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
484  90000LL * (avctx->rc_buffer_size - 1) >
485  s->avctx->rc_max_rate * 0xFFFFLL) {
486  av_log(avctx, AV_LOG_INFO,
487  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
488  "specified vbv buffer is too large for the given bitrate!\n");
489  }
490 
491  if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
493  s->codec_id != AV_CODEC_ID_FLV1) {
494  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
495  return -1;
496  }
497 
498  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
499  av_log(avctx, AV_LOG_ERROR,
500  "OBMC is only supported with simple mb decision\n");
501  return -1;
502  }
503 
504  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
505  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
506  return -1;
507  }
508 
509  if (s->max_b_frames &&
510  s->codec_id != AV_CODEC_ID_MPEG4 &&
513  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
514  return -1;
515  }
516 
517  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
518  s->codec_id == AV_CODEC_ID_H263 ||
519  s->codec_id == AV_CODEC_ID_H263P) &&
520  (avctx->sample_aspect_ratio.num > 255 ||
521  avctx->sample_aspect_ratio.den > 255)) {
522  av_log(avctx, AV_LOG_WARNING,
523  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
526  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
527  }
528 
529  if ((s->codec_id == AV_CODEC_ID_H263 ||
530  s->codec_id == AV_CODEC_ID_H263P) &&
531  (avctx->width > 2048 ||
532  avctx->height > 1152 )) {
533  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
534  return -1;
535  }
536  if ((s->codec_id == AV_CODEC_ID_H263 ||
537  s->codec_id == AV_CODEC_ID_H263P) &&
538  ((avctx->width &3) ||
539  (avctx->height&3) )) {
540  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
541  return -1;
542  }
543 
544  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
545  (avctx->width > 4095 ||
546  avctx->height > 4095 )) {
547  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
548  return -1;
549  }
550 
551  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
552  (avctx->width > 16383 ||
553  avctx->height > 16383 )) {
554  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
555  return -1;
556  }
557 
558  if (s->codec_id == AV_CODEC_ID_RV10 &&
559  (avctx->width &15 ||
560  avctx->height&15 )) {
561  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
562  return AVERROR(EINVAL);
563  }
564 
565  if (s->codec_id == AV_CODEC_ID_RV20 &&
566  (avctx->width &3 ||
567  avctx->height&3 )) {
568  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
569  return AVERROR(EINVAL);
570  }
571 
572  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
573  s->codec_id == AV_CODEC_ID_WMV2) &&
574  avctx->width & 1) {
575  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
576  return -1;
577  }
578 
581  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
582  return -1;
583  }
584 
585  // FIXME mpeg2 uses that too
586  if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
587  av_log(avctx, AV_LOG_ERROR,
588  "mpeg2 style quantization not supported by codec\n");
589  return -1;
590  }
591 
592 #if FF_API_MPV_GLOBAL_OPTS
593  if (s->flags & CODEC_FLAG_CBP_RD)
595 #endif
596 
597  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
598  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
599  return -1;
600  }
601 
602  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
604  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
605  return -1;
606  }
607 
608  if (s->avctx->scenechange_threshold < 1000000000 &&
609  (s->flags & CODEC_FLAG_CLOSED_GOP)) {
610  av_log(avctx, AV_LOG_ERROR,
611  "closed gop with scene change detection are not supported yet, "
612  "set threshold to 1000000000\n");
613  return -1;
614  }
615 
616  if (s->flags & CODEC_FLAG_LOW_DELAY) {
617  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
618  av_log(avctx, AV_LOG_ERROR,
619  "low delay forcing is only available for mpeg2\n");
620  return -1;
621  }
622  if (s->max_b_frames != 0) {
623  av_log(avctx, AV_LOG_ERROR,
624  "b frames cannot be used with low delay\n");
625  return -1;
626  }
627  }
628 
629  if (s->q_scale_type == 1) {
630  if (avctx->qmax > 12) {
631  av_log(avctx, AV_LOG_ERROR,
632  "non linear quant only supports qmax <= 12 currently\n");
633  return -1;
634  }
635  }
636 
637  if (s->avctx->thread_count > 1 &&
638  s->codec_id != AV_CODEC_ID_MPEG4 &&
641  s->codec_id != AV_CODEC_ID_MJPEG &&
642  (s->codec_id != AV_CODEC_ID_H263P)) {
643  av_log(avctx, AV_LOG_ERROR,
644  "multi threaded encoding not supported by codec\n");
645  return -1;
646  }
647 
648  if (s->avctx->thread_count < 1) {
649  av_log(avctx, AV_LOG_ERROR,
650  "automatic thread number detection not supported by codec, "
651  "patch welcome\n");
652  return -1;
653  }
654 
655  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
656  s->rtp_mode = 1;
657 
658  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
659  s->h263_slice_structured = 1;
660 
661  if (!avctx->time_base.den || !avctx->time_base.num) {
662  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
663  return -1;
664  }
665 
666  i = (INT_MAX / 2 + 128) >> 8;
667  if (avctx->me_threshold >= i) {
668  av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
669  i - 1);
670  return -1;
671  }
672  if (avctx->mb_threshold >= i) {
673  av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
674  i - 1);
675  return -1;
676  }
677 
678  if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
679  av_log(avctx, AV_LOG_INFO,
680  "notice: b_frame_strategy only affects the first pass\n");
681  avctx->b_frame_strategy = 0;
682  }
683 
684  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
685  if (i > 1) {
686  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
687  avctx->time_base.den /= i;
688  avctx->time_base.num /= i;
689  //return -1;
690  }
691 
693  // (a + x * 3 / 8) / x
694  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
695  s->inter_quant_bias = 0;
696  } else {
697  s->intra_quant_bias = 0;
698  // (a - x / 4) / x
699  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
700  }
701 
706 
707  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
708 
709  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
710 
711  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
712  s->avctx->time_base.den > (1 << 16) - 1) {
713  av_log(avctx, AV_LOG_ERROR,
714  "timebase %d/%d not supported by MPEG 4 standard, "
715  "the maximum admitted value for the timebase denominator "
716  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
717  (1 << 16) - 1);
718  return -1;
719  }
720  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
721 
722 #if FF_API_MPV_GLOBAL_OPTS
723  if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
725  if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
727  if (avctx->quantizer_noise_shaping)
728  s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
729 #endif
730 
731  switch (avctx->codec->id) {
733  s->out_format = FMT_MPEG1;
734  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
735  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
736  break;
738  s->out_format = FMT_MPEG1;
739  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
740  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
741  s->rtp_mode = 1;
742  break;
743  case AV_CODEC_ID_LJPEG:
744  case AV_CODEC_ID_MJPEG:
745  case AV_CODEC_ID_AMV:
746  s->out_format = FMT_MJPEG;
747  s->intra_only = 1; /* force intra only for jpeg */
748  if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
749  (avctx->pix_fmt == AV_PIX_FMT_BGR0
750  || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
751  || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
752  s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
753  s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
754  s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
755  } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
756  s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
757  s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
758  } else {
759  s->mjpeg_vsample[0] = 2;
760  s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
761  s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
762  s->mjpeg_hsample[0] = 2;
763  s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
764  s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
765  }
766  if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
767  ff_mjpeg_encode_init(s) < 0)
768  return -1;
769  avctx->delay = 0;
770  s->low_delay = 1;
771  break;
772  case AV_CODEC_ID_H261:
773  if (!CONFIG_H261_ENCODER)
774  return -1;
775  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
776  av_log(avctx, AV_LOG_ERROR,
777  "The specified picture size of %dx%d is not valid for the "
778  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
779  s->width, s->height);
780  return -1;
781  }
782  s->out_format = FMT_H261;
783  avctx->delay = 0;
784  s->low_delay = 1;
785  break;
786  case AV_CODEC_ID_H263:
787  if (!CONFIG_H263_ENCODER)
788  return -1;
790  s->width, s->height) == 8) {
791  av_log(avctx, AV_LOG_ERROR,
792  "The specified picture size of %dx%d is not valid for "
793  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
794  "352x288, 704x576, and 1408x1152. "
795  "Try H.263+.\n", s->width, s->height);
796  return -1;
797  }
798  s->out_format = FMT_H263;
799  avctx->delay = 0;
800  s->low_delay = 1;
801  break;
802  case AV_CODEC_ID_H263P:
803  s->out_format = FMT_H263;
804  s->h263_plus = 1;
805  /* Fx */
806  s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
807  s->modified_quant = s->h263_aic;
808  s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
809  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
810 
811  /* /Fx */
812  /* These are just to be sure */
813  avctx->delay = 0;
814  s->low_delay = 1;
815  break;
816  case AV_CODEC_ID_FLV1:
817  s->out_format = FMT_H263;
818  s->h263_flv = 2; /* format = 1; 11-bit codes */
819  s->unrestricted_mv = 1;
820  s->rtp_mode = 0; /* don't allow GOB */
821  avctx->delay = 0;
822  s->low_delay = 1;
823  break;
824  case AV_CODEC_ID_RV10:
825  s->out_format = FMT_H263;
826  avctx->delay = 0;
827  s->low_delay = 1;
828  break;
829  case AV_CODEC_ID_RV20:
830  s->out_format = FMT_H263;
831  avctx->delay = 0;
832  s->low_delay = 1;
833  s->modified_quant = 1;
834  s->h263_aic = 1;
835  s->h263_plus = 1;
836  s->loop_filter = 1;
837  s->unrestricted_mv = 0;
838  break;
839  case AV_CODEC_ID_MPEG4:
840  s->out_format = FMT_H263;
841  s->h263_pred = 1;
842  s->unrestricted_mv = 1;
843  s->low_delay = s->max_b_frames ? 0 : 1;
844  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
845  break;
847  s->out_format = FMT_H263;
848  s->h263_pred = 1;
849  s->unrestricted_mv = 1;
850  s->msmpeg4_version = 2;
851  avctx->delay = 0;
852  s->low_delay = 1;
853  break;
855  s->out_format = FMT_H263;
856  s->h263_pred = 1;
857  s->unrestricted_mv = 1;
858  s->msmpeg4_version = 3;
859  s->flipflop_rounding = 1;
860  avctx->delay = 0;
861  s->low_delay = 1;
862  break;
863  case AV_CODEC_ID_WMV1:
864  s->out_format = FMT_H263;
865  s->h263_pred = 1;
866  s->unrestricted_mv = 1;
867  s->msmpeg4_version = 4;
868  s->flipflop_rounding = 1;
869  avctx->delay = 0;
870  s->low_delay = 1;
871  break;
872  case AV_CODEC_ID_WMV2:
873  s->out_format = FMT_H263;
874  s->h263_pred = 1;
875  s->unrestricted_mv = 1;
876  s->msmpeg4_version = 5;
877  s->flipflop_rounding = 1;
878  avctx->delay = 0;
879  s->low_delay = 1;
880  break;
881  default:
882  return -1;
883  }
884 
885  avctx->has_b_frames = !s->low_delay;
886 
887  s->encoding = 1;
888 
889  s->progressive_frame =
892  s->alternate_scan);
893 
894  /* init */
895  if (ff_MPV_common_init(s) < 0)
896  return -1;
897 
899 
900  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
902 
903  s->quant_precision = 5;
904 
905  ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
907 
908  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
910  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
914  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
915  && s->out_format == FMT_MPEG1)
917 
918  /* init q matrix */
919  for (i = 0; i < 64; i++) {
920  int j = s->dsp.idct_permutation[i];
921  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
922  s->mpeg_quant) {
925  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
926  s->intra_matrix[j] =
928  } else {
929  /* mpeg1/2 */
932  }
933  if (s->avctx->intra_matrix)
934  s->intra_matrix[j] = s->avctx->intra_matrix[i];
935  if (s->avctx->inter_matrix)
936  s->inter_matrix[j] = s->avctx->inter_matrix[i];
937  }
938 
939  /* precompute matrix */
940  /* for mjpeg, we do include qscale in the matrix */
941  if (s->out_format != FMT_MJPEG) {
943  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
944  31, 1);
946  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
947  31, 0);
948  }
949 
950  if (ff_rate_control_init(s) < 0)
951  return -1;
952 
953  return 0;
954 }
955 
957 {
958  MpegEncContext *s = avctx->priv_data;
959 
961 
963  if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
964  s->out_format == FMT_MJPEG)
966 
967  av_freep(&avctx->extradata);
968 
969  return 0;
970 }
971 
/**
 * Sum of absolute errors of a 16x16 pixel block against a constant value.
 *
 * @param src    pointer to the top-left pixel of the 16x16 block
 * @param ref    reference value subtracted from each pixel
 *               (callers pass the block mean)
 * @param stride distance in bytes between consecutive rows of src
 * @return       sum over all 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int total = 0;

    for (row = 0; row < 16; row++) {
        for (col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
985 
987  uint8_t *ref, int stride)
988 {
989  int x, y, w, h;
990  int acc = 0;
991 
992  w = s->width & ~15;
993  h = s->height & ~15;
994 
995  for (y = 0; y < h; y += 16) {
996  for (x = 0; x < w; x += 16) {
997  int offset = x + y * stride;
998  int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
999  16);
1000  int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
1001  int sae = get_sae(src + offset, mean, stride);
1002 
1003  acc += sae + 500 < sad;
1004  }
1005  }
1006  return acc;
1007 }
1008 
1009 
1010 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1011 {
1012  AVFrame *pic = NULL;
1013  int64_t pts;
1014  int i, display_picture_number = 0;
1015  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1016  (s->low_delay ? 0 : 1);
1017  int direct = 1;
1018 
1019  if (pic_arg) {
1020  pts = pic_arg->pts;
1021  display_picture_number = s->input_picture_number++;
1022 
1023  if (pts != AV_NOPTS_VALUE) {
1024  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1025  int64_t time = pts;
1026  int64_t last = s->user_specified_pts;
1027 
1028  if (time <= last) {
1030  "Error, Invalid timestamp=%"PRId64", "
1031  "last=%"PRId64"\n", pts, s->user_specified_pts);
1032  return -1;
1033  }
1034 
1035  if (!s->low_delay && display_picture_number == 1)
1036  s->dts_delta = time - last;
1037  }
1038  s->user_specified_pts = pts;
1039  } else {
1040  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1041  s->user_specified_pts =
1042  pts = s->user_specified_pts + 1;
1043  av_log(s->avctx, AV_LOG_INFO,
1044  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1045  pts);
1046  } else {
1047  pts = display_picture_number;
1048  }
1049  }
1050  }
1051 
1052  if (pic_arg) {
1053  if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
1054  direct = 0;
1055  if (pic_arg->linesize[0] != s->linesize)
1056  direct = 0;
1057  if (pic_arg->linesize[1] != s->uvlinesize)
1058  direct = 0;
1059  if (pic_arg->linesize[2] != s->uvlinesize)
1060  direct = 0;
1061 
1062  av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
1063  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1064 
1065  if (direct) {
1066  i = ff_find_unused_picture(s, 1);
1067  if (i < 0)
1068  return i;
1069 
1070  pic = &s->picture[i].f;
1071  pic->reference = 3;
1072 
1073  for (i = 0; i < 4; i++) {
1074  pic->data[i] = pic_arg->data[i];
1075  pic->linesize[i] = pic_arg->linesize[i];
1076  }
1077  if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
1078  return -1;
1079  }
1080  } else {
1081  i = ff_find_unused_picture(s, 0);
1082  if (i < 0)
1083  return i;
1084 
1085  pic = &s->picture[i].f;
1086  pic->reference = 3;
1087 
1088  if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
1089  return -1;
1090  }
1091 
1092  if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1093  pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1094  pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1095  // empty
1096  } else {
1097  int h_chroma_shift, v_chroma_shift;
1099  &h_chroma_shift,
1100  &v_chroma_shift);
1101 
1102  for (i = 0; i < 3; i++) {
1103  int src_stride = pic_arg->linesize[i];
1104  int dst_stride = i ? s->uvlinesize : s->linesize;
1105  int h_shift = i ? h_chroma_shift : 0;
1106  int v_shift = i ? v_chroma_shift : 0;
1107  int w = s->width >> h_shift;
1108  int h = s->height >> v_shift;
1109  uint8_t *src = pic_arg->data[i];
1110  uint8_t *dst = pic->data[i];
1111 
1112  if (s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
1113  h = ((s->height + 15)/16*16) >> v_shift;
1114  }
1115 
1116  if (!s->avctx->rc_buffer_size)
1117  dst += INPLACE_OFFSET;
1118 
1119  if (src_stride == dst_stride)
1120  memcpy(dst, src, src_stride * h);
1121  else {
1122  int h2 = h;
1123  uint8_t *dst2 = dst;
1124  while (h2--) {
1125  memcpy(dst2, src, w);
1126  dst2 += dst_stride;
1127  src += src_stride;
1128  }
1129  }
1130  if ((s->width & 15) || (s->height & 15)) {
1131  s->dsp.draw_edges(dst, dst_stride,
1132  w, h,
1133  16>>h_shift,
1134  16>>v_shift,
1135  EDGE_BOTTOM);
1136  }
1137  }
1138  }
1139  }
1140  copy_picture_attributes(s, pic, pic_arg);
1141  pic->display_picture_number = display_picture_number;
1142  pic->pts = pts; // we set this here to avoid modifiying pic_arg
1143  }
1144 
1145  /* shift buffer entries */
1146  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1147  s->input_picture[i - 1] = s->input_picture[i];
1148 
1149  s->input_picture[encoding_delay] = (Picture*) pic;
1150 
1151  return 0;
1152 }
1153 
1154 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1155 {
1156  int x, y, plane;
1157  int score = 0;
1158  int64_t score64 = 0;
1159 
1160  for (plane = 0; plane < 3; plane++) {
1161  const int stride = p->f.linesize[plane];
1162  const int bw = plane ? 1 : 2;
1163  for (y = 0; y < s->mb_height * bw; y++) {
1164  for (x = 0; x < s->mb_width * bw; x++) {
1165  int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1166  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1167  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1168  int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1169 
1170  switch (s->avctx->frame_skip_exp) {
1171  case 0: score = FFMAX(score, v); break;
1172  case 1: score += FFABS(v); break;
1173  case 2: score += v * v; break;
1174  case 3: score64 += FFABS(v * v * (int64_t)v); break;
1175  case 4: score64 += v * v * (int64_t)(v * v); break;
1176  }
1177  }
1178  }
1179  }
1180 
1181  if (score)
1182  score64 = score;
1183 
1184  if (score64 < s->avctx->frame_skip_threshold)
1185  return 1;
1186  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1187  return 1;
1188  return 0;
1189 }
1190 
1192 {
1193  AVPacket pkt = { 0 };
1194  int ret, got_output;
1195 
1196  av_init_packet(&pkt);
1197  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1198  if (ret < 0)
1199  return ret;
1200 
1201  ret = pkt.size;
1202  av_free_packet(&pkt);
1203  return ret;
1204 }
1205 
1207 {
1210  AVFrame input[FF_MAX_B_FRAMES + 2];
1211  const int scale = s->avctx->brd_scale;
1212  int i, j, out_size, p_lambda, b_lambda, lambda2;
1213  int64_t best_rd = INT64_MAX;
1214  int best_b_count = -1;
1215 
1216  av_assert0(scale >= 0 && scale <= 3);
1217 
1218  //emms_c();
1219  //s->next_picture_ptr->quality;
1220  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1221  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1222  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1223  if (!b_lambda) // FIXME we should do this somewhere else
1224  b_lambda = p_lambda;
1225  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1227 
1228  c->width = s->width >> scale;
1229  c->height = s->height >> scale;
1231  CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1232  c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1233  c->mb_decision = s->avctx->mb_decision;
1234  c->me_cmp = s->avctx->me_cmp;
1235  c->mb_cmp = s->avctx->mb_cmp;
1236  c->me_sub_cmp = s->avctx->me_sub_cmp;
1238  c->time_base = s->avctx->time_base;
1239  c->max_b_frames = s->max_b_frames;
1240 
1241  if (avcodec_open2(c, codec, NULL) < 0)
1242  return -1;
1243 
1244  for (i = 0; i < s->max_b_frames + 2; i++) {
1245  int ysize = c->width * c->height;
1246  int csize = (c->width / 2) * (c->height / 2);
1247  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1248  s->next_picture_ptr;
1249 
1250  avcodec_get_frame_defaults(&input[i]);
1251  input[i].data[0] = av_malloc(ysize + 2 * csize);
1252  input[i].data[1] = input[i].data[0] + ysize;
1253  input[i].data[2] = input[i].data[1] + csize;
1254  input[i].linesize[0] = c->width;
1255  input[i].linesize[1] =
1256  input[i].linesize[2] = c->width / 2;
1257 
1258  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1259  pre_input = *pre_input_ptr;
1260 
1261  if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1262  pre_input.f.data[0] += INPLACE_OFFSET;
1263  pre_input.f.data[1] += INPLACE_OFFSET;
1264  pre_input.f.data[2] += INPLACE_OFFSET;
1265  }
1266 
1267  s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1268  pre_input.f.data[0], pre_input.f.linesize[0],
1269  c->width, c->height);
1270  s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1271  pre_input.f.data[1], pre_input.f.linesize[1],
1272  c->width >> 1, c->height >> 1);
1273  s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1274  pre_input.f.data[2], pre_input.f.linesize[2],
1275  c->width >> 1, c->height >> 1);
1276  }
1277  }
1278 
1279  for (j = 0; j < s->max_b_frames + 1; j++) {
1280  int64_t rd = 0;
1281 
1282  if (!s->input_picture[j])
1283  break;
1284 
1285  c->error[0] = c->error[1] = c->error[2] = 0;
1286 
1287  input[0].pict_type = AV_PICTURE_TYPE_I;
1288  input[0].quality = 1 * FF_QP2LAMBDA;
1289 
1290  out_size = encode_frame(c, &input[0]);
1291 
1292  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1293 
1294  for (i = 0; i < s->max_b_frames + 1; i++) {
1295  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1296 
1297  input[i + 1].pict_type = is_p ?
1299  input[i + 1].quality = is_p ? p_lambda : b_lambda;
1300 
1301  out_size = encode_frame(c, &input[i + 1]);
1302 
1303  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1304  }
1305 
1306  /* get the delayed frames */
1307  while (out_size) {
1308  out_size = encode_frame(c, NULL);
1309  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1310  }
1311 
1312  rd += c->error[0] + c->error[1] + c->error[2];
1313 
1314  if (rd < best_rd) {
1315  best_rd = rd;
1316  best_b_count = j;
1317  }
1318  }
1319 
1320  avcodec_close(c);
1321  av_freep(&c);
1322 
1323  for (i = 0; i < s->max_b_frames + 2; i++) {
1324  av_freep(&input[i].data[0]);
1325  }
1326 
1327  return best_b_count;
1328 }
1329 
1331 {
1332  int i;
1333 
1334  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1336  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1337 
1338  /* set next picture type & ordering */
1339  if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1340  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1341  s->next_picture_ptr == NULL || s->intra_only) {
1342  s->reordered_input_picture[0] = s->input_picture[0];
1345  s->coded_picture_number++;
1346  } else {
1347  int b_frames;
1348 
1350  if (s->picture_in_gop_number < s->gop_size &&
1351  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1352  // FIXME check that te gop check above is +-1 correct
1353  if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1354  for (i = 0; i < 4; i++)
1355  s->input_picture[0]->f.data[i] = NULL;
1356  s->input_picture[0]->f.type = 0;
1357  } else {
1358  assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1360 
1361  s->avctx->release_buffer(s->avctx,
1362  &s->input_picture[0]->f);
1363  }
1364 
1365  emms_c();
1366  ff_vbv_update(s, 0);
1367 
1368  goto no_output_pic;
1369  }
1370  }
1371 
1372  if (s->flags & CODEC_FLAG_PASS2) {
1373  for (i = 0; i < s->max_b_frames + 1; i++) {
1374  int pict_num = s->input_picture[0]->f.display_picture_number + i;
1375 
1376  if (pict_num >= s->rc_context.num_entries)
1377  break;
1378  if (!s->input_picture[i]) {
1379  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1380  break;
1381  }
1382 
1383  s->input_picture[i]->f.pict_type =
1384  s->rc_context.entry[pict_num].new_pict_type;
1385  }
1386  }
1387 
1388  if (s->avctx->b_frame_strategy == 0) {
1389  b_frames = s->max_b_frames;
1390  while (b_frames && !s->input_picture[b_frames])
1391  b_frames--;
1392  } else if (s->avctx->b_frame_strategy == 1) {
1393  for (i = 1; i < s->max_b_frames + 1; i++) {
1394  if (s->input_picture[i] &&
1395  s->input_picture[i]->b_frame_score == 0) {
1396  s->input_picture[i]->b_frame_score =
1397  get_intra_count(s,
1398  s->input_picture[i ]->f.data[0],
1399  s->input_picture[i - 1]->f.data[0],
1400  s->linesize) + 1;
1401  }
1402  }
1403  for (i = 0; i < s->max_b_frames + 1; i++) {
1404  if (s->input_picture[i] == NULL ||
1405  s->input_picture[i]->b_frame_score - 1 >
1406  s->mb_num / s->avctx->b_sensitivity)
1407  break;
1408  }
1409 
1410  b_frames = FFMAX(0, i - 1);
1411 
1412  /* reset scores */
1413  for (i = 0; i < b_frames + 1; i++) {
1414  s->input_picture[i]->b_frame_score = 0;
1415  }
1416  } else if (s->avctx->b_frame_strategy == 2) {
1417  b_frames = estimate_best_b_count(s);
1418  } else {
1419  av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1420  b_frames = 0;
1421  }
1422 
1423  emms_c();
1424 
1425  for (i = b_frames - 1; i >= 0; i--) {
1426  int type = s->input_picture[i]->f.pict_type;
1427  if (type && type != AV_PICTURE_TYPE_B)
1428  b_frames = i;
1429  }
1430  if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1431  b_frames == s->max_b_frames) {
1433  "warning, too many b frames in a row\n");
1434  }
1435 
1436  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1437  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1438  s->gop_size > s->picture_in_gop_number) {
1439  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1440  } else {
1441  if (s->flags & CODEC_FLAG_CLOSED_GOP)
1442  b_frames = 0;
1443  s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1444  }
1445  }
1446 
1447  if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1448  s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1449  b_frames--;
1450 
1451  s->reordered_input_picture[0] = s->input_picture[b_frames];
1455  s->coded_picture_number++;
1456  for (i = 0; i < b_frames; i++) {
1457  s->reordered_input_picture[i + 1] = s->input_picture[i];
1458  s->reordered_input_picture[i + 1]->f.pict_type =
1461  s->coded_picture_number++;
1462  }
1463  }
1464  }
1465 no_output_pic:
1466  if (s->reordered_input_picture[0]) {
1469  AV_PICTURE_TYPE_B ? 3 : 0;
1470 
1472 
1474  s->avctx->rc_buffer_size) {
1475  // input is a shared pix, so we can't modifiy it -> alloc a new
1476  // one & ensure that the shared one is reuseable
1477 
1478  Picture *pic;
1479  int i = ff_find_unused_picture(s, 0);
1480  if (i < 0)
1481  return i;
1482  pic = &s->picture[i];
1483 
1485  if (ff_alloc_picture(s, pic, 0) < 0) {
1486  return -1;
1487  }
1488 
1489  /* mark us unused / free shared pic */
1491  s->avctx->release_buffer(s->avctx,
1492  &s->reordered_input_picture[0]->f);
1493  for (i = 0; i < 4; i++)
1494  s->reordered_input_picture[0]->f.data[i] = NULL;
1495  s->reordered_input_picture[0]->f.type = 0;
1496 
1497  copy_picture_attributes(s, &pic->f,
1498  &s->reordered_input_picture[0]->f);
1499 
1500  s->current_picture_ptr = pic;
1501  } else {
1502  // input is not a shared pix -> reuse buffer for current_pix
1503 
1504  assert(s->reordered_input_picture[0]->f.type ==
1506  s->reordered_input_picture[0]->f.type ==
1508 
1510  for (i = 0; i < 4; i++) {
1511  s->new_picture.f.data[i] += INPLACE_OFFSET;
1512  }
1513  }
1515 
1517  } else {
1518  memset(&s->new_picture, 0, sizeof(Picture));
1519  }
1520  return 0;
1521 }
1522 
1524  AVFrame *pic_arg, int *got_packet)
1525 {
1526  MpegEncContext *s = avctx->priv_data;
1527  int i, stuffing_count, ret;
1528  int context_count = s->slice_context_count;
1529 
1530  s->picture_in_gop_number++;
1531 
1532  if (load_input_picture(s, pic_arg) < 0)
1533  return -1;
1534 
1535  if (select_input_picture(s) < 0) {
1536  return -1;
1537  }
1538 
1539  /* output? */
1540  if (s->new_picture.f.data[0]) {
1541  if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
1542  return ret;
1543  if (s->mb_info) {
1546  s->mb_width*s->mb_height*12);
1547  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1548  }
1549 
1550  for (i = 0; i < context_count; i++) {
1551  int start_y = s->thread_context[i]->start_mb_y;
1552  int end_y = s->thread_context[i]-> end_mb_y;
1553  int h = s->mb_height;
1554  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1555  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1556 
1557  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1558  }
1559 
1560  s->pict_type = s->new_picture.f.pict_type;
1561  //emms_c();
1562  if (ff_MPV_frame_start(s, avctx) < 0)
1563  return -1;
1564 vbv_retry:
1565  if (encode_picture(s, s->picture_number) < 0)
1566  return -1;
1567 
1568  avctx->header_bits = s->header_bits;
1569  avctx->mv_bits = s->mv_bits;
1570  avctx->misc_bits = s->misc_bits;
1571  avctx->i_tex_bits = s->i_tex_bits;
1572  avctx->p_tex_bits = s->p_tex_bits;
1573  avctx->i_count = s->i_count;
1574  // FIXME f/b_count in avctx
1575  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1576  avctx->skip_count = s->skip_count;
1577 
1578  ff_MPV_frame_end(s);
1579 
1580  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1582 
1583  if (avctx->rc_buffer_size) {
1584  RateControlContext *rcc = &s->rc_context;
1585  int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1586 
1587  if (put_bits_count(&s->pb) > max_size &&
1588  s->lambda < s->avctx->lmax) {
1589  s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1590  (s->qscale + 1) / s->qscale);
1591  if (s->adaptive_quant) {
1592  int i;
1593  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1594  s->lambda_table[i] =
1595  FFMAX(s->lambda_table[i] + 1,
1596  s->lambda_table[i] * (s->qscale + 1) /
1597  s->qscale);
1598  }
1599  s->mb_skipped = 0; // done in MPV_frame_start()
1600  // done in encode_picture() so we must undo it
1601  if (s->pict_type == AV_PICTURE_TYPE_P) {
1602  if (s->flipflop_rounding ||
1603  s->codec_id == AV_CODEC_ID_H263P ||
1605  s->no_rounding ^= 1;
1606  }
1607  if (s->pict_type != AV_PICTURE_TYPE_B) {
1608  s->time_base = s->last_time_base;
1609  s->last_non_b_time = s->time - s->pp_time;
1610  }
1611  for (i = 0; i < context_count; i++) {
1612  PutBitContext *pb = &s->thread_context[i]->pb;
1613  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1614  }
1615  goto vbv_retry;
1616  }
1617 
1618  assert(s->avctx->rc_max_rate);
1619  }
1620 
1621  if (s->flags & CODEC_FLAG_PASS1)
1623 
1624  for (i = 0; i < 4; i++) {
1626  avctx->error[i] += s->current_picture_ptr->f.error[i];
1627  }
1628 
1629  if (s->flags & CODEC_FLAG_PASS1)
1630  assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1631  avctx->i_tex_bits + avctx->p_tex_bits ==
1632  put_bits_count(&s->pb));
1633  flush_put_bits(&s->pb);
1634  s->frame_bits = put_bits_count(&s->pb);
1635 
1636  stuffing_count = ff_vbv_update(s, s->frame_bits);
1637  s->stuffing_bits = 8*stuffing_count;
1638  if (stuffing_count) {
1639  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1640  stuffing_count + 50) {
1641  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1642  return -1;
1643  }
1644 
1645  switch (s->codec_id) {
1648  while (stuffing_count--) {
1649  put_bits(&s->pb, 8, 0);
1650  }
1651  break;
1652  case AV_CODEC_ID_MPEG4:
1653  put_bits(&s->pb, 16, 0);
1654  put_bits(&s->pb, 16, 0x1C3);
1655  stuffing_count -= 4;
1656  while (stuffing_count--) {
1657  put_bits(&s->pb, 8, 0xFF);
1658  }
1659  break;
1660  default:
1661  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1662  }
1663  flush_put_bits(&s->pb);
1664  s->frame_bits = put_bits_count(&s->pb);
1665  }
1666 
1667  /* update mpeg1/2 vbv_delay for CBR */
1668  if (s->avctx->rc_max_rate &&
1669  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1670  s->out_format == FMT_MPEG1 &&
1671  90000LL * (avctx->rc_buffer_size - 1) <=
1672  s->avctx->rc_max_rate * 0xFFFFLL) {
1673  int vbv_delay, min_delay;
1674  double inbits = s->avctx->rc_max_rate *
1675  av_q2d(s->avctx->time_base);
1676  int minbits = s->frame_bits - 8 *
1677  (s->vbv_delay_ptr - s->pb.buf - 1);
1678  double bits = s->rc_context.buffer_index + minbits - inbits;
1679 
1680  if (bits < 0)
1682  "Internal error, negative bits\n");
1683 
1684  assert(s->repeat_first_field == 0);
1685 
1686  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1687  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1688  s->avctx->rc_max_rate;
1689 
1690  vbv_delay = FFMAX(vbv_delay, min_delay);
1691 
1692  av_assert0(vbv_delay < 0xFFFF);
1693 
1694  s->vbv_delay_ptr[0] &= 0xF8;
1695  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1696  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1697  s->vbv_delay_ptr[2] &= 0x07;
1698  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1699  avctx->vbv_delay = vbv_delay * 300;
1700  }
1701  s->total_bits += s->frame_bits;
1702  avctx->frame_bits = s->frame_bits;
1703 
1704  pkt->pts = s->current_picture.f.pts;
1705  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1707  pkt->dts = pkt->pts - s->dts_delta;
1708  else
1709  pkt->dts = s->reordered_pts;
1710  s->reordered_pts = pkt->pts;
1711  } else
1712  pkt->dts = pkt->pts;
1713  if (s->current_picture.f.key_frame)
1714  pkt->flags |= AV_PKT_FLAG_KEY;
1715  if (s->mb_info)
1717  } else {
1718  s->frame_bits = 0;
1719  }
1720  assert((s->frame_bits & 7) == 0);
1721 
1722  pkt->size = s->frame_bits / 8;
1723  *got_packet = !!pkt->size;
1724  return 0;
1725 }
1726 
1728  int n, int threshold)
1729 {
1730  static const char tab[64] = {
1731  3, 2, 2, 1, 1, 1, 1, 1,
1732  1, 1, 1, 1, 1, 1, 1, 1,
1733  1, 1, 1, 1, 1, 1, 1, 1,
1734  0, 0, 0, 0, 0, 0, 0, 0,
1735  0, 0, 0, 0, 0, 0, 0, 0,
1736  0, 0, 0, 0, 0, 0, 0, 0,
1737  0, 0, 0, 0, 0, 0, 0, 0,
1738  0, 0, 0, 0, 0, 0, 0, 0
1739  };
1740  int score = 0;
1741  int run = 0;
1742  int i;
1743  int16_t *block = s->block[n];
1744  const int last_index = s->block_last_index[n];
1745  int skip_dc;
1746 
1747  if (threshold < 0) {
1748  skip_dc = 0;
1749  threshold = -threshold;
1750  } else
1751  skip_dc = 1;
1752 
1753  /* Are all we could set to zero already zero? */
1754  if (last_index <= skip_dc - 1)
1755  return;
1756 
1757  for (i = 0; i <= last_index; i++) {
1758  const int j = s->intra_scantable.permutated[i];
1759  const int level = FFABS(block[j]);
1760  if (level == 1) {
1761  if (skip_dc && i == 0)
1762  continue;
1763  score += tab[run];
1764  run = 0;
1765  } else if (level > 1) {
1766  return;
1767  } else {
1768  run++;
1769  }
1770  }
1771  if (score >= threshold)
1772  return;
1773  for (i = skip_dc; i <= last_index; i++) {
1774  const int j = s->intra_scantable.permutated[i];
1775  block[j] = 0;
1776  }
1777  if (block[0])
1778  s->block_last_index[n] = 0;
1779  else
1780  s->block_last_index[n] = -1;
1781 }
1782 
1783 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
1784  int last_index)
1785 {
1786  int i;
1787  const int maxlevel = s->max_qcoeff;
1788  const int minlevel = s->min_qcoeff;
1789  int overflow = 0;
1790 
1791  if (s->mb_intra) {
1792  i = 1; // skip clipping of intra dc
1793  } else
1794  i = 0;
1795 
1796  for (; i <= last_index; i++) {
1797  const int j = s->intra_scantable.permutated[i];
1798  int level = block[j];
1799 
1800  if (level > maxlevel) {
1801  level = maxlevel;
1802  overflow++;
1803  } else if (level < minlevel) {
1804  level = minlevel;
1805  overflow++;
1806  }
1807 
1808  block[j] = level;
1809  }
1810 
1811  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1812  av_log(s->avctx, AV_LOG_INFO,
1813  "warning, clipping %d dct coefficients to %d..%d\n",
1814  overflow, minlevel, maxlevel);
1815 }
1816 
1817 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1818 {
1819  int x, y;
1820  // FIXME optimize
1821  for (y = 0; y < 8; y++) {
1822  for (x = 0; x < 8; x++) {
1823  int x2, y2;
1824  int sum = 0;
1825  int sqr = 0;
1826  int count = 0;
1827 
1828  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1829  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1830  int v = ptr[x2 + y2 * stride];
1831  sum += v;
1832  sqr += v * v;
1833  count++;
1834  }
1835  }
1836  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1837  }
1838  }
1839 }
1840 
1842  int motion_x, int motion_y,
1843  int mb_block_height,
1844  int mb_block_width,
1845  int mb_block_count)
1846 {
1847  int16_t weight[12][64];
1848  int16_t orig[12][64];
1849  const int mb_x = s->mb_x;
1850  const int mb_y = s->mb_y;
1851  int i;
1852  int skip_dct[12];
1853  int dct_offset = s->linesize * 8; // default for progressive frames
1854  int uv_dct_offset = s->uvlinesize * 8;
1855  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1856  int wrap_y, wrap_c;
1857 
1858  for (i = 0; i < mb_block_count; i++)
1859  skip_dct[i] = s->skipdct;
1860 
1861  if (s->adaptive_quant) {
1862  const int last_qp = s->qscale;
1863  const int mb_xy = mb_x + mb_y * s->mb_stride;
1864 
1865  s->lambda = s->lambda_table[mb_xy];
1866  update_qscale(s);
1867 
1868  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1869  s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1870  s->dquant = s->qscale - last_qp;
1871 
1872  if (s->out_format == FMT_H263) {
1873  s->dquant = av_clip(s->dquant, -2, 2);
1874 
1875  if (s->codec_id == AV_CODEC_ID_MPEG4) {
1876  if (!s->mb_intra) {
1877  if (s->pict_type == AV_PICTURE_TYPE_B) {
1878  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1879  s->dquant = 0;
1880  }
1881  if (s->mv_type == MV_TYPE_8X8)
1882  s->dquant = 0;
1883  }
1884  }
1885  }
1886  }
1887  ff_set_qscale(s, last_qp + s->dquant);
1888  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1889  ff_set_qscale(s, s->qscale + s->dquant);
1890 
1891  wrap_y = s->linesize;
1892  wrap_c = s->uvlinesize;
1893  ptr_y = s->new_picture.f.data[0] +
1894  (mb_y * 16 * wrap_y) + mb_x * 16;
1895  ptr_cb = s->new_picture.f.data[1] +
1896  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1897  ptr_cr = s->new_picture.f.data[2] +
1898  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1899 
1900  if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
1901  uint8_t *ebuf = s->edge_emu_buffer + 32;
1902  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
1903  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
1904  s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1905  mb_y * 16, s->width, s->height);
1906  ptr_y = ebuf;
1907  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width,
1908  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1909  cw, ch);
1910  ptr_cb = ebuf + 18 * wrap_y;
1911  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, ptr_cr, wrap_c, mb_block_width,
1912  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1913  cw, ch);
1914  ptr_cr = ebuf + 18 * wrap_y + 16;
1915  }
1916 
1917  if (s->mb_intra) {
1918  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1919  int progressive_score, interlaced_score;
1920 
1921  s->interlaced_dct = 0;
1922  progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1923  NULL, wrap_y, 8) +
1924  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1925  NULL, wrap_y, 8) - 400;
1926 
1927  if (progressive_score > 0) {
1928  interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1929  NULL, wrap_y * 2, 8) +
1930  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1931  NULL, wrap_y * 2, 8);
1932  if (progressive_score > interlaced_score) {
1933  s->interlaced_dct = 1;
1934 
1935  dct_offset = wrap_y;
1936  uv_dct_offset = wrap_c;
1937  wrap_y <<= 1;
1938  if (s->chroma_format == CHROMA_422 ||
1939  s->chroma_format == CHROMA_444)
1940  wrap_c <<= 1;
1941  }
1942  }
1943  }
1944 
1945  s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1946  s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1947  s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1948  s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1949 
1950  if (s->flags & CODEC_FLAG_GRAY) {
1951  skip_dct[4] = 1;
1952  skip_dct[5] = 1;
1953  } else {
1954  s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1955  s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1956  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
1957  s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
1958  s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
1959  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
1960  s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
1961  s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
1962  s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
1963  s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
1964  s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
1965  s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
1966  }
1967  }
1968  } else {
1969  op_pixels_func (*op_pix)[4];
1970  qpel_mc_func (*op_qpix)[16];
1971  uint8_t *dest_y, *dest_cb, *dest_cr;
1972 
1973  dest_y = s->dest[0];
1974  dest_cb = s->dest[1];
1975  dest_cr = s->dest[2];
1976 
1977  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1978  op_pix = s->dsp.put_pixels_tab;
1979  op_qpix = s->dsp.put_qpel_pixels_tab;
1980  } else {
1981  op_pix = s->dsp.put_no_rnd_pixels_tab;
1982  op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1983  }
1984 
1985  if (s->mv_dir & MV_DIR_FORWARD) {
1986  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1987  s->last_picture.f.data,
1988  op_pix, op_qpix);
1989  op_pix = s->dsp.avg_pixels_tab;
1990  op_qpix = s->dsp.avg_qpel_pixels_tab;
1991  }
1992  if (s->mv_dir & MV_DIR_BACKWARD) {
1993  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1994  s->next_picture.f.data,
1995  op_pix, op_qpix);
1996  }
1997 
1998  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1999  int progressive_score, interlaced_score;
2000 
2001  s->interlaced_dct = 0;
2002  progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
2003  ptr_y, wrap_y,
2004  8) +
2005  s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
2006  ptr_y + wrap_y * 8, wrap_y,
2007  8) - 400;
2008 
2009  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2010  progressive_score -= 400;
2011 
2012  if (progressive_score > 0) {
2013  interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
2014  ptr_y,
2015  wrap_y * 2, 8) +
2016  s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
2017  ptr_y + wrap_y,
2018  wrap_y * 2, 8);
2019 
2020  if (progressive_score > interlaced_score) {
2021  s->interlaced_dct = 1;
2022 
2023  dct_offset = wrap_y;
2024  uv_dct_offset = wrap_c;
2025  wrap_y <<= 1;
2026  if (s->chroma_format == CHROMA_422)
2027  wrap_c <<= 1;
2028  }
2029  }
2030  }
2031 
2032  s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2033  s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2034  s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2035  dest_y + dct_offset, wrap_y);
2036  s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2037  dest_y + dct_offset + 8, wrap_y);
2038 
2039  if (s->flags & CODEC_FLAG_GRAY) {
2040  skip_dct[4] = 1;
2041  skip_dct[5] = 1;
2042  } else {
2043  s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2044  s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2045  if (!s->chroma_y_shift) { /* 422 */
2046  s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2047  dest_cb + uv_dct_offset, wrap_c);
2048  s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2049  dest_cr + uv_dct_offset, wrap_c);
2050  }
2051  }
2052  /* pre quantization */
2053  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2054  2 * s->qscale * s->qscale) {
2055  // FIXME optimize
2056  if (s->dsp.sad[1](NULL, ptr_y , dest_y,
2057  wrap_y, 8) < 20 * s->qscale)
2058  skip_dct[0] = 1;
2059  if (s->dsp.sad[1](NULL, ptr_y + 8,
2060  dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2061  skip_dct[1] = 1;
2062  if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
2063  dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
2064  skip_dct[2] = 1;
2065  if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
2066  dest_y + dct_offset + 8,
2067  wrap_y, 8) < 20 * s->qscale)
2068  skip_dct[3] = 1;
2069  if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
2070  wrap_c, 8) < 20 * s->qscale)
2071  skip_dct[4] = 1;
2072  if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
2073  wrap_c, 8) < 20 * s->qscale)
2074  skip_dct[5] = 1;
2075  if (!s->chroma_y_shift) { /* 422 */
2076  if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
2077  dest_cb + uv_dct_offset,
2078  wrap_c, 8) < 20 * s->qscale)
2079  skip_dct[6] = 1;
2080  if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
2081  dest_cr + uv_dct_offset,
2082  wrap_c, 8) < 20 * s->qscale)
2083  skip_dct[7] = 1;
2084  }
2085  }
2086  }
2087 
2088  if (s->quantizer_noise_shaping) {
2089  if (!skip_dct[0])
2090  get_visual_weight(weight[0], ptr_y , wrap_y);
2091  if (!skip_dct[1])
2092  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2093  if (!skip_dct[2])
2094  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2095  if (!skip_dct[3])
2096  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2097  if (!skip_dct[4])
2098  get_visual_weight(weight[4], ptr_cb , wrap_c);
2099  if (!skip_dct[5])
2100  get_visual_weight(weight[5], ptr_cr , wrap_c);
2101  if (!s->chroma_y_shift) { /* 422 */
2102  if (!skip_dct[6])
2103  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2104  wrap_c);
2105  if (!skip_dct[7])
2106  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2107  wrap_c);
2108  }
2109  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2110  }
2111 
2112  /* DCT & quantize */
2113  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2114  {
2115  for (i = 0; i < mb_block_count; i++) {
2116  if (!skip_dct[i]) {
2117  int overflow;
2118  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2119  // FIXME we could decide to change to quantizer instead of
2120  // clipping
2121  // JS: I don't think that would be a good idea it could lower
2122  // quality instead of improve it. Just INTRADC clipping
2123  // deserves changes in quantizer
2124  if (overflow)
2125  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2126  } else
2127  s->block_last_index[i] = -1;
2128  }
2129  if (s->quantizer_noise_shaping) {
2130  for (i = 0; i < mb_block_count; i++) {
2131  if (!skip_dct[i]) {
2132  s->block_last_index[i] =
2133  dct_quantize_refine(s, s->block[i], weight[i],
2134  orig[i], i, s->qscale);
2135  }
2136  }
2137  }
2138 
2139  if (s->luma_elim_threshold && !s->mb_intra)
2140  for (i = 0; i < 4; i++)
2142  if (s->chroma_elim_threshold && !s->mb_intra)
2143  for (i = 4; i < mb_block_count; i++)
2145 
2146  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2147  for (i = 0; i < mb_block_count; i++) {
2148  if (s->block_last_index[i] == -1)
2149  s->coded_score[i] = INT_MAX / 256;
2150  }
2151  }
2152  }
2153 
2154  if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2155  s->block_last_index[4] =
2156  s->block_last_index[5] = 0;
2157  s->block[4][0] =
2158  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2159  if (!s->chroma_y_shift) { /* 422 / 444 */
2160  for (i=6; i<12; i++) {
2161  s->block_last_index[i] = 0;
2162  s->block[i][0] = s->block[4][0];
2163  }
2164  }
2165  }
2166 
2167  // non c quantize code returns incorrect block_last_index FIXME
2168  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2169  for (i = 0; i < mb_block_count; i++) {
2170  int j;
2171  if (s->block_last_index[i] > 0) {
2172  for (j = 63; j > 0; j--) {
2173  if (s->block[i][s->intra_scantable.permutated[j]])
2174  break;
2175  }
2176  s->block_last_index[i] = j;
2177  }
2178  }
2179  }
2180 
2181  /* huffman encode */
2182  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2185  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2186  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2187  break;
2188  case AV_CODEC_ID_MPEG4:
2189  if (CONFIG_MPEG4_ENCODER)
2190  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2191  break;
2192  case AV_CODEC_ID_MSMPEG4V2:
2193  case AV_CODEC_ID_MSMPEG4V3:
2194  case AV_CODEC_ID_WMV1:
2196  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2197  break;
2198  case AV_CODEC_ID_WMV2:
2199  if (CONFIG_WMV2_ENCODER)
2200  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2201  break;
2202  case AV_CODEC_ID_H261:
2203  if (CONFIG_H261_ENCODER)
2204  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2205  break;
2206  case AV_CODEC_ID_H263:
2207  case AV_CODEC_ID_H263P:
2208  case AV_CODEC_ID_FLV1:
2209  case AV_CODEC_ID_RV10:
2210  case AV_CODEC_ID_RV20:
2211  if (CONFIG_H263_ENCODER)
2212  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2213  break;
2214  case AV_CODEC_ID_MJPEG:
2215  case AV_CODEC_ID_AMV:
2216  if (CONFIG_MJPEG_ENCODER)
2217  ff_mjpeg_encode_mb(s, s->block);
2218  break;
2219  default:
2220  av_assert1(0);
2221  }
2222 }
2223 
2224 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2225 {
2226  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2227  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2228  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2229 }
2230 
/**
 * Save the per-macroblock encoder state of *s into *d before a trial
 * encode, so encode_mb_hq() can restore it when several candidate MB
 * types are tried.  Counterpart of copy_context_after_encode().
 * NOTE(review): one original source line (2256) was dropped by the doc
 * extraction just before the closing brace -- verify against upstream
 * before relying on this copy being complete.
 */
2231 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2232  int i;
2233 
2234  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2235 
2236  /* mpeg1 */
2237  d->mb_skip_run= s->mb_skip_run;
2238  for(i=0; i<3; i++)
2239  d->last_dc[i] = s->last_dc[i];
2240 
2241  /* statistics */
2242  d->mv_bits= s->mv_bits;
2243  d->i_tex_bits= s->i_tex_bits;
2244  d->p_tex_bits= s->p_tex_bits;
2245  d->i_count= s->i_count;
2246  d->f_count= s->f_count;
2247  d->b_count= s->b_count;
2248  d->skip_count= s->skip_count;
2249  d->misc_bits= s->misc_bits;
2250  d->last_bits= 0;
2251 
2252  d->mb_skipped= 0;
2253  d->qscale= s->qscale;
2254  d->dquant= s->dquant;
2255 
2257 }
2258 
/**
 * Copy the post-encode macroblock state of *s into *d; used to commit
 * the state of the best trial encode (see encode_mb_hq()).  Unlike the
 * "before" variant this also propagates the MVs, bitstream writers and
 * block data produced by the encode.
 * NOTE(review): two original source lines (2292, 2295) were dropped by
 * the doc extraction -- verify against upstream before relying on this
 * copy being complete.
 */
2259 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2260  int i;
2261 
2262  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2263  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2264 
2265  /* mpeg1 */
2266  d->mb_skip_run= s->mb_skip_run;
2267  for(i=0; i<3; i++)
2268  d->last_dc[i] = s->last_dc[i];
2269 
2270  /* statistics */
2271  d->mv_bits= s->mv_bits;
2272  d->i_tex_bits= s->i_tex_bits;
2273  d->p_tex_bits= s->p_tex_bits;
2274  d->i_count= s->i_count;
2275  d->f_count= s->f_count;
2276  d->b_count= s->b_count;
2277  d->skip_count= s->skip_count;
2278  d->misc_bits= s->misc_bits;
2279 
2280  d->mb_intra= s->mb_intra;
2281  d->mb_skipped= s->mb_skipped;
2282  d->mv_type= s->mv_type;
2283  d->mv_dir= s->mv_dir;
2284  d->pb= s->pb;
2285  if(s->data_partitioning){
2286  d->pb2= s->pb2;
2287  d->tex_pb= s->tex_pb;
2288  }
2289  d->block= s->block;
2290  for(i=0; i<8; i++)
2291  d->block_last_index[i]= s->block_last_index[i];
2293  d->qscale= s->qscale;
2294 
2296 }
2297 
/**
 * Trial-encode the current macroblock with the candidate type given by
 * @p type, score the result (bit count, or lambda-weighted bits + SSE
 * when mb_decision is FF_MB_DECISION_RD) and, if it beats *dmin, commit
 * the state into *best and flip *next_block so the next trial uses the
 * other scratch block/bitstream buffer.
 * NOTE(review): the continuation of the parameter list (original line
 * 2299, the PutBitContext pb/pb2/tex_pb array parameters) was dropped
 * by the doc extraction -- see upstream for the full signature.
 */
2298 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2300  int *dmin, int *next_block, int motion_x, int motion_y)
2301 {
2302  int score;
2303  uint8_t *dest_backup[3];
2304 
2305  copy_context_before_encode(s, backup, type);
2306 
2307  s->block= s->blocks[*next_block];
2308  s->pb= pb[*next_block];
2309  if(s->data_partitioning){
2310  s->pb2 = pb2 [*next_block];
2311  s->tex_pb= tex_pb[*next_block];
2312  }
2313 
/* redirect reconstruction into the scratchpad so the first candidate's
 * decoded pixels are not overwritten */
2314  if(*next_block){
2315  memcpy(dest_backup, s->dest, sizeof(s->dest));
2316  s->dest[0] = s->rd_scratchpad;
2317  s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2318  s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2319  assert(s->linesize >= 32); //FIXME
2320  }
2321 
2322  encode_mb(s, motion_x, motion_y);
2323 
2324  score= put_bits_count(&s->pb);
2325  if(s->data_partitioning){
2326  score+= put_bits_count(&s->pb2);
2327  score+= put_bits_count(&s->tex_pb);
2328  }
2329 
2330  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2331  ff_MPV_decode_mb(s, s->block);
2332 
2333  score *= s->lambda2;
2334  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2335  }
2336 
2337  if(*next_block){
2338  memcpy(s->dest, dest_backup, sizeof(s->dest));
2339  }
2340 
2341  if(score<*dmin){
2342  *dmin= score;
2343  *next_block^=1;
2344 
2345  copy_context_after_encode(best, s, type);
2346  }
2347 }
2348 
2349 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2350  uint32_t *sq = ff_squareTbl + 256;
2351  int acc=0;
2352  int x,y;
2353 
2354  if(w==16 && h==16)
2355  return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2356  else if(w==8 && h==8)
2357  return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2358 
2359  for(y=0; y<h; y++){
2360  for(x=0; x<w; x++){
2361  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2362  }
2363  }
2364 
2365  av_assert2(acc>=0);
2366 
2367  return acc;
2368 }
2369 
2370 static int sse_mb(MpegEncContext *s){
2371  int w= 16;
2372  int h= 16;
2373 
2374  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2375  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2376 
2377  if(w==16 && h==16)
2378  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2379  return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2380  +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2381  +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2382  }else{
2383  return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2384  +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2385  +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2386  }
2387  else
2388  return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2389  +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2390  +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2391 }
2392 
/* Slice-thread worker: motion-estimation pre-pass over this context's MB
 * rows, iterated bottom-up / right-to-left, using the pre_dia_size
 * diamond.  NOTE(review): the function signature (original line 2393)
 * and the per-MB estimation call (2402) were dropped by the doc
 * extraction -- see upstream for the full body. */
2394  MpegEncContext *s= *(void**)arg;
2395 
2396 
2397  s->me.pre_pass=1;
2398  s->me.dia_size= s->avctx->pre_dia_size;
2399  s->first_slice_line=1;
2400  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2401  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2403  }
2404  s->first_slice_line=0;
2405  }
2406 
2407  s->me.pre_pass=0;
2408 
2409  return 0;
2410 }
2411 
/* Slice-thread worker: full motion estimation for this context's MB
 * rows; advances the block_index[] values as it walks each row and
 * stores the resulting motion vectors / mb_type in the context.
 * NOTE(review): the function signature (original line 2412) and several
 * interior lines (2415, 2421, 2429-2432: block-index init and the
 * P-/B-frame estimation calls) were dropped by the doc extraction --
 * see upstream for the full body. */
2413  MpegEncContext *s= *(void**)arg;
2414 
2416 
2417  s->me.dia_size= s->avctx->dia_size;
2418  s->first_slice_line=1;
2419  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2420  s->mb_x=0; //for block init below
2422  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2423  s->block_index[0]+=2;
2424  s->block_index[1]+=2;
2425  s->block_index[2]+=2;
2426  s->block_index[3]+=2;
2427 
2428  /* compute motion vector & mb_type and store in context */
2431  else
2433  }
2434  s->first_slice_line=0;
2435  }
2436  return 0;
2437 }
2438 
/**
 * Slice-thread worker: compute the spatial variance and mean of every
 * 16x16 luma macroblock in this context's rows, storing them in
 * current_picture.mb_var/mb_mean and accumulating mb_var_sum_temp.
 * NOTE(review): one original line (2443) was dropped by the doc
 * extraction -- verify against upstream.
 */
2439 static int mb_var_thread(AVCodecContext *c, void *arg){
2440  MpegEncContext *s= *(void**)arg;
2441  int mb_x, mb_y;
2442 
2444 
2445  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2446  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2447  int xx = mb_x * 16;
2448  int yy = mb_y * 16;
2449  uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2450  int varc;
2451  int sum = s->dsp.pix_sum(pix, s->linesize);
2452 
/* variance = E[x^2] - E[x]^2, with rounding bias; sum is over 256 pels */
2453  varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2454 
2455  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2456  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2457  s->me.mb_var_sum_temp += varc;
2458  }
2459  }
2460  return 0;
2461 }
2462 
/* Terminate the current slice: merge MPEG-4 partitions and write
 * stuffing, or pad MJPEG, then byte-align and flush the bitstream
 * writer.  NOTE(review): the function signature (original line 2463)
 * and a few interior lines (2466, 2471, 2474) were dropped by the doc
 * extraction -- see upstream for the full body. */
2464  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2465  if(s->partitioned_frame){
2467  }
2468 
2469  ff_mpeg4_stuffing(&s->pb);
2470  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2472  }
2473 
2475  flush_put_bits(&s->pb);
2476 
2477  if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2478  s->misc_bits+= get_bits_diff(s);
2479 }
2480 
2482 {
2483  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2484  int offset = put_bits_count(&s->pb);
2485  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2486  int gobn = s->mb_y / s->gob_index;
2487  int pred_x, pred_y;
2488  if (CONFIG_H263_ENCODER)
2489  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2490  bytestream_put_le32(&ptr, offset);
2491  bytestream_put_byte(&ptr, s->qscale);
2492  bytestream_put_byte(&ptr, gobn);
2493  bytestream_put_le16(&ptr, mba);
2494  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2495  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2496  /* 4MV not implemented */
2497  bytestream_put_byte(&ptr, 0); /* hmv2 */
2498  bytestream_put_byte(&ptr, 0); /* vmv2 */
2499 }
2500 
2501 static void update_mb_info(MpegEncContext *s, int startcode)
2502 {
2503  if (!s->mb_info)
2504  return;
2505  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2506  s->mb_info_size += 12;
2507  s->prev_mb_info = s->last_mb_info;
2508  }
2509  if (startcode) {
2510  s->prev_mb_info = put_bits_count(&s->pb)/8;
2511  /* This might have incremented mb_info_size above, and we return without
2512  * actually writing any info into that slot yet. But in that case,
2513  * this will be called again at the start of the after writing the
2514  * start code, actually writing the mb info. */
2515  return;
2516  }
2517 
2518  s->last_mb_info = put_bits_count(&s->pb)/8;
2519  if (!s->mb_info_size)
2520  s->mb_info_size += 12;
2521  write_mb_info(s);
2522 }
2523 
/**
 * Slice-thread worker: encode the macroblock rows assigned to this
 * context (start_mb_y..end_mb_y).  Handles resync/GOB/video-packet
 * headers, rate-distortion trial encoding of competing MB types (the
 * encode_mb_hq() path), PSNR error accumulation, the loop filter and
 * the RTP callback for the final GOB.  Returns 0 on success or -1 when
 * the output buffer would overflow.
 *
 * NOTE(review): numerous original lines (case labels, helper calls such
 * as ff_init_block_index/ff_update_block_index, partition handling) are
 * missing from this extraction; compare against the upstream
 * mpegvideo_enc.c before modifying the control flow below.
 */
2524 static int encode_thread(AVCodecContext *c, void *arg){
2525  MpegEncContext *s= *(void**)arg;
2526  int mb_x, mb_y, pdif = 0;
2527  int chr_h= 16>>s->chroma_y_shift;
2528  int i, j;
2529  MpegEncContext best_s, backup_s;
2530  uint8_t bit_buf[2][MAX_MB_BYTES];
2531  uint8_t bit_buf2[2][MAX_MB_BYTES];
2532  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2533  PutBitContext pb[2], pb2[2], tex_pb[2];
2534 
2536 
2537  for(i=0; i<2; i++){
2538  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2539  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2540  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2541  }
2542 
/* reset per-slice statistics */
2543  s->last_bits= put_bits_count(&s->pb);
2544  s->mv_bits=0;
2545  s->misc_bits=0;
2546  s->i_tex_bits=0;
2547  s->p_tex_bits=0;
2548  s->i_count=0;
2549  s->f_count=0;
2550  s->b_count=0;
2551  s->skip_count=0;
2552 
2553  for(i=0; i<3; i++){
2554  /* init last dc values */
2555  /* note: quant matrix value (8) is implied here */
2556  s->last_dc[i] = 128 << s->intra_dc_precision;
2557 
2558  s->current_picture.f.error[i] = 0;
2559  }
2560  if(s->codec_id==AV_CODEC_ID_AMV){
2561  s->last_dc[0] = 128*8/13;
2562  s->last_dc[1] = 128*8/14;
2563  s->last_dc[2] = 128*8/14;
2564  }
2565  s->mb_skip_run = 0;
2566  memset(s->last_mv, 0, sizeof(s->last_mv));
2567 
2568  s->last_mv_dir = 0;
2569 
2570  switch(s->codec_id){
2571  case AV_CODEC_ID_H263:
2572  case AV_CODEC_ID_H263P:
2573  case AV_CODEC_ID_FLV1:
2574  if (CONFIG_H263_ENCODER)
2576  break;
2577  case AV_CODEC_ID_MPEG4:
2578  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2580  break;
2581  }
2582 
2583  s->resync_mb_x=0;
2584  s->resync_mb_y=0;
2585  s->first_slice_line = 1;
2586  s->ptr_lastgob = s->pb.buf;
/* main macroblock loop */
2587  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2588  s->mb_x=0;
2589  s->mb_y= mb_y;
2590 
2591  ff_set_qscale(s, s->qscale);
2593 
2594  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2595  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2596  int mb_type= s->mb_type[xy];
2597 // int d;
2598  int dmin= INT_MAX;
2599  int dir;
2600 
2601  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2602  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2603  return -1;
2604  }
2605  if(s->data_partitioning){
2606  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2607  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2608  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2609  return -1;
2610  }
2611  }
2612 
2613  s->mb_x = mb_x;
2614  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2616 
2617  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2619  xy= s->mb_y*s->mb_stride + s->mb_x;
2620  mb_type= s->mb_type[xy];
2621  }
2622 
2623  /* write gob / video packet header */
2624  if(s->rtp_mode){
2625  int current_packet_size, is_gob_start;
2626 
2627  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2628 
2629  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2630 
2631  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2632 
2633  switch(s->codec_id){
2634  case AV_CODEC_ID_H263:
2635  case AV_CODEC_ID_H263P:
2636  if(!s->h263_slice_structured)
2637  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2638  break;
2640  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2642  if(s->mb_skip_run) is_gob_start=0;
2643  break;
2644  case AV_CODEC_ID_MJPEG:
2645  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2646  break;
2647  }
2648 
2649  if(is_gob_start){
2650  if(s->start_mb_y != mb_y || mb_x!=0){
2651  write_slice_end(s);
2652  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2654  }
2655  }
2656 
2657  av_assert2((put_bits_count(&s->pb)&7) == 0);
2658  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2659 
/* optional deliberate error injection for testing (error_rate option) */
2660  if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2661  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2662  int d= 100 / s->avctx->error_rate;
2663  if(r % d == 0){
2664  current_packet_size=0;
2665  s->pb.buf_ptr= s->ptr_lastgob;
2666  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2667  }
2668  }
2669 
2670  if (s->avctx->rtp_callback){
2671  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2672  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2673  }
2674  update_mb_info(s, 1);
2675 
2676  switch(s->codec_id){
2677  case AV_CODEC_ID_MPEG4:
2678  if (CONFIG_MPEG4_ENCODER) {
2681  }
2682  break;
2685  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2688  }
2689  break;
2690  case AV_CODEC_ID_H263:
2691  case AV_CODEC_ID_H263P:
2692  if (CONFIG_H263_ENCODER)
2693  ff_h263_encode_gob_header(s, mb_y);
2694  break;
2695  }
2696 
2697  if(s->flags&CODEC_FLAG_PASS1){
2698  int bits= put_bits_count(&s->pb);
2699  s->misc_bits+= bits - s->last_bits;
2700  s->last_bits= bits;
2701  }
2702 
2703  s->ptr_lastgob += current_packet_size;
2704  s->first_slice_line=1;
2705  s->resync_mb_x=mb_x;
2706  s->resync_mb_y=mb_y;
2707  }
2708  }
2709 
2710  if( (s->resync_mb_x == s->mb_x)
2711  && s->resync_mb_y+1 == s->mb_y){
2712  s->first_slice_line=0;
2713  }
2714 
2715  s->mb_skipped=0;
2716  s->dquant=0; //only for QP_RD
2717 
2718  update_mb_info(s, 0);
2719 
/* rate-distortion path: trial-encode every candidate MB type and keep
 * the cheapest (see encode_mb_hq) */
2720  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2721  int next_block=0;
2722  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2723 
2724  copy_context_before_encode(&backup_s, s, -1);
2725  backup_s.pb= s->pb;
2728  if(s->data_partitioning){
2729  backup_s.pb2= s->pb2;
2730  backup_s.tex_pb= s->tex_pb;
2731  }
2732 
2733  if(mb_type&CANDIDATE_MB_TYPE_INTER){
2734  s->mv_dir = MV_DIR_FORWARD;
2735  s->mv_type = MV_TYPE_16X16;
2736  s->mb_intra= 0;
2737  s->mv[0][0][0] = s->p_mv_table[xy][0];
2738  s->mv[0][0][1] = s->p_mv_table[xy][1];
2739  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2740  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2741  }
2742  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2743  s->mv_dir = MV_DIR_FORWARD;
2744  s->mv_type = MV_TYPE_FIELD;
2745  s->mb_intra= 0;
2746  for(i=0; i<2; i++){
2747  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2748  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2749  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2750  }
2751  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2752  &dmin, &next_block, 0, 0);
2753  }
2754  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2755  s->mv_dir = MV_DIR_FORWARD;
2756  s->mv_type = MV_TYPE_16X16;
2757  s->mb_intra= 0;
2758  s->mv[0][0][0] = 0;
2759  s->mv[0][0][1] = 0;
2760  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2761  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2762  }
2763  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2764  s->mv_dir = MV_DIR_FORWARD;
2765  s->mv_type = MV_TYPE_8X8;
2766  s->mb_intra= 0;
2767  for(i=0; i<4; i++){
2768  s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2769  s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2770  }
2771  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2772  &dmin, &next_block, 0, 0);
2773  }
2774  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2775  s->mv_dir = MV_DIR_FORWARD;
2776  s->mv_type = MV_TYPE_16X16;
2777  s->mb_intra= 0;
2778  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2779  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2780  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2781  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2782  }
2783  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2784  s->mv_dir = MV_DIR_BACKWARD;
2785  s->mv_type = MV_TYPE_16X16;
2786  s->mb_intra= 0;
2787  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2788  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2789  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2790  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2791  }
2792  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2794  s->mv_type = MV_TYPE_16X16;
2795  s->mb_intra= 0;
2796  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2797  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2798  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2799  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2800  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2801  &dmin, &next_block, 0, 0);
2802  }
2803  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2804  s->mv_dir = MV_DIR_FORWARD;
2805  s->mv_type = MV_TYPE_FIELD;
2806  s->mb_intra= 0;
2807  for(i=0; i<2; i++){
2808  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2809  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2810  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2811  }
2812  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2813  &dmin, &next_block, 0, 0);
2814  }
2815  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2816  s->mv_dir = MV_DIR_BACKWARD;
2817  s->mv_type = MV_TYPE_FIELD;
2818  s->mb_intra= 0;
2819  for(i=0; i<2; i++){
2820  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2821  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2822  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2823  }
2824  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2825  &dmin, &next_block, 0, 0);
2826  }
2827  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2829  s->mv_type = MV_TYPE_FIELD;
2830  s->mb_intra= 0;
2831  for(dir=0; dir<2; dir++){
2832  for(i=0; i<2; i++){
2833  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2834  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2835  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2836  }
2837  }
2838  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2839  &dmin, &next_block, 0, 0);
2840  }
2841  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2842  s->mv_dir = 0;
2843  s->mv_type = MV_TYPE_16X16;
2844  s->mb_intra= 1;
2845  s->mv[0][0][0] = 0;
2846  s->mv[0][0][1] = 0;
2847  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2848  &dmin, &next_block, 0, 0);
2849  if(s->h263_pred || s->h263_aic){
2850  if(best_s.mb_intra)
2851  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2852  else
2853  ff_clean_intra_table_entries(s); //old mode?
2854  }
2855  }
2856 
/* trellis over qscale deltas around the best MB type (QP_RD) */
2857  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2858  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2859  const int last_qp= backup_s.qscale;
2860  int qpi, qp, dc[6];
2861  int16_t ac[6][16];
2862  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2863  static const int dquant_tab[4]={-1,1,-2,2};
2864  int storecoefs = s->mb_intra && s->dc_val[0];
2865 
2866  av_assert2(backup_s.dquant == 0);
2867 
2868  //FIXME intra
2869  s->mv_dir= best_s.mv_dir;
2870  s->mv_type = MV_TYPE_16X16;
2871  s->mb_intra= best_s.mb_intra;
2872  s->mv[0][0][0] = best_s.mv[0][0][0];
2873  s->mv[0][0][1] = best_s.mv[0][0][1];
2874  s->mv[1][0][0] = best_s.mv[1][0][0];
2875  s->mv[1][0][1] = best_s.mv[1][0][1];
2876 
2877  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2878  for(; qpi<4; qpi++){
2879  int dquant= dquant_tab[qpi];
2880  qp= last_qp + dquant;
2881  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2882  continue;
2883  backup_s.dquant= dquant;
2884  if(storecoefs){
2885  for(i=0; i<6; i++){
2886  dc[i]= s->dc_val[0][ s->block_index[i] ];
2887  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
2888  }
2889  }
2890 
2891  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2892  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2893  if(best_s.qscale != qp){
2894  if(storecoefs){
2895  for(i=0; i<6; i++){
2896  s->dc_val[0][ s->block_index[i] ]= dc[i];
2897  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
2898  }
2899  }
2900  }
2901  }
2902  }
2903  }
2904  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2905  int mx= s->b_direct_mv_table[xy][0];
2906  int my= s->b_direct_mv_table[xy][1];
2907 
2908  backup_s.dquant = 0;
2910  s->mb_intra= 0;
2911  ff_mpeg4_set_direct_mv(s, mx, my);
2912  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2913  &dmin, &next_block, mx, my);
2914  }
2915  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2916  backup_s.dquant = 0;
2918  s->mb_intra= 0;
2919  ff_mpeg4_set_direct_mv(s, 0, 0);
2920  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2921  &dmin, &next_block, 0, 0);
2922  }
2923  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2924  int coded=0;
2925  for(i=0; i<6; i++)
2926  coded |= s->block_last_index[i];
2927  if(coded){
2928  int mx,my;
2929  memcpy(s->mv, best_s.mv, sizeof(s->mv));
2930  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2931  mx=my=0; //FIXME find the one we actually used
2932  ff_mpeg4_set_direct_mv(s, mx, my);
2933  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2934  mx= s->mv[1][0][0];
2935  my= s->mv[1][0][1];
2936  }else{
2937  mx= s->mv[0][0][0];
2938  my= s->mv[0][0][1];
2939  }
2940 
2941  s->mv_dir= best_s.mv_dir;
2942  s->mv_type = best_s.mv_type;
2943  s->mb_intra= 0;
2944 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2945  s->mv[0][0][1] = best_s.mv[0][0][1];
2946  s->mv[1][0][0] = best_s.mv[1][0][0];
2947  s->mv[1][0][1] = best_s.mv[1][0][1];*/
2948  backup_s.dquant= 0;
2949  s->skipdct=1;
2950  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2951  &dmin, &next_block, mx, my);
2952  s->skipdct=0;
2953  }
2954  }
2955 
2956  s->current_picture.f.qscale_table[xy] = best_s.qscale;
2957 
/* commit the winning trial: restore its context and splice its bits
 * into the real bitstream writers */
2958  copy_context_after_encode(s, &best_s, -1);
2959 
2960  pb_bits_count= put_bits_count(&s->pb);
2961  flush_put_bits(&s->pb);
2962  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2963  s->pb= backup_s.pb;
2964 
2965  if(s->data_partitioning){
2966  pb2_bits_count= put_bits_count(&s->pb2);
2967  flush_put_bits(&s->pb2);
2968  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2969  s->pb2= backup_s.pb2;
2970 
2971  tex_pb_bits_count= put_bits_count(&s->tex_pb);
2972  flush_put_bits(&s->tex_pb);
2973  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2974  s->tex_pb= backup_s.tex_pb;
2975  }
2976  s->last_bits= put_bits_count(&s->pb);
2977 
2978  if (CONFIG_H263_ENCODER &&
2981 
2982  if(next_block==0){ //FIXME 16 vs linesize16
2983  s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2984  s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2985  s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2986  }
2987 
2989  ff_MPV_decode_mb(s, s->block);
2990  } else {
2991  int motion_x = 0, motion_y = 0;
2993  // only one MB-Type possible
2994 
2995  switch(mb_type){
2997  s->mv_dir = 0;
2998  s->mb_intra= 1;
2999  motion_x= s->mv[0][0][0] = 0;
3000  motion_y= s->mv[0][0][1] = 0;
3001  break;
3003  s->mv_dir = MV_DIR_FORWARD;
3004  s->mb_intra= 0;
3005  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3006  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3007  break;
3009  s->mv_dir = MV_DIR_FORWARD;
3010  s->mv_type = MV_TYPE_FIELD;
3011  s->mb_intra= 0;
3012  for(i=0; i<2; i++){
3013  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3014  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3015  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3016  }
3017  break;
3019  s->mv_dir = MV_DIR_FORWARD;
3020  s->mv_type = MV_TYPE_8X8;
3021  s->mb_intra= 0;
3022  for(i=0; i<4; i++){
3023  s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
3024  s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
3025  }
3026  break;
3028  if (CONFIG_MPEG4_ENCODER) {
3030  s->mb_intra= 0;
3031  motion_x=s->b_direct_mv_table[xy][0];
3032  motion_y=s->b_direct_mv_table[xy][1];
3033  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3034  }
3035  break;
3037  if (CONFIG_MPEG4_ENCODER) {
3039  s->mb_intra= 0;
3040  ff_mpeg4_set_direct_mv(s, 0, 0);
3041  }
3042  break;
3045  s->mb_intra= 0;
3046  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3047  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3048  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3049  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3050  break;
3052  s->mv_dir = MV_DIR_BACKWARD;
3053  s->mb_intra= 0;
3054  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3055  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3056  break;
3058  s->mv_dir = MV_DIR_FORWARD;
3059  s->mb_intra= 0;
3060  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3061  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3062  break;
3064  s->mv_dir = MV_DIR_FORWARD;
3065  s->mv_type = MV_TYPE_FIELD;
3066  s->mb_intra= 0;
3067  for(i=0; i<2; i++){
3068  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3069  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3070  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3071  }
3072  break;
3074  s->mv_dir = MV_DIR_BACKWARD;
3075  s->mv_type = MV_TYPE_FIELD;
3076  s->mb_intra= 0;
3077  for(i=0; i<2; i++){
3078  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3079  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3080  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3081  }
3082  break;
3085  s->mv_type = MV_TYPE_FIELD;
3086  s->mb_intra= 0;
3087  for(dir=0; dir<2; dir++){
3088  for(i=0; i<2; i++){
3089  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3090  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3091  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3092  }
3093  }
3094  break;
3095  default:
3096  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3097  }
3098 
3099  encode_mb(s, motion_x, motion_y);
3100 
3101  // RAL: Update last macroblock type
3102  s->last_mv_dir = s->mv_dir;
3103 
3104  if (CONFIG_H263_ENCODER &&
3107 
3108  ff_MPV_decode_mb(s, s->block);
3109  }
3110 
3111  /* clean the MV table in IPS frames for direct mode in B frames */
3112  if(s->mb_intra /* && I,P,S_TYPE */){
3113  s->p_mv_table[xy][0]=0;
3114  s->p_mv_table[xy][1]=0;
3115  }
3116 
3117  if(s->flags&CODEC_FLAG_PSNR){
3118  int w= 16;
3119  int h= 16;
3120 
3121  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3122  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3123 
3124  s->current_picture.f.error[0] += sse(
3125  s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3126  s->dest[0], w, h, s->linesize);
3127  s->current_picture.f.error[1] += sse(
3128  s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3129  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3130  s->current_picture.f.error[2] += sse(
3131  s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3132  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3133  }
3134  if(s->loop_filter){
3135  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3137  }
3138  av_dlog(s->avctx, "MB %d %d bits\n",
3139  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3140  }
3141  }
3142 
3143  //not beautiful here but we must write it before flushing so it has to be here
3146 
3147  write_slice_end(s);
3148 
3149  /* Send the last GOB if RTP */
3150  if (s->avctx->rtp_callback) {
3151  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3152  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3153  /* Call the RTP callback to send the last GOB */
3154  emms_c();
3155  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3156  }
3157 
3158  return 0;
3159 }
3160 
/* MERGE(f): fold slice-thread context field f of src into dst and zero
 * the source so repeated merges stay correct. */
3161 #define MERGE(field) dst->field += src->field; src->field=0
/* Merge motion-estimation statistics from a slave slice context into
 * the master after the ME pass.  NOTE(review): the function signature
 * (original line 3162) was dropped by the doc extraction. */
3163  MERGE(me.scene_change_score);
3164  MERGE(me.mc_mb_var_sum_temp);
3165  MERGE(me.mb_var_sum_temp);
3166 }
3167 
/* Merge per-slice encode statistics and the slave's bitstream into the
 * master context after encoding.  NOTE(review): the function signature
 * (original line 3168) and lines 3182-3185 were dropped by the doc
 * extraction -- see upstream for the full body. */
3169  int i;
3170 
3171  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3172  MERGE(dct_count[1]);
3173  MERGE(mv_bits);
3174  MERGE(i_tex_bits);
3175  MERGE(p_tex_bits);
3176  MERGE(i_count);
3177  MERGE(f_count);
3178  MERGE(b_count);
3179  MERGE(skip_count);
3180  MERGE(misc_bits);
3181  MERGE(er.error_count);
3186 
3187  if(dst->avctx->noise_reduction){
3188  for(i=0; i<64; i++){
3189  MERGE(dct_error_sum[0][i]);
3190  MERGE(dct_error_sum[1][i]);
3191  }
3192  }
3193 
/* both writers must be byte aligned before the slave bits are appended */
3194  assert(put_bits_count(&src->pb) % 8 ==0);
3195  assert(put_bits_count(&dst->pb) % 8 ==0);
3196  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3197  flush_put_bits(&dst->pb);
3198 }
3199 
/**
 * Choose the qscale/lambda for the current picture: honor a pending
 * next_lambda, otherwise ask the rate controller (unless fixed_qscale),
 * then optionally build a per-MB qscale table for adaptive quantization.
 * Returns 0 on success, -1 if rate control rejects the frame.
 * NOTE(review): several original lines (3202-03, 3206-07, 3216, 3222:
 * the lambda/rate-control assignments and the codec-specific adaptive
 * quant calls) were dropped by the doc extraction -- see upstream.
 */
3200 static int estimate_qp(MpegEncContext *s, int dry_run){
3201  if (s->next_lambda){
3204  if(!dry_run) s->next_lambda= 0;
3205  } else if (!s->fixed_qscale) {
3208  if (s->current_picture.f.quality < 0)
3209  return -1;
3210  }
3211 
3212  if(s->adaptive_quant){
3213  switch(s->codec_id){
3214  case AV_CODEC_ID_MPEG4:
3215  if (CONFIG_MPEG4_ENCODER)
3217  break;
3218  case AV_CODEC_ID_H263:
3219  case AV_CODEC_ID_H263P:
3220  case AV_CODEC_ID_FLV1:
3221  if (CONFIG_H263_ENCODER)
3223  break;
3224  default:
3225  ff_init_qscale_tab(s);
3226  }
3227 
3228  s->lambda= s->lambda_table[0];
3229  //FIXME broken
3230  }else
3231  s->lambda = s->current_picture.f.quality;
3232  update_qscale(s);
3233  return 0;
3234 }
3235 
3236 /* must be called before writing the header */
/* Update the temporal distances used for B-frame MV scaling: pb_time
 * (distance from the previous non-B frame to this B frame) and pp_time
 * (distance between the two surrounding non-B frames).  NOTE(review):
 * the function signature (original line 3237) was dropped by the doc
 * extraction. */
3238  assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3239  s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3240 
3241  if(s->pict_type==AV_PICTURE_TYPE_B){
3242  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3243  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3244  }else{
3245  s->pp_time= s->time - s->last_non_b_time;
3246  s->last_non_b_time= s->time;
3247  assert(s->picture_number==0 || s->pp_time > 0);
3248  }
3249 }
3250 
{
    /* Encode one complete picture: motion estimation, scene-change handling,
     * f_code/b_code selection, quantizer decision, header writing and the
     * per-slice encode threads.
     * NOTE(review): the signature line (encode_picture) and a number of
     * interior lines were elided in this extract (marked below); the code
     * as shown is incomplete. */
    int i, ret;
    int bits;
    int context_count = s->slice_context_count;

    /* NOTE(review): elided line(s) here */

    /* Reset the average MB variance */
    s->me.mb_var_sum_temp =
    s->me.mc_mb_var_sum_temp = 0;

    /* we need to initialize some time vars before we can encode b-frames */
    // RAL: Condition added for MPEG1VIDEO
    /* NOTE(review): elided line(s) here */
    if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
        ff_set_mpeg4_time(s);

    s->me.scene_change_score=0;

//    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

    if(s->pict_type==AV_PICTURE_TYPE_I){
        /* MSMPEG4 v3 and newer always use no_rounding on intra pictures */
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
        else s->no_rounding=0;
    }else if(s->pict_type!=AV_PICTURE_TYPE_B){
        /* NOTE(review): elided condition line here */
            s->no_rounding ^= 1;
    }

    if(s->flags & CODEC_FLAG_PASS2){
        /* second pass: take the quantizer from the pass-1 statistics */
        if (estimate_qp(s,1) < 0)
            return -1;
        ff_get_2pass_fcode(s);
    }else if(!(s->flags & CODEC_FLAG_QSCALE)){
        /* NOTE(review): elided condition line here */
        s->lambda= s->last_lambda_for[s->pict_type];
        else
        /* NOTE(review): elided line here */
        update_qscale(s);
    }

    if(s->codec_id != AV_CODEC_ID_AMV){
        /* NOTE(review): elided line(s) here */
    }

    s->mb_intra=0; //for the rate distortion & bit compare functions
    for(i=1; i<context_count; i++){
        /* NOTE(review): elided per-slice context update call here */
        if (ret < 0)
            return ret;
    }

    if(ff_init_me(s)<0)
        return -1;

    /* Estimate motion for every MB */
    if(s->pict_type != AV_PICTURE_TYPE_I){
        /* scale lambda by me_penalty_compensation for the motion search */
        s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
        s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
        if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
            if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
                s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
            }
        }

        s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
        /* I-Frame */
        for(i=0; i<s->mb_stride*s->mb_height; i++)
            /* NOTE(review): elided mb_type assignment here */

        if(!s->fixed_qscale){
            /* finding spatial complexity for I-frame rate control */
            s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        /* NOTE(review): elided per-slice merge call here */
    }
    /* NOTE(review): elided mc_mb_var_sum line here */
    s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
    emms_c();

    /* NOTE(review): elided scene-change condition here */
    for(i=0; i<s->mb_stride*s->mb_height; i++)
        /* NOTE(review): elided mb_type assignment here */
    if(s->msmpeg4_version >= 3)
        s->no_rounding=1;
    av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
        /* NOTE(review): elided argument line here */
    }

    if(!s->umvplus){
        /* NOTE(review): elided f_code selection lines here */

        /* NOTE(review): elided interlaced-ME condition here */
            int a,b;
            a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
            /* NOTE(review): elided second field fcode line here */
            s->f_code= FFMAX3(s->f_code, a, b);
        }

        ff_fix_long_p_mvs(s);
        /* NOTE(review): elided lines here */
            int j;
            for(i=0; i<2; i++){
                for(j=0; j<2; j++)
                    /* NOTE(review): elided ff_fix_long_mvs call here */
            }
        }
    }

    if(s->pict_type==AV_PICTURE_TYPE_B){
        int a, b;

        /* pick the forward f_code from the forward MV tables */
        /* NOTE(review): elided fcode computation lines here */
        s->f_code = FFMAX(a, b);

        /* pick the backward b_code from the backward MV tables */
        /* NOTE(review): elided fcode computation lines here */
        s->b_code = FFMAX(a, b);

        /* NOTE(review): elided ff_fix_long_mvs calls and condition here */
            int dir, j;
            for(dir=0; dir<2; dir++){
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++){
                        /* NOTE(review): elided type computation here */
                        ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                        s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                    }
                }
            }
        }
    }

    if (estimate_qp(s, 0) < 0)
        return -1;

    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
        s->qscale= 3; //reduce clipping problems

    if (s->out_format == FMT_MJPEG) {
        /* for mjpeg, we do include qscale in the matrix */
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[i];

            s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
        }
        s->y_dc_scale_table=
        /* NOTE(review): elided scale-table / convert_matrix lines here */
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }
    if(s->codec_id == AV_CODEC_ID_AMV){
        /* AMV uses fixed SP5X quantization tables with constant DC scale */
        static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
        static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
        for(i=1;i<64;i++){
            int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];

            s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
            s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
        }
        s->y_dc_scale_table= y;
        s->c_dc_scale_table= c;
        s->intra_matrix[0] = 13;
        s->chroma_intra_matrix[0] = 14;
        /* NOTE(review): elided convert_matrix call here */
                       s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
        /* NOTE(review): elided convert_matrix call here */
                       s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->qscale= 8;
    }

    //FIXME var duplication
    /* NOTE(review): elided line here */
    s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
    /* NOTE(review): elided line(s) here */

    if (s->current_picture.f.key_frame)
        s->picture_in_gop_number=0;

    s->mb_x = s->mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    /* write the per-codec picture header */
    switch(s->out_format) {
    case FMT_MJPEG:
        if (CONFIG_MJPEG_ENCODER)
            /* NOTE(review): elided header call here */
        break;
    case FMT_H261:
        if (CONFIG_H261_ENCODER)
            ff_h261_encode_picture_header(s, picture_number);
        break;
    case FMT_H263:
        if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
            ff_wmv2_encode_picture_header(s, picture_number);
        else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
            ff_msmpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
            ff_mpeg4_encode_picture_header(s, picture_number);
        else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
            ff_rv10_encode_picture_header(s, picture_number);
        else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
            ff_rv20_encode_picture_header(s, picture_number);
        else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
            ff_flv_encode_picture_header(s, picture_number);
        else if (CONFIG_H263_ENCODER)
            ff_h263_encode_picture_header(s, picture_number);
        break;
    case FMT_MPEG1:
        if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
            ff_mpeg1_encode_picture_header(s, picture_number);
        break;
    case FMT_H264:
        break;
    default:
        av_assert0(0);
    }
    bits= put_bits_count(&s->pb);
    s->header_bits= bits - s->last_bits;

    for(i=1; i<context_count; i++){
        /* NOTE(review): elided per-slice context update call here */
    }
    s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        /* NOTE(review): elided per-slice merge call here */
    }
    emms_c();
    return 0;
}
3501 
3502 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3503  const int intra= s->mb_intra;
3504  int i;
3505 
3506  s->dct_count[intra]++;
3507 
3508  for(i=0; i<64; i++){
3509  int level= block[i];
3510 
3511  if(level){
3512  if(level>0){
3513  s->dct_error_sum[intra][i] += level;
3514  level -= s->dct_offset[intra][i];
3515  if(level<0) level=0;
3516  }else{
3517  s->dct_error_sum[intra][i] -= level;
3518  level += s->dct_offset[intra][i];
3519  if(level>0) level=0;
3520  }
3521  block[i]= level;
3522  }
3523  }
3524 }
3525 
                                  int16_t *block, int n,
                                  int qscale, int *overflow){
    /* Trellis (rate-distortion optimal) quantization of one 8x8 block.
     * NOTE(review): the function signature line was elided in this extract
     * (in FFmpeg this is dct_quantize_trellis_c()).
     * Quantizes the block in place, returns the index of the last nonzero
     * coefficient (or -1 / start_i-1 when the block becomes empty) and sets
     * *overflow when a quantized level exceeds s->max_qcoeff. */
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];      /* up to two candidate levels per position */
    int coeff_count[64];   /* number of candidates actually stored */
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->mb_intra) {
        int q;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
            bias= 1<<(QMAT_SHIFT-1);
        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* find the last coefficient that survives the dead-zone threshold */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* precompute candidate quantized levels (level and level-1) for each
     * surviving position */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* below the dead zone: only candidate is +-1 with matching sign */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic programming over scan positions: for every position keep the
     * best (rate*lambda + distortion) path ending there, pruning dominated
     * run starts via the survivor list */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->dsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value to measure distortion */
            if(s->out_format == FMT_H263){
                unquant_coeff= alevel*qmul + qadd;
            }else{ //MPEG1
                j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
                if(s->mb_intra){
                    unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the VLC table: try every surviving run start */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    /* also consider ending the block at this coefficient */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* level requires the escape code */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        //Note: there is a vlc code in mpeg4 which is 1 bit shorter then another one with a shorter run and the same level
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263){
        /* no explicit "last" code: pick the cheapest truncation point */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2; //FIXME exacter?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* special case: only the first coefficient remains; decide whether
         * coding it beats dropping the whole block */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263){
                unquant_coeff= (alevel*qmul + qadd)>>3;
            }else{ //MPEG1
                unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                unquant_coeff = (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else return last_non_zero;
    }

    i= last_i;
    av_assert2(last_level);

    /* walk the chosen path backwards and write the surviving levels back
     * into the (IDCT-permuted) block */
    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
3817 
//#define REFINE_STATS 1
/* Scaled 8x8 DCT basis functions, rows ordered by the IDCT permutation;
 * filled lazily by build_basis() (dct_quantize_refine() checks basis[0][0]). */
static int16_t basis[64][64];
3821 static void build_basis(uint8_t *perm){
3822  int i, j, x, y;
3823  emms_c();
3824  for(i=0; i<8; i++){
3825  for(j=0; j<8; j++){
3826  for(y=0; y<8; y++){
3827  for(x=0; x<8; x++){
3828  double s= 0.25*(1<<BASIS_SHIFT);
3829  int index= 8*i + j;
3830  int perm_index= perm[index];
3831  if(i==0) s*= sqrt(0.5);
3832  if(j==0) s*= sqrt(0.5);
3833  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3834  }
3835  }
3836  }
3837  }
3838 }
3839 
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    /* Iterative quantizer-noise-shaping refinement: starting from an
     * already-quantized block, repeatedly try +-1 changes on single
     * coefficients and keep any change that lowers rate*lambda plus the
     * weighted reconstruction error against orig[]. Returns the index of
     * the last nonzero coefficient.
     * NOTE(review): one line was elided in this extract (marked below). */
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable= s->intra_scantable.scantable;
    const uint8_t *perm_scantable= s->intra_scantable.permutated;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
#ifdef REFINE_STATS
static int count=0;
static int after_last=0;
static int to_zero=0;
static int from_zero=0;
static int raise=0;
static int lower=0;
static int messed_sign=0;
#endif

    if(basis[0][0] == 0)
        /* NOTE(review): elided build_basis() call here */

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        length = s->intra_ac_vlc_length;
        last_length= s->intra_ac_vlc_last_length;
    } else {
        dc= 0;
        start_i = 0;
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

#ifdef REFINE_STATS
{START_TIMER
#endif
    /* rem[] holds the (scaled) residual: reconstructed minus original */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig dirrectly instead of copying to rem[]
    }
#ifdef REFINE_STATS
STOP_TIMER("memset rem[]")}
#endif
    /* map the input weights into the 16..63 range and derive lambda */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
#ifdef REFINE_STATS
{START_TIMER
#endif
    /* build the run-length table of the current block and accumulate the
     * dequantized coefficients into rem[] */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->dsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("init rem[]")
}
}

{START_TIMER
#endif
    /* greedy loop: apply the single best +-1 change until none improves */
    for(;;){
        int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
#ifdef REFINE_STATS
{START_TIMER
#endif
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual, used to reject sign-mismatched
             * candidate insertions below */
#ifdef REFINE_STATS
{START_TIMER
#endif
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
#ifdef REFINE_STATS
STOP_TIMER("rem*w*w")}
{START_TIMER
#endif
            s->dsp.fdct(d1);
#ifdef REFINE_STATS
STOP_TIMER("dct")}
#endif
        }

        if(start_i){
            /* intra: also consider changing the DC coefficient */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* modifying an existing nonzero level: rate delta of
                         * the VLC for the new level vs the old one */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* inserting a new +-1: splits the current run */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score += length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* removing a +-1: merges two runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }
#ifdef REFINE_STATS
STOP_TIMER("iterative step")}
#endif

        if(best_change){
            /* apply the winning change and keep last_non_zero consistent */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
#ifdef REFINE_STATS
after_last++;
#endif
            }else{
#ifdef REFINE_STATS
if(block[j]){
    if(block[j] - best_change){
        if(FFABS(block[j]) > FFABS(block[j] - best_change)){
            raise++;
        }else{
            lower++;
        }
    }else{
        from_zero++;
    }
}else{
    to_zero++;
}
#endif
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }
#ifdef REFINE_STATS
count++;
if(256*256*256*64 % count == 0){
    av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
}
#endif
            /* rebuild the run-length table after the change */
            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                 if(level){
                     run_tab[rle_index++]=run;
                     run=0;
                 }else{
                     run++;
                 }
            }

            s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }
#ifdef REFINE_STATS
if(last_non_zero>0){
STOP_TIMER("iterative search")
}
}
#endif

    return last_non_zero;
}
4212 
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    /* Plain (non-trellis) quantization of one 8x8 block: forward DCT,
     * optional denoising, dead-zone quantization and IDCT permutation.
     * Returns the index of the last nonzero coefficient and sets *overflow
     * when a level exceeds s->max_qcoeff.
     * NOTE(review): the signature line (ff_dct_quantize_c) and the lines
     * assigning 'bias' were elided in this extract; as shown, 'bias' is
     * read uninitialized. */
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->dsp.fdct (block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        /* NOTE(review): elided intra 'bias' assignment here */
    } else {
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        /* NOTE(review): elided inter 'bias' assignment here */
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* zero trailing coefficients until the first one above the dead zone */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    /* NOTE(review): elided permutation-type check here */
        ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);

    return last_non_zero;
}
4292 
/* helpers for the AVOption tables below: field offset + common flags */
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* private options of the H.263 encoder */
static const AVOption h263_options[] = {
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices","Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    /* NOTE(review): elided option entry here */
    { NULL },
};
4302 
/* AVClass binding the private options above to the H.263 encoder context */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4309 
    /* NOTE(review): the AVCodec declaration line (ff_h263_encoder) and the
     * .init/.close fields were elided in this extract. */
    .name           = "h263",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_MPV_encode_picture,
    .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
    .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
    .priv_class     = &h263_class,
};
4322 
/* private options of the H.263+ encoder */
static const AVOption h263p_options[] = {
    { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE},
    /* NOTE(review): elided option entry here */
    { NULL },
};
/* AVClass binding the private options above to the H.263+ encoder context */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4337 
4339  .name = "h263p",
4340  .type = AVMEDIA_TYPE_VIDEO,
4341  .id = AV_CODEC_ID_H263P,
4342  .priv_data_size = sizeof(MpegEncContext),
4344  .encode2 = ff_MPV_encode_picture,
4346  .capabilities = CODEC_CAP_SLICE_THREADS,
4347  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4348  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4349  .priv_class = &h263p_class,
4350 };
4351 
/* generic MPV options class for the msmpeg4v2 encoder */
FF_MPV_GENERIC_CLASS(msmpeg4v2)

    /* NOTE(review): the AVCodec declaration line (ff_msmpeg4v2_encoder) and
     * the .init/.close fields were elided in this extract. */
    .name           = "msmpeg4v2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_MPV_encode_picture,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .priv_class     = &msmpeg4v2_class,
};
4366 
/* generic MPV options class for the msmpeg4v3 encoder */
FF_MPV_GENERIC_CLASS(msmpeg4v3)

    /* NOTE(review): the AVCodec declaration line (ff_msmpeg4v3_encoder) and
     * the .init/.close fields were elided in this extract. */
    .name           = "msmpeg4",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_MPV_encode_picture,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .priv_class     = &msmpeg4v3_class,
};
4381 
/* NOTE(review): the FF_MPV_GENERIC_CLASS(wmv1) macro invocation, the AVCodec
 * declaration line (ff_wmv1_encoder) and the .init/.close fields were elided
 * in this extract. */

    .name           = "wmv1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_MPV_encode_picture,
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .priv_class     = &wmv1_class,
};