mpegvideo_enc.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/intmath.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/opt.h"
34 #include "avcodec.h"
35 #include "dsputil.h"
36 #include "mpegvideo.h"
37 #include "h263.h"
38 #include "mathops.h"
39 #include "mjpegenc.h"
40 #include "msmpeg4.h"
41 #include "faandct.h"
42 #include "thread.h"
43 #include "aandcttab.h"
44 #include "flv.h"
45 #include "mpeg4video.h"
46 #include "internal.h"
47 #include "bytestream.h"
48 #include <limits.h>
49 #include "sp5x.h"
50 
51 //#undef NDEBUG
52 //#include <assert.h>
53 
54 static int encode_picture(MpegEncContext *s, int picture_number);
55 static int dct_quantize_refine(MpegEncContext *s, DCTELEM *block, int16_t *weight, DCTELEM *orig, int n, int qscale);
56 static int sse_mb(MpegEncContext *s);
57 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
58 static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
59 
60 //#define DEBUG
61 
62 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_MV * 2 + 1];
63 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
64 
65 const AVOption ff_mpv_generic_options[] = {
66  FF_MPV_COMMON_OPTS
67  { NULL },
68 };
69 
70 void ff_convert_matrix(DSPContext *dsp, int (*qmat)[64],
71  uint16_t (*qmat16)[2][64],
72  const uint16_t *quant_matrix,
73  int bias, int qmin, int qmax, int intra)
74 {
75  int qscale;
76  int shift = 0;
77 
78  for (qscale = qmin; qscale <= qmax; qscale++) {
79  int i;
80  if (dsp->fdct == ff_jpeg_fdct_islow_8 ||
81  dsp->fdct == ff_jpeg_fdct_islow_10 ||
82  dsp->fdct == ff_faandct) {
83  for (i = 0; i < 64; i++) {
84  const int j = dsp->idct_permutation[i];
85  /* 16 <= qscale * quant_matrix[i] <= 7905
86  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
87  * 19952 <= x <= 249205026
88  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
89  * 3444240 >= (1 << 36) / (x) >= 275 */
90 
91  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
92  (qscale * quant_matrix[j]));
93  }
94  } else if (dsp->fdct == ff_fdct_ifast) {
95  for (i = 0; i < 64; i++) {
96  const int j = dsp->idct_permutation[i];
97  /* 16 <= qscale * quant_matrix[i] <= 7905
98  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
99  * 19952 <= x <= 249205026
100  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
101  * 3444240 >= (1 << 36) / (x) >= 275 */
102 
103  qmat[qscale][i] = (int)((UINT64_C(1) << (QMAT_SHIFT + 14)) /
104  (ff_aanscales[i] * (int64_t)qscale * quant_matrix[j]));
105  }
106  } else {
107  for (i = 0; i < 64; i++) {
108  const int j = dsp->idct_permutation[i];
109  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
110  * Assume x = qscale * quant_matrix[i]
111  * So 16 <= x <= 7905
112  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
113  * so 32768 >= (1 << 19) / (x) >= 67 */
114  qmat[qscale][i] = (int)((UINT64_C(1) << QMAT_SHIFT) /
115  (qscale * quant_matrix[j]));
116  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
117  // (qscale * quant_matrix[i]);
118  qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) /
119  (qscale * quant_matrix[j]);
120 
121  if (qmat16[qscale][0][i] == 0 ||
122  qmat16[qscale][0][i] == 128 * 256)
123  qmat16[qscale][0][i] = 128 * 256 - 1;
124  qmat16[qscale][1][i] =
125  ROUNDED_DIV(bias << (16 - QUANT_BIAS_SHIFT),
126  qmat16[qscale][0][i]);
127  }
128  }
129 
130  for (i = intra; i < 64; i++) {
131  int64_t max = 8191;
132  if (dsp->fdct == ff_fdct_ifast) {
133  max = (8191LL * ff_aanscales[i]) >> 14;
134  }
135  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
136  shift++;
137  }
138  }
139  }
140  if (shift) {
141  av_log(NULL, AV_LOG_INFO,
142  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
143  QMAT_SHIFT - shift);
144  }
145 }
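The tables built above turn the per-coefficient division of quantization into a multiply and a shift: each qmat entry stores a scaled reciprocal of qscale * quant_matrix[j]. A stand-alone sketch of that idea, not part of this file (the shift width here is illustrative; the real QMAT_SHIFT comes from mpegvideo.h):

    #include <stdint.h>
    #include <stdio.h>

    #define QMAT_SHIFT 22   /* illustrative value; the actual one is defined in mpegvideo.h */

    int main(void)
    {
        int qscale = 8, quant = 16;   /* quantizer scale and quant-matrix entry */
        int coeff  = 1234;            /* some DCT coefficient */

        /* precomputed reciprocal, in the spirit of ff_convert_matrix() */
        int qm = (int)((UINT64_C(1) << QMAT_SHIFT) / (qscale * quant));

        int by_division   = coeff / (qscale * quant);
        int by_reciprocal = (int)(((int64_t)coeff * qm) >> QMAT_SHIFT);

        printf("division: %d  reciprocal: %d\n", by_division, by_reciprocal);
        return 0;
    }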
146 
147 static inline void update_qscale(MpegEncContext *s)
148 {
149  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
150  (FF_LAMBDA_SHIFT + 7);
151  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
152 
153  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
154  FF_LAMBDA_SHIFT;
155 }
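With FF_LAMBDA_SHIFT = 7 and FF_LAMBDA_SCALE = 128 (libavutil), the expression above is (lambda * 139 + 8192) >> 14, roughly lambda / 117.9, i.e. the inverse of the usual qscale * FF_QP2LAMBDA (= 118) mapping. A small stand-alone check of that round trip:

    #include <stdio.h>

    /* values from libavutil/avutil.h */
    #define FF_LAMBDA_SHIFT 7
    #define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)
    #define FF_QP2LAMBDA    118

    int main(void)
    {
        for (int qp = 2; qp <= 31; qp += 7) {
            int lambda = qp * FF_QP2LAMBDA;                       /* qscale -> lambda */
            int qback  = (lambda * 139 + FF_LAMBDA_SCALE * 64) >> /* lambda -> qscale, */
                         (FF_LAMBDA_SHIFT + 7);                   /* as in update_qscale() */
            printf("qp=%2d lambda=%4d back=%2d\n", qp, lambda, qback);
        }
        return 0;
    }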
156 
157 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
158 {
159  int i;
160 
161  if (matrix) {
162  put_bits(pb, 1, 1);
163  for (i = 0; i < 64; i++) {
164  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
165  }
166  } else
167  put_bits(pb, 1, 0);
168 }
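Cost of the syntax written above: a custom matrix takes 1 + 64 * 8 = 513 bits (the load flag plus the 64 entries in zigzag order), while the default-matrix case is a single 0 bit.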
169 
170 /**
171  * init s->current_picture.qscale_table from s->lambda_table
172  */
173 void ff_init_qscale_tab(MpegEncContext *s)
174 {
175  int8_t * const qscale_table = s->current_picture.f.qscale_table;
176  int i;
177 
178  for (i = 0; i < s->mb_num; i++) {
179  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
180  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
181  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
182  s->avctx->qmax);
183  }
184 }
185 
186 static void copy_picture_attributes(MpegEncContext *s,
187  AVFrame *dst,
188  AVFrame *src)
189 {
190  int i;
191 
192  dst->pict_type = src->pict_type;
193  dst->quality = src->quality;
194  dst->coded_picture_number = src->coded_picture_number;
195  dst->display_picture_number = src->display_picture_number;
196  //dst->reference = src->reference;
197  dst->pts = src->pts;
198  dst->interlaced_frame = src->interlaced_frame;
199  dst->top_field_first = src->top_field_first;
200 
201  if (s->avctx->me_threshold) {
202  if (!src->motion_val[0])
203  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.motion_val not set!\n");
204  if (!src->mb_type)
205  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.mb_type not set!\n");
206  if (!src->ref_index[0])
207  av_log(s->avctx, AV_LOG_ERROR, "AVFrame.ref_index not set!\n");
208  if (src->motion_subsample_log2 != dst->motion_subsample_log2)
209  av_log(s->avctx, AV_LOG_ERROR,
210  "AVFrame.motion_subsample_log2 doesn't match! (%d!=%d)\n",
211  src->motion_subsample_log2, dst->motion_subsample_log2);
212 
213  memcpy(dst->mb_type, src->mb_type,
214  s->mb_stride * s->mb_height * sizeof(dst->mb_type[0]));
215 
216  for (i = 0; i < 2; i++) {
217  int stride = ((16 * s->mb_width ) >>
218  src->motion_subsample_log2) + 1;
219  int height = ((16 * s->mb_height) >> src->motion_subsample_log2);
220 
221  if (src->motion_val[i] &&
222  src->motion_val[i] != dst->motion_val[i]) {
223  memcpy(dst->motion_val[i], src->motion_val[i],
224  2 * stride * height * sizeof(int16_t));
225  }
226  if (src->ref_index[i] && src->ref_index[i] != dst->ref_index[i]) {
227  memcpy(dst->ref_index[i], src->ref_index[i],
228  s->mb_stride * 4 * s->mb_height * sizeof(int8_t));
229  }
230  }
231  }
232 }
233 
234 static void update_duplicate_context_after_me(MpegEncContext *dst,
235  MpegEncContext *src)
236 {
237 #define COPY(a) dst->a= src->a
238  COPY(pict_type);
239  COPY(current_picture);
240  COPY(f_code);
241  COPY(b_code);
242  COPY(qscale);
243  COPY(lambda);
244  COPY(lambda2);
245  COPY(picture_in_gop_number);
246  COPY(gop_picture_number);
247  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
248  COPY(progressive_frame); // FIXME don't set in encode_header
249  COPY(partitioned_frame); // FIXME don't set in encode_header
250 #undef COPY
251 }
252 
253 /**
254  * Set the given MpegEncContext to defaults for encoding.
255  * The changed fields will not depend upon the prior state of the MpegEncContext.
256  */
257 static void MPV_encode_defaults(MpegEncContext *s)
258 {
259  int i;
260  ff_MPV_common_defaults(s);
261 
262  for (i = -16; i < 16; i++) {
263  default_fcode_tab[i + MAX_MV] = 1;
264  }
265  s->me.mv_penalty = default_mv_penalty;
266  s->fcode_tab = default_fcode_tab;
267 }
268 
269 av_cold int ff_dct_encode_init(MpegEncContext *s) {
270  if (ARCH_X86)
271  ff_dct_encode_init_x86(s);
272 
273  if (!s->dct_quantize)
274  s->dct_quantize = ff_dct_quantize_c;
275  if (!s->denoise_dct)
276  s->denoise_dct = denoise_dct_c;
277  s->fast_dct_quantize = s->dct_quantize;
278  if (s->avctx->trellis)
279  s->dct_quantize = dct_quantize_trellis_c;
280 
281  return 0;
282 }
283 
284 /* init video encoder */
285 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
286 {
287  MpegEncContext *s = avctx->priv_data;
288  int i;
289  int chroma_h_shift, chroma_v_shift;
290 
291  MPV_encode_defaults(s);
292 
293  switch (avctx->codec_id) {
294  case AV_CODEC_ID_MPEG2VIDEO:
295  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
296  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
297  av_log(avctx, AV_LOG_ERROR,
298  "only YUV420 and YUV422 are supported\n");
299  return -1;
300  }
301  break;
302  case AV_CODEC_ID_LJPEG:
303  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
304  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
305  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
306  avctx->pix_fmt != AV_PIX_FMT_BGR0 &&
307  avctx->pix_fmt != AV_PIX_FMT_BGRA &&
308  avctx->pix_fmt != AV_PIX_FMT_BGR24 &&
309  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
310  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
311  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
312  avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
313  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
314  return -1;
315  }
316  break;
317  case AV_CODEC_ID_MJPEG:
318  case AV_CODEC_ID_AMV:
319  if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
320  avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
321  avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
322  ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
323  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
324  avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
325  avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
326  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
327  return -1;
328  }
329  break;
330  default:
331  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
332  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
333  return -1;
334  }
335  }
336 
337  switch (avctx->pix_fmt) {
338  case AV_PIX_FMT_YUVJ444P:
339  case AV_PIX_FMT_YUV444P:
340  s->chroma_format = CHROMA_444;
341  break;
342  case AV_PIX_FMT_YUVJ422P:
343  case AV_PIX_FMT_YUV422P:
344  s->chroma_format = CHROMA_422;
345  break;
346  case AV_PIX_FMT_YUVJ420P:
347  case AV_PIX_FMT_YUV420P:
348  default:
349  s->chroma_format = CHROMA_420;
350  break;
351  }
352 
353  s->bit_rate = avctx->bit_rate;
354  s->width = avctx->width;
355  s->height = avctx->height;
356  if (avctx->gop_size > 600 &&
357  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
358  av_log(avctx, AV_LOG_WARNING,
359  "keyframe interval too large!, reducing it from %d to %d\n",
360  avctx->gop_size, 600);
361  avctx->gop_size = 600;
362  }
363  s->gop_size = avctx->gop_size;
364  s->avctx = avctx;
365  s->flags = avctx->flags;
366  s->flags2 = avctx->flags2;
367  s->max_b_frames = avctx->max_b_frames;
368  s->codec_id = avctx->codec->id;
369 #if FF_API_MPV_GLOBAL_OPTS
370  if (avctx->luma_elim_threshold)
371  s->luma_elim_threshold = avctx->luma_elim_threshold;
372  if (avctx->chroma_elim_threshold)
373  s->chroma_elim_threshold = avctx->chroma_elim_threshold;
374 #endif
376  s->quarter_sample = (avctx->flags & CODEC_FLAG_QPEL) != 0;
377  s->mpeg_quant = avctx->mpeg_quant;
378  s->rtp_mode = !!avctx->rtp_payload_size;
379  s->intra_dc_precision = avctx->intra_dc_precision;
380  s->user_specified_pts = AV_NOPTS_VALUE;
381 
382  if (s->gop_size <= 1) {
383  s->intra_only = 1;
384  s->gop_size = 12;
385  } else {
386  s->intra_only = 0;
387  }
388 
389  s->me_method = avctx->me_method;
390 
391  /* Fixed QSCALE */
392  s->fixed_qscale = !!(avctx->flags & CODEC_FLAG_QSCALE);
393 
394 #if FF_API_MPV_GLOBAL_OPTS
395  if (s->flags & CODEC_FLAG_QP_RD)
396  s->mpv_flags |= FF_MPV_FLAG_QP_RD;
397 #endif
398 
399  s->adaptive_quant = (s->avctx->lumi_masking ||
400  s->avctx->dark_masking ||
401  s->avctx->temporal_cplx_masking ||
402  s->avctx->spatial_cplx_masking ||
403  s->avctx->p_masking ||
404  s->avctx->border_masking ||
405  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
406  !s->fixed_qscale;
407 
408  s->loop_filter = !!(s->flags & CODEC_FLAG_LOOP_FILTER);
409 
410  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
411  switch(avctx->codec_id) {
412  case AV_CODEC_ID_MPEG1VIDEO:
413  case AV_CODEC_ID_MPEG2VIDEO:
414  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
415  break;
416  case AV_CODEC_ID_MPEG4:
417  case AV_CODEC_ID_MSMPEG4V3:
418  case AV_CODEC_ID_WMV1:
419  case AV_CODEC_ID_WMV2:
420  if (avctx->rc_max_rate >= 15000000) {
421  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000L) * (760-320) / (38400000 - 15000000);
422  } else if(avctx->rc_max_rate >= 2000000) {
423  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000L) * (320- 80) / (15000000 - 2000000);
424  } else if(avctx->rc_max_rate >= 384000) {
425  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000L) * ( 80- 40) / ( 2000000 - 384000);
426  } else
427  avctx->rc_buffer_size = 40;
428  avctx->rc_buffer_size *= 16384;
429  break;
430  }
431  if (avctx->rc_buffer_size) {
432  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
433  }
434  }
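Worked example of the MPEG-1/2 branch above: any rc_max_rate up to 15 Mbit/s yields 112 * 16384 = 1835008 bits, which the log line reports as 1835008 / 8192 = 224 kbyte; larger rates scale linearly. A stand-alone sketch of that branch (a 64-bit intermediate is used here to stay safe on 32-bit long):

    #include <stdint.h>
    #include <stdio.h>

    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))

    /* the MPEG-1/2 branch of the VBV-size heuristic above, as a free-standing function */
    static int auto_vbv_mpeg12(int64_t rc_max_rate)
    {
        return (int)(FFMAX(rc_max_rate, 15000000) * 112 / 15000000 * 16384);
    }

    int main(void)
    {
        int64_t rates[] = { 1150000, 15000000, 30000000 };
        for (int i = 0; i < 3; i++) {
            int size = auto_vbv_mpeg12(rates[i]);
            printf("%9d bit/s -> %8d bits (%d kbyte)\n", (int)rates[i], size, size / 8192);
        }
        return 0;
    }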
435 
436  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
437  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
438  if (avctx->rc_max_rate && !avctx->rc_buffer_size)
439  return -1;
440  }
441 
442  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
443  av_log(avctx, AV_LOG_INFO,
444  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
445  }
446 
447  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
448  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
449  return -1;
450  }
451 
452  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
453  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
454  return -1;
455  }
456 
457  if (avctx->rc_max_rate &&
458  avctx->rc_max_rate == avctx->bit_rate &&
459  avctx->rc_max_rate != avctx->rc_min_rate) {
460  av_log(avctx, AV_LOG_INFO,
461  "impossible bitrate constraints, this will fail\n");
462  }
463 
464  if (avctx->rc_buffer_size &&
465  avctx->bit_rate * (int64_t)avctx->time_base.num >
466  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
467  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
468  return -1;
469  }
470 
471  if (!s->fixed_qscale &&
472  avctx->bit_rate * av_q2d(avctx->time_base) >
473  avctx->bit_rate_tolerance) {
474  av_log(avctx, AV_LOG_ERROR,
475  "bitrate tolerance too small for bitrate\n");
476  return -1;
477  }
478 
479  if (s->avctx->rc_max_rate &&
480  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
481  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
482  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
483  90000LL * (avctx->rc_buffer_size - 1) >
484  s->avctx->rc_max_rate * 0xFFFFLL) {
485  av_log(avctx, AV_LOG_INFO,
486  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
487  "specified vbv buffer is too large for the given bitrate!\n");
488  }
489 
490  if ((s->flags & CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
491  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
492  s->codec_id != AV_CODEC_ID_FLV1) {
493  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
494  return -1;
495  }
496 
497  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
498  av_log(avctx, AV_LOG_ERROR,
499  "OBMC is only supported with simple mb decision\n");
500  return -1;
501  }
502 
503  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
504  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
505  return -1;
506  }
507 
508  if (s->max_b_frames &&
509  s->codec_id != AV_CODEC_ID_MPEG4 &&
510  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
511  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
512  av_log(avctx, AV_LOG_ERROR, "b frames not supported by codec\n");
513  return -1;
514  }
515 
516  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
517  s->codec_id == AV_CODEC_ID_H263 ||
518  s->codec_id == AV_CODEC_ID_H263P) &&
519  (avctx->sample_aspect_ratio.num > 255 ||
520  avctx->sample_aspect_ratio.den > 255)) {
521  av_log(avctx, AV_LOG_WARNING,
522  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
523  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
524  av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
525  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
526  }
527 
528  if ((s->codec_id == AV_CODEC_ID_H263 ||
529  s->codec_id == AV_CODEC_ID_H263P) &&
530  (avctx->width > 2048 ||
531  avctx->height > 1152 )) {
532  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
533  return -1;
534  }
535  if ((s->codec_id == AV_CODEC_ID_H263 ||
536  s->codec_id == AV_CODEC_ID_H263P) &&
537  ((avctx->width &3) ||
538  (avctx->height&3) )) {
539  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
540  return -1;
541  }
542 
543  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
544  (avctx->width > 4095 ||
545  avctx->height > 4095 )) {
546  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
547  return -1;
548  }
549 
550  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
551  (avctx->width > 16383 ||
552  avctx->height > 16383 )) {
553  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
554  return -1;
555  }
556 
557  if (s->codec_id == AV_CODEC_ID_RV10 &&
558  (avctx->width &15 ||
559  avctx->height&15 )) {
560  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
561  return AVERROR(EINVAL);
562  }
563 
564  if (s->codec_id == AV_CODEC_ID_RV20 &&
565  (avctx->width &3 ||
566  avctx->height&3 )) {
567  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
568  return AVERROR(EINVAL);
569  }
570 
571  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
572  s->codec_id == AV_CODEC_ID_WMV2) &&
573  avctx->width & 1) {
574  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
575  return -1;
576  }
577 
578  if ((s->flags & (CODEC_FLAG_INTERLACED_DCT | CODEC_FLAG_INTERLACED_ME)) &&
579  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
580  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
581  return -1;
582  }
583 
584  // FIXME mpeg2 uses that too
585  if (s->mpeg_quant && s->codec_id != AV_CODEC_ID_MPEG4) {
586  av_log(avctx, AV_LOG_ERROR,
587  "mpeg2 style quantization not supported by codec\n");
588  return -1;
589  }
590 
591 #if FF_API_MPV_GLOBAL_OPTS
592  if (s->flags & CODEC_FLAG_CBP_RD)
593  s->mpv_flags |= FF_MPV_FLAG_CBP_RD;
594 #endif
595 
596  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
597  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
598  return -1;
599  }
600 
601  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
602  s->avctx->mb_decision != FF_MB_DECISION_RD) {
603  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
604  return -1;
605  }
606 
607  if (s->avctx->scenechange_threshold < 1000000000 &&
608  (s->flags & CODEC_FLAG_CLOSED_GOP)) {
609  av_log(avctx, AV_LOG_ERROR,
610  "closed gop with scene change detection are not supported yet, "
611  "set threshold to 1000000000\n");
612  return -1;
613  }
614 
615  if (s->flags & CODEC_FLAG_LOW_DELAY) {
616  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
617  av_log(avctx, AV_LOG_ERROR,
618  "low delay forcing is only available for mpeg2\n");
619  return -1;
620  }
621  if (s->max_b_frames != 0) {
622  av_log(avctx, AV_LOG_ERROR,
623  "b frames cannot be used with low delay\n");
624  return -1;
625  }
626  }
627 
628  if (s->q_scale_type == 1) {
629  if (avctx->qmax > 12) {
630  av_log(avctx, AV_LOG_ERROR,
631  "non linear quant only supports qmax <= 12 currently\n");
632  return -1;
633  }
634  }
635 
636  if (s->avctx->thread_count > 1 &&
637  s->codec_id != AV_CODEC_ID_MPEG4 &&
638  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
639  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
640  s->codec_id != AV_CODEC_ID_MJPEG &&
641  (s->codec_id != AV_CODEC_ID_H263P)) {
642  av_log(avctx, AV_LOG_ERROR,
643  "multi threaded encoding not supported by codec\n");
644  return -1;
645  }
646 
647  if (s->avctx->thread_count < 1) {
648  av_log(avctx, AV_LOG_ERROR,
649  "automatic thread number detection not supported by codec, "
650  "patch welcome\n");
651  return -1;
652  }
653 
654  if (s->avctx->slices > 1 || s->avctx->thread_count > 1)
655  s->rtp_mode = 1;
656 
657  if (s->avctx->thread_count > 1 && s->codec_id == AV_CODEC_ID_H263P)
658  s->h263_slice_structured = 1;
659 
660  if (!avctx->time_base.den || !avctx->time_base.num) {
661  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
662  return -1;
663  }
664 
665  i = (INT_MAX / 2 + 128) >> 8;
666  if (avctx->me_threshold >= i) {
667  av_log(avctx, AV_LOG_ERROR, "me_threshold too large, max is %d\n",
668  i - 1);
669  return -1;
670  }
671  if (avctx->mb_threshold >= i) {
672  av_log(avctx, AV_LOG_ERROR, "mb_threshold too large, max is %d\n",
673  i - 1);
674  return -1;
675  }
676 
677  if (avctx->b_frame_strategy && (avctx->flags & CODEC_FLAG_PASS2)) {
678  av_log(avctx, AV_LOG_INFO,
679  "notice: b_frame_strategy only affects the first pass\n");
680  avctx->b_frame_strategy = 0;
681  }
682 
683  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
684  if (i > 1) {
685  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
686  avctx->time_base.den /= i;
687  avctx->time_base.num /= i;
688  //return -1;
689  }
690 
692  // (a + x * 3 / 8) / x
693  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
694  s->inter_quant_bias = 0;
695  } else {
696  s->intra_quant_bias = 0;
697  // (a - x / 4) / x
698  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
699  }
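With QUANT_BIAS_SHIFT equal to 8 (mpegvideo.h), the bias values above are expressed in 1/256ths of a quantization step: 3 << (8 - 3) = 96 = 256 * 3/8, matching the (a + x * 3 / 8) / x comment, and -(1 << (8 - 2)) = -64 = -256 / 4, matching (a - x / 4) / x.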
700 
701  if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
702  s->intra_quant_bias = avctx->intra_quant_bias;
703  if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
704  s->inter_quant_bias = avctx->inter_quant_bias;
705 
706  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
707 
708  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
709 
710  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
711  s->avctx->time_base.den > (1 << 16) - 1) {
712  av_log(avctx, AV_LOG_ERROR,
713  "timebase %d/%d not supported by MPEG 4 standard, "
714  "the maximum admitted value for the timebase denominator "
715  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
716  (1 << 16) - 1);
717  return -1;
718  }
719  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
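av_log2(den - 1) + 1 is simply the number of bits needed to code the values 0 .. den - 1: for time_base.den = 25 that is av_log2(24) + 1 = 5 bits, for 30000 it is av_log2(29999) + 1 = 15 bits, and the 2^16 - 1 limit enforced above keeps it at 16 bits or less.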
720 
721 #if FF_API_MPV_GLOBAL_OPTS
722  if (avctx->flags2 & CODEC_FLAG2_SKIP_RD)
723  s->mpv_flags |= FF_MPV_FLAG_SKIP_RD;
724  if (avctx->flags2 & CODEC_FLAG2_STRICT_GOP)
725  s->mpv_flags |= FF_MPV_FLAG_STRICT_GOP;
726  if (avctx->quantizer_noise_shaping)
727  s->quantizer_noise_shaping = avctx->quantizer_noise_shaping;
728 #endif
729 
730  switch (avctx->codec->id) {
731  case AV_CODEC_ID_MPEG1VIDEO:
732  s->out_format = FMT_MPEG1;
733  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
734  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
735  break;
736  case AV_CODEC_ID_MPEG2VIDEO:
737  s->out_format = FMT_MPEG1;
738  s->low_delay = !!(s->flags & CODEC_FLAG_LOW_DELAY);
739  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
740  s->rtp_mode = 1;
741  break;
742  case AV_CODEC_ID_LJPEG:
743  case AV_CODEC_ID_MJPEG:
744  case AV_CODEC_ID_AMV:
745  s->out_format = FMT_MJPEG;
746  s->intra_only = 1; /* force intra only for jpeg */
747  if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
748  (avctx->pix_fmt == AV_PIX_FMT_BGR0
749  || s->avctx->pix_fmt == AV_PIX_FMT_BGRA
750  || s->avctx->pix_fmt == AV_PIX_FMT_BGR24)) {
751  s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
752  s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
753  s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
754  } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P || avctx->pix_fmt == AV_PIX_FMT_YUVJ444P) {
755  s->mjpeg_vsample[0] = s->mjpeg_vsample[1] = s->mjpeg_vsample[2] = 2;
756  s->mjpeg_hsample[0] = s->mjpeg_hsample[1] = s->mjpeg_hsample[2] = 1;
757  } else {
758  s->mjpeg_vsample[0] = 2;
759  s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
760  s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
761  s->mjpeg_hsample[0] = 2;
762  s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
763  s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
764  }
765  if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
766  ff_mjpeg_encode_init(s) < 0)
767  return -1;
768  avctx->delay = 0;
769  s->low_delay = 1;
770  break;
771  case AV_CODEC_ID_H261:
772  if (!CONFIG_H261_ENCODER)
773  return -1;
774  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
775  av_log(avctx, AV_LOG_ERROR,
776  "The specified picture size of %dx%d is not valid for the "
777  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
778  s->width, s->height);
779  return -1;
780  }
781  s->out_format = FMT_H261;
782  avctx->delay = 0;
783  s->low_delay = 1;
784  break;
785  case AV_CODEC_ID_H263:
786  if (!CONFIG_H263_ENCODER)
787  return -1;
788  if (ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format),
789  s->width, s->height) == 8) {
790  av_log(avctx, AV_LOG_ERROR,
791  "The specified picture size of %dx%d is not valid for "
792  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
793  "352x288, 704x576, and 1408x1152. "
794  "Try H.263+.\n", s->width, s->height);
795  return -1;
796  }
797  s->out_format = FMT_H263;
798  avctx->delay = 0;
799  s->low_delay = 1;
800  break;
801  case AV_CODEC_ID_H263P:
802  s->out_format = FMT_H263;
803  s->h263_plus = 1;
804  /* Fx */
805  s->h263_aic = (avctx->flags & CODEC_FLAG_AC_PRED) ? 1 : 0;
806  s->modified_quant = s->h263_aic;
807  s->loop_filter = (avctx->flags & CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
808  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
809 
810  /* /Fx */
811  /* These are just to be sure */
812  avctx->delay = 0;
813  s->low_delay = 1;
814  break;
815  case AV_CODEC_ID_FLV1:
816  s->out_format = FMT_H263;
817  s->h263_flv = 2; /* format = 1; 11-bit codes */
818  s->unrestricted_mv = 1;
819  s->rtp_mode = 0; /* don't allow GOB */
820  avctx->delay = 0;
821  s->low_delay = 1;
822  break;
823  case AV_CODEC_ID_RV10:
824  s->out_format = FMT_H263;
825  avctx->delay = 0;
826  s->low_delay = 1;
827  break;
828  case AV_CODEC_ID_RV20:
829  s->out_format = FMT_H263;
830  avctx->delay = 0;
831  s->low_delay = 1;
832  s->modified_quant = 1;
833  s->h263_aic = 1;
834  s->h263_plus = 1;
835  s->loop_filter = 1;
836  s->unrestricted_mv = 0;
837  break;
838  case AV_CODEC_ID_MPEG4:
839  s->out_format = FMT_H263;
840  s->h263_pred = 1;
841  s->unrestricted_mv = 1;
842  s->low_delay = s->max_b_frames ? 0 : 1;
843  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
844  break;
845  case AV_CODEC_ID_MSMPEG4V2:
846  s->out_format = FMT_H263;
847  s->h263_pred = 1;
848  s->unrestricted_mv = 1;
849  s->msmpeg4_version = 2;
850  avctx->delay = 0;
851  s->low_delay = 1;
852  break;
853  case AV_CODEC_ID_MSMPEG4V3:
854  s->out_format = FMT_H263;
855  s->h263_pred = 1;
856  s->unrestricted_mv = 1;
857  s->msmpeg4_version = 3;
858  s->flipflop_rounding = 1;
859  avctx->delay = 0;
860  s->low_delay = 1;
861  break;
862  case AV_CODEC_ID_WMV1:
863  s->out_format = FMT_H263;
864  s->h263_pred = 1;
865  s->unrestricted_mv = 1;
866  s->msmpeg4_version = 4;
867  s->flipflop_rounding = 1;
868  avctx->delay = 0;
869  s->low_delay = 1;
870  break;
871  case AV_CODEC_ID_WMV2:
872  s->out_format = FMT_H263;
873  s->h263_pred = 1;
874  s->unrestricted_mv = 1;
875  s->msmpeg4_version = 5;
876  s->flipflop_rounding = 1;
877  avctx->delay = 0;
878  s->low_delay = 1;
879  break;
880  default:
881  return -1;
882  }
883 
884  avctx->has_b_frames = !s->low_delay;
885 
886  s->encoding = 1;
887 
888  s->progressive_frame =
889  s->progressive_sequence = !(avctx->flags & (CODEC_FLAG_INTERLACED_DCT |
890  CODEC_FLAG_INTERLACED_ME) ||
891  s->alternate_scan);
892 
893  /* init */
894  if (ff_MPV_common_init(s) < 0)
895  return -1;
896 
897  ff_dct_encode_init(s);
898 
899  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
900  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
901 
902  s->quant_precision = 5;
903 
904  ff_set_cmp(&s->dsp, s->dsp.ildct_cmp, s->avctx->ildct_cmp);
905  ff_set_cmp(&s->dsp, s->dsp.frame_skip_cmp, s->avctx->frame_skip_cmp);
906 
907  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
908  ff_h261_encode_init(s);
909  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
910  ff_h263_encode_init(s);
911  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
912  ff_msmpeg4_encode_init(s);
913  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
914  && s->out_format == FMT_MPEG1)
915  ff_mpeg1_encode_init(s);
916 
917  /* init q matrix */
918  for (i = 0; i < 64; i++) {
919  int j = s->dsp.idct_permutation[i];
920  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
921  s->mpeg_quant) {
922  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
923  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
924  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
925  s->intra_matrix[j] =
926  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
927  } else {
928  /* mpeg1/2 */
929  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
930  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
931  }
932  if (s->avctx->intra_matrix)
933  s->intra_matrix[j] = s->avctx->intra_matrix[i];
934  if (s->avctx->inter_matrix)
935  s->inter_matrix[j] = s->avctx->inter_matrix[i];
936  }
937 
938  /* precompute matrix */
939  /* for mjpeg, we do include qscale in the matrix */
940  if (s->out_format != FMT_MJPEG) {
941  ff_convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16,
942  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
943  31, 1);
944  ff_convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16,
945  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
946  31, 0);
947  }
948 
949  if (ff_rate_control_init(s) < 0)
950  return -1;
951 
952  return 0;
953 }
954 
955 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
956 {
957  MpegEncContext *s = avctx->priv_data;
958 
959  ff_rate_control_uninit(s);
960 
961  ff_MPV_common_end(s);
962  if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
963  s->out_format == FMT_MJPEG)
964  ff_mjpeg_encode_close(s);
965 
966  av_freep(&avctx->extradata);
967 
968  return 0;
969 }
970 
971 static int get_sae(uint8_t *src, int ref, int stride)
972 {
973  int x,y;
974  int acc = 0;
975 
976  for (y = 0; y < 16; y++) {
977  for (x = 0; x < 16; x++) {
978  acc += FFABS(src[x + y * stride] - ref);
979  }
980  }
981 
982  return acc;
983 }
984 
985 static int get_intra_count(MpegEncContext *s, uint8_t *src,
986  uint8_t *ref, int stride)
987 {
988  int x, y, w, h;
989  int acc = 0;
990 
991  w = s->width & ~15;
992  h = s->height & ~15;
993 
994  for (y = 0; y < h; y += 16) {
995  for (x = 0; x < w; x += 16) {
996  int offset = x + y * stride;
997  int sad = s->dsp.sad[0](NULL, src + offset, ref + offset, stride,
998  16);
999  int mean = (s->dsp.pix_sum(src + offset, stride) + 128) >> 8;
1000  int sae = get_sae(src + offset, mean, stride);
1001 
1002  acc += sae + 500 < sad;
1003  }
1004  }
1005  return acc;
1006 }
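get_sae() and get_intra_count() implement a cheap intra/inter guess: a 16x16 block "looks intra" when coding it from its own mean (SAE) would be cheaper, by a margin of 500, than predicting it from the reference (SAD); b_frame_strategy 1 uses the resulting count further down to bound the number of B-frames. A stand-alone version of the per-block test, under that reading:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* per-block decision from get_intra_count(), written out for one 16x16 block */
    static int block_looks_intra(const uint8_t *src, const uint8_t *ref, int stride)
    {
        int sum = 0, sad = 0, sae = 0;

        for (int y = 0; y < 16; y++)
            for (int x = 0; x < 16; x++)
                sum += src[x + y * stride];
        int mean = (sum + 128) >> 8;                       /* rounded average of 256 pixels */

        for (int y = 0; y < 16; y++)
            for (int x = 0; x < 16; x++) {
                sae += abs(src[x + y * stride] - mean);                /* flatness */
                sad += abs(src[x + y * stride] - ref[x + y * stride]); /* misprediction */
            }
        return sae + 500 < sad;
    }

    int main(void)
    {
        uint8_t src[16 * 16], ref[16 * 16];
        for (int i = 0; i < 256; i++) {
            src[i] = 100;                 /* flat block ... */
            ref[i] = (i * 37) & 0xFF;     /* ... badly predicted by a noisy reference */
        }
        printf("looks intra: %d\n", block_looks_intra(src, ref, 16));
        return 0;
    }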
1007 
1008 
1009 static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
1010 {
1011  AVFrame *pic = NULL;
1012  int64_t pts;
1013  int i;
1014  const int encoding_delay = s->max_b_frames ? s->max_b_frames :
1015  (s->low_delay ? 0 : 1);
1016  int direct = 1;
1017 
1018  if (pic_arg) {
1019  pts = pic_arg->pts;
1020  pic_arg->display_picture_number = s->input_picture_number++;
1021 
1022  if (pts != AV_NOPTS_VALUE) {
1023  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1024  int64_t time = pts;
1025  int64_t last = s->user_specified_pts;
1026 
1027  if (time <= last) {
1028  av_log(s->avctx, AV_LOG_ERROR,
1029  "Error, Invalid timestamp=%"PRId64", "
1030  "last=%"PRId64"\n", pts, s->user_specified_pts);
1031  return -1;
1032  }
1033 
1034  if (!s->low_delay && pic_arg->display_picture_number == 1)
1035  s->dts_delta = time - last;
1036  }
1037  s->user_specified_pts = pts;
1038  } else {
1039  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1040  s->user_specified_pts =
1041  pts = s->user_specified_pts + 1;
1042  av_log(s->avctx, AV_LOG_INFO,
1043  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1044  pts);
1045  } else {
1046  pts = pic_arg->display_picture_number;
1047  }
1048  }
1049  }
1050 
1051  if (pic_arg) {
1052  if (encoding_delay && !(s->flags & CODEC_FLAG_INPUT_PRESERVED))
1053  direct = 0;
1054  if (pic_arg->linesize[0] != s->linesize)
1055  direct = 0;
1056  if (pic_arg->linesize[1] != s->uvlinesize)
1057  direct = 0;
1058  if (pic_arg->linesize[2] != s->uvlinesize)
1059  direct = 0;
1060 
1061  av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
1062  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1063 
1064  if (direct) {
1065  i = ff_find_unused_picture(s, 1);
1066  if (i < 0)
1067  return i;
1068 
1069  pic = &s->picture[i].f;
1070  pic->reference = 3;
1071 
1072  for (i = 0; i < 4; i++) {
1073  pic->data[i] = pic_arg->data[i];
1074  pic->linesize[i] = pic_arg->linesize[i];
1075  }
1076  if (ff_alloc_picture(s, (Picture *) pic, 1) < 0) {
1077  return -1;
1078  }
1079  } else {
1080  i = ff_find_unused_picture(s, 0);
1081  if (i < 0)
1082  return i;
1083 
1084  pic = &s->picture[i].f;
1085  pic->reference = 3;
1086 
1087  if (ff_alloc_picture(s, (Picture *) pic, 0) < 0) {
1088  return -1;
1089  }
1090 
1091  if (pic->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1092  pic->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1093  pic->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1094  // empty
1095  } else {
1096  int h_chroma_shift, v_chroma_shift;
1097  avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1098 
1099  for (i = 0; i < 3; i++) {
1100  int src_stride = pic_arg->linesize[i];
1101  int dst_stride = i ? s->uvlinesize : s->linesize;
1102  int h_shift = i ? h_chroma_shift : 0;
1103  int v_shift = i ? v_chroma_shift : 0;
1104  int w = s->width >> h_shift;
1105  int h = s->height >> v_shift;
1106  uint8_t *src = pic_arg->data[i];
1107  uint8_t *dst = pic->data[i];
1108 
1109  if(s->codec_id == AV_CODEC_ID_AMV && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)){
1110  h= ((s->height+15)/16*16)>>v_shift;
1111  }
1112 
1113  if (!s->avctx->rc_buffer_size)
1114  dst += INPLACE_OFFSET;
1115 
1116  if (src_stride == dst_stride)
1117  memcpy(dst, src, src_stride * h);
1118  else {
1119  while (h--) {
1120  memcpy(dst, src, w);
1121  dst += dst_stride;
1122  src += src_stride;
1123  }
1124  }
1125  }
1126  }
1127  }
1128  copy_picture_attributes(s, pic, pic_arg);
1129  pic->pts = pts; // we set this here to avoid modifying pic_arg
1130  }
1131 
1132  /* shift buffer entries */
1133  for (i = 1; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1134  s->input_picture[i - 1] = s->input_picture[i];
1135 
1136  s->input_picture[encoding_delay] = (Picture*) pic;
1137 
1138  return 0;
1139 }
1140 
1141 static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
1142 {
1143  int x, y, plane;
1144  int score = 0;
1145  int64_t score64 = 0;
1146 
1147  for (plane = 0; plane < 3; plane++) {
1148  const int stride = p->f.linesize[plane];
1149  const int bw = plane ? 1 : 2;
1150  for (y = 0; y < s->mb_height * bw; y++) {
1151  for (x = 0; x < s->mb_width * bw; x++) {
1152  int off = p->f.type == FF_BUFFER_TYPE_SHARED ? 0 : 16;
1153  uint8_t *dptr = p->f.data[plane] + 8 * (x + y * stride) + off;
1154  uint8_t *rptr = ref->f.data[plane] + 8 * (x + y * stride);
1155  int v = s->dsp.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1156 
1157  switch (s->avctx->frame_skip_exp) {
1158  case 0: score = FFMAX(score, v); break;
1159  case 1: score += FFABS(v); break;
1160  case 2: score += v * v; break;
1161  case 3: score64 += FFABS(v * v * (int64_t)v); break;
1162  case 4: score64 += v * v * (int64_t)(v * v); break;
1163  }
1164  }
1165  }
1166  }
1167 
1168  if (score)
1169  score64 = score;
1170 
1171  if (score64 < s->avctx->frame_skip_threshold)
1172  return 1;
1173  if (score64 < ((s->avctx->frame_skip_factor * (int64_t)s->lambda) >> 8))
1174  return 1;
1175  return 0;
1176 }
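frame_skip_exp chooses the per-block error metric accumulated above: 0 keeps the maximum |diff| score, 1 sums |diff|, 2 sums squared differences, and 3/4 accumulate |diff|^3 and diff^4 in 64 bits. The frame is then dropped when the total stays below frame_skip_threshold or below frame_skip_factor * lambda / 256; for example, with frame_skip_factor = 1 and lambda = 2560 anything scoring under 10 is skipped.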
1177 
1178 static int encode_frame(AVCodecContext *c, AVFrame *frame)
1179 {
1180  AVPacket pkt = { 0 };
1181  int ret, got_output;
1182 
1183  av_init_packet(&pkt);
1184  ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
1185  if (ret < 0)
1186  return ret;
1187 
1188  ret = pkt.size;
1189  av_free_packet(&pkt);
1190  return ret;
1191 }
1192 
1193 static int estimate_best_b_count(MpegEncContext *s)
1194 {
1195  AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1196  AVCodecContext *c = avcodec_alloc_context3(NULL);
1197  AVFrame input[FF_MAX_B_FRAMES + 2];
1198  const int scale = s->avctx->brd_scale;
1199  int i, j, out_size, p_lambda, b_lambda, lambda2;
1200  int64_t best_rd = INT64_MAX;
1201  int best_b_count = -1;
1202 
1203  av_assert0(scale >= 0 && scale <= 3);
1204 
1205  //emms_c();
1206  //s->next_picture_ptr->quality;
1207  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1208  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1209  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1210  if (!b_lambda) // FIXME we should do this somewhere else
1211  b_lambda = p_lambda;
1212  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1213  FF_LAMBDA_SHIFT;
1214 
1215  c->width = s->width >> scale;
1216  c->height = s->height >> scale;
1217  c->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_PSNR |
1218  CODEC_FLAG_INPUT_PRESERVED /*| CODEC_FLAG_EMU_EDGE*/;
1219  c->flags |= s->avctx->flags & CODEC_FLAG_QPEL;
1220  c->mb_decision = s->avctx->mb_decision;
1221  c->me_cmp = s->avctx->me_cmp;
1222  c->mb_cmp = s->avctx->mb_cmp;
1223  c->me_sub_cmp = s->avctx->me_sub_cmp;
1224  c->pix_fmt = AV_PIX_FMT_YUV420P;
1225  c->time_base = s->avctx->time_base;
1226  c->max_b_frames = s->max_b_frames;
1227 
1228  if (avcodec_open2(c, codec, NULL) < 0)
1229  return -1;
1230 
1231  for (i = 0; i < s->max_b_frames + 2; i++) {
1232  int ysize = c->width * c->height;
1233  int csize = (c->width / 2) * (c->height / 2);
1234  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1235  s->next_picture_ptr;
1236 
1237  avcodec_get_frame_defaults(&input[i]);
1238  input[i].data[0] = av_malloc(ysize + 2 * csize);
1239  input[i].data[1] = input[i].data[0] + ysize;
1240  input[i].data[2] = input[i].data[1] + csize;
1241  input[i].linesize[0] = c->width;
1242  input[i].linesize[1] =
1243  input[i].linesize[2] = c->width / 2;
1244 
1245  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1246  pre_input = *pre_input_ptr;
1247 
1248  if (pre_input.f.type != FF_BUFFER_TYPE_SHARED && i) {
1249  pre_input.f.data[0] += INPLACE_OFFSET;
1250  pre_input.f.data[1] += INPLACE_OFFSET;
1251  pre_input.f.data[2] += INPLACE_OFFSET;
1252  }
1253 
1254  s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
1255  pre_input.f.data[0], pre_input.f.linesize[0],
1256  c->width, c->height);
1257  s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
1258  pre_input.f.data[1], pre_input.f.linesize[1],
1259  c->width >> 1, c->height >> 1);
1260  s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
1261  pre_input.f.data[2], pre_input.f.linesize[2],
1262  c->width >> 1, c->height >> 1);
1263  }
1264  }
1265 
1266  for (j = 0; j < s->max_b_frames + 1; j++) {
1267  int64_t rd = 0;
1268 
1269  if (!s->input_picture[j])
1270  break;
1271 
1272  c->error[0] = c->error[1] = c->error[2] = 0;
1273 
1274  input[0].pict_type = AV_PICTURE_TYPE_I;
1275  input[0].quality = 1 * FF_QP2LAMBDA;
1276 
1277  out_size = encode_frame(c, &input[0]);
1278 
1279  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1280 
1281  for (i = 0; i < s->max_b_frames + 1; i++) {
1282  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1283 
1284  input[i + 1].pict_type = is_p ?
1285  AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
1286  input[i + 1].quality = is_p ? p_lambda : b_lambda;
1287 
1288  out_size = encode_frame(c, &input[i + 1]);
1289 
1290  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1291  }
1292 
1293  /* get the delayed frames */
1294  while (out_size) {
1295  out_size = encode_frame(c, NULL);
1296  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1297  }
1298 
1299  rd += c->error[0] + c->error[1] + c->error[2];
1300 
1301  if (rd < best_rd) {
1302  best_rd = rd;
1303  best_b_count = j;
1304  }
1305  }
1306 
1307  avcodec_close(c);
1308  av_freep(&c);
1309 
1310  for (i = 0; i < s->max_b_frames + 2; i++) {
1311  av_freep(&input[i].data[0]);
1312  }
1313 
1314  return best_b_count;
1315 }
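Each candidate number of B-frames j is scored with a crude rate-distortion cost over the downscaled clip: rd(j) = sum over encoded frames of (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3), plus the encoder's accumulated error in c->error[0..2] (SSE when CODEC_FLAG_PSNR is set), and the smallest rd wins. Since out_size is in bytes, shifting by FF_LAMBDA_SHIFT - 3 instead of FF_LAMBDA_SHIFT is the same as converting it to bits first: bytes * lambda2 / 16 = (8 * bytes) * lambda2 / 128.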
1316 
1317 static int select_input_picture(MpegEncContext *s)
1318 {
1319  int i;
1320 
1321  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1322  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1323  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1324 
1325  /* set next picture type & ordering */
1326  if (s->reordered_input_picture[0] == NULL && s->input_picture[0]) {
1327  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1328  s->next_picture_ptr == NULL || s->intra_only) {
1329  s->reordered_input_picture[0] = s->input_picture[0];
1330  s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_I;
1331  s->reordered_input_picture[0]->f.coded_picture_number =
1332  s->coded_picture_number++;
1333  } else {
1334  int b_frames;
1335 
1336  if (s->avctx->frame_skip_threshold || s->avctx->frame_skip_factor) {
1337  if (s->picture_in_gop_number < s->gop_size &&
1338  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1339  // FIXME check that the gop check above is +-1 correct
1340  if (s->input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED) {
1341  for (i = 0; i < 4; i++)
1342  s->input_picture[0]->f.data[i] = NULL;
1343  s->input_picture[0]->f.type = 0;
1344  } else {
1345  assert(s->input_picture[0]->f.type == FF_BUFFER_TYPE_USER ||
1346  s->input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL);
1347 
1348  s->avctx->release_buffer(s->avctx,
1349  &s->input_picture[0]->f);
1350  }
1351 
1352  emms_c();
1353  ff_vbv_update(s, 0);
1354 
1355  goto no_output_pic;
1356  }
1357  }
1358 
1359  if (s->flags & CODEC_FLAG_PASS2) {
1360  for (i = 0; i < s->max_b_frames + 1; i++) {
1361  int pict_num = s->input_picture[0]->f.display_picture_number + i;
1362 
1363  if (pict_num >= s->rc_context.num_entries)
1364  break;
1365  if (!s->input_picture[i]) {
1366  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1367  break;
1368  }
1369 
1370  s->input_picture[i]->f.pict_type =
1371  s->rc_context.entry[pict_num].new_pict_type;
1372  }
1373  }
1374 
1375  if (s->avctx->b_frame_strategy == 0) {
1376  b_frames = s->max_b_frames;
1377  while (b_frames && !s->input_picture[b_frames])
1378  b_frames--;
1379  } else if (s->avctx->b_frame_strategy == 1) {
1380  for (i = 1; i < s->max_b_frames + 1; i++) {
1381  if (s->input_picture[i] &&
1382  s->input_picture[i]->b_frame_score == 0) {
1383  s->input_picture[i]->b_frame_score =
1384  get_intra_count(s,
1385  s->input_picture[i ]->f.data[0],
1386  s->input_picture[i - 1]->f.data[0],
1387  s->linesize) + 1;
1388  }
1389  }
1390  for (i = 0; i < s->max_b_frames + 1; i++) {
1391  if (s->input_picture[i] == NULL ||
1392  s->input_picture[i]->b_frame_score - 1 >
1393  s->mb_num / s->avctx->b_sensitivity)
1394  break;
1395  }
1396 
1397  b_frames = FFMAX(0, i - 1);
1398 
1399  /* reset scores */
1400  for (i = 0; i < b_frames + 1; i++) {
1401  s->input_picture[i]->b_frame_score = 0;
1402  }
1403  } else if (s->avctx->b_frame_strategy == 2) {
1404  b_frames = estimate_best_b_count(s);
1405  } else {
1406  av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n");
1407  b_frames = 0;
1408  }
1409 
1410  emms_c();
1411 
1412  for (i = b_frames - 1; i >= 0; i--) {
1413  int type = s->input_picture[i]->f.pict_type;
1414  if (type && type != AV_PICTURE_TYPE_B)
1415  b_frames = i;
1416  }
1417  if (s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_B &&
1418  b_frames == s->max_b_frames) {
1419  av_log(s->avctx, AV_LOG_ERROR,
1420  "warning, too many b frames in a row\n");
1421  }
1422 
1423  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1424  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1425  s->gop_size > s->picture_in_gop_number) {
1426  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1427  } else {
1428  if (s->flags & CODEC_FLAG_CLOSED_GOP)
1429  b_frames = 0;
1430  s->input_picture[b_frames]->f.pict_type = AV_PICTURE_TYPE_I;
1431  }
1432  }
1433 
1434  if ((s->flags & CODEC_FLAG_CLOSED_GOP) && b_frames &&
1435  s->input_picture[b_frames]->f.pict_type == AV_PICTURE_TYPE_I)
1436  b_frames--;
1437 
1438  s->reordered_input_picture[0] = s->input_picture[b_frames];
1439  if (s->reordered_input_picture[0]->f.pict_type != AV_PICTURE_TYPE_I)
1440  s->reordered_input_picture[0]->f.pict_type = AV_PICTURE_TYPE_P;
1441  s->reordered_input_picture[0]->f.coded_picture_number =
1442  s->coded_picture_number++;
1443  for (i = 0; i < b_frames; i++) {
1444  s->reordered_input_picture[i + 1] = s->input_picture[i];
1445  s->reordered_input_picture[i + 1]->f.pict_type =
1446  AV_PICTURE_TYPE_B;
1447  s->reordered_input_picture[i + 1]->f.coded_picture_number =
1448  s->coded_picture_number++;
1449  }
1450  }
1451  }
1452 no_output_pic:
1453  if (s->reordered_input_picture[0]) {
1454  s->reordered_input_picture[0]->f.reference =
1455  s->reordered_input_picture[0]->f.pict_type !=
1456  AV_PICTURE_TYPE_B ? 3 : 0;
1457 
1458  ff_copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1459 
1460  if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_SHARED ||
1461  s->avctx->rc_buffer_size) {
1462  // input is a shared pix, so we can't modify it -> allocate a new
1463  // one and ensure that the shared one stays reusable
1464 
1465  Picture *pic;
1466  int i = ff_find_unused_picture(s, 0);
1467  if (i < 0)
1468  return i;
1469  pic = &s->picture[i];
1470 
1471  pic->f.reference = s->reordered_input_picture[0]->f.reference;
1472  if (ff_alloc_picture(s, pic, 0) < 0) {
1473  return -1;
1474  }
1475 
1476  /* mark us unused / free shared pic */
1477  if (s->reordered_input_picture[0]->f.type == FF_BUFFER_TYPE_INTERNAL)
1478  s->avctx->release_buffer(s->avctx,
1479  &s->reordered_input_picture[0]->f);
1480  for (i = 0; i < 4; i++)
1481  s->reordered_input_picture[0]->f.data[i] = NULL;
1482  s->reordered_input_picture[0]->f.type = 0;
1483 
1484  copy_picture_attributes(s, &pic->f,
1485  &s->reordered_input_picture[0]->f);
1486 
1487  s->current_picture_ptr = pic;
1488  } else {
1489  // input is not a shared pix -> reuse buffer for current_pix
1490 
1491  assert(s->reordered_input_picture[0]->f.type ==
1492  FF_BUFFER_TYPE_USER ||
1493  s->reordered_input_picture[0]->f.type ==
1494  FF_BUFFER_TYPE_INTERNAL);
1495 
1496  s->current_picture_ptr = s->reordered_input_picture[0];
1497  for (i = 0; i < 4; i++) {
1498  s->new_picture.f.data[i] += INPLACE_OFFSET;
1499  }
1500  }
1501  ff_copy_picture(&s->current_picture, s->current_picture_ptr);
1502 
1503  s->picture_number = s->new_picture.f.display_picture_number;
1504  } else {
1505  memset(&s->new_picture, 0, sizeof(Picture));
1506  }
1507  return 0;
1508 }
1509 
1510 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
1511  AVFrame *pic_arg, int *got_packet)
1512 {
1513  MpegEncContext *s = avctx->priv_data;
1514  int i, stuffing_count, ret;
1515  int context_count = s->slice_context_count;
1516 
1517  s->picture_in_gop_number++;
1518 
1519  if (load_input_picture(s, pic_arg) < 0)
1520  return -1;
1521 
1522  if (select_input_picture(s) < 0) {
1523  return -1;
1524  }
1525 
1526  /* output? */
1527  if (s->new_picture.f.data[0]) {
1528  if ((ret = ff_alloc_packet2(avctx, pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
1529  return ret;
1530  if (s->mb_info) {
1531  s->mb_info_ptr = av_packet_new_side_data(pkt,
1532  AV_PKT_DATA_H263_MB_INFO,
1533  s->mb_width*s->mb_height*12);
1534  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1535  }
1536 
1537  for (i = 0; i < context_count; i++) {
1538  int start_y = s->thread_context[i]->start_mb_y;
1539  int end_y = s->thread_context[i]-> end_mb_y;
1540  int h = s->mb_height;
1541  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1542  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1543 
1544  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1545  }
1546 
1547  s->pict_type = s->new_picture.f.pict_type;
1548  //emms_c();
1549  if (ff_MPV_frame_start(s, avctx) < 0)
1550  return -1;
1551 vbv_retry:
1552  if (encode_picture(s, s->picture_number) < 0)
1553  return -1;
1554 
1555  avctx->header_bits = s->header_bits;
1556  avctx->mv_bits = s->mv_bits;
1557  avctx->misc_bits = s->misc_bits;
1558  avctx->i_tex_bits = s->i_tex_bits;
1559  avctx->p_tex_bits = s->p_tex_bits;
1560  avctx->i_count = s->i_count;
1561  // FIXME f/b_count in avctx
1562  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1563  avctx->skip_count = s->skip_count;
1564 
1565  ff_MPV_frame_end(s);
1566 
1567  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1568  ff_mjpeg_encode_picture_trailer(s);
1569 
1570  if (avctx->rc_buffer_size) {
1571  RateControlContext *rcc = &s->rc_context;
1572  int max_size = rcc->buffer_index * avctx->rc_max_available_vbv_use;
1573 
1574  if (put_bits_count(&s->pb) > max_size &&
1575  s->lambda < s->avctx->lmax) {
1576  s->next_lambda = FFMAX(s->lambda + 1, s->lambda *
1577  (s->qscale + 1) / s->qscale);
1578  if (s->adaptive_quant) {
1579  int i;
1580  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1581  s->lambda_table[i] =
1582  FFMAX(s->lambda_table[i] + 1,
1583  s->lambda_table[i] * (s->qscale + 1) /
1584  s->qscale);
1585  }
1586  s->mb_skipped = 0; // done in MPV_frame_start()
1587  // done in encode_picture() so we must undo it
1588  if (s->pict_type == AV_PICTURE_TYPE_P) {
1589  if (s->flipflop_rounding ||
1590  s->codec_id == AV_CODEC_ID_H263P ||
1591  s->codec_id == AV_CODEC_ID_MPEG4)
1592  s->no_rounding ^= 1;
1593  }
1594  if (s->pict_type != AV_PICTURE_TYPE_B) {
1595  s->time_base = s->last_time_base;
1596  s->last_non_b_time = s->time - s->pp_time;
1597  }
1598  for (i = 0; i < context_count; i++) {
1599  PutBitContext *pb = &s->thread_context[i]->pb;
1600  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1601  }
1602  goto vbv_retry;
1603  }
1604 
1605  assert(s->avctx->rc_max_rate);
1606  }
1607 
1608  if (s->flags & CODEC_FLAG_PASS1)
1609  ff_write_pass1_stats(s);
1610 
1611  for (i = 0; i < 4; i++) {
1612  s->current_picture_ptr->f.error[i] = s->current_picture.f.error[i];
1613  avctx->error[i] += s->current_picture_ptr->f.error[i];
1614  }
1615 
1616  if (s->flags & CODEC_FLAG_PASS1)
1617  assert(avctx->header_bits + avctx->mv_bits + avctx->misc_bits +
1618  avctx->i_tex_bits + avctx->p_tex_bits ==
1619  put_bits_count(&s->pb));
1620  flush_put_bits(&s->pb);
1621  s->frame_bits = put_bits_count(&s->pb);
1622 
1623  stuffing_count = ff_vbv_update(s, s->frame_bits);
1624  s->stuffing_bits = 8*stuffing_count;
1625  if (stuffing_count) {
1626  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1627  stuffing_count + 50) {
1628  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1629  return -1;
1630  }
1631 
1632  switch (s->codec_id) {
1633  case AV_CODEC_ID_MPEG1VIDEO:
1634  case AV_CODEC_ID_MPEG2VIDEO:
1635  while (stuffing_count--) {
1636  put_bits(&s->pb, 8, 0);
1637  }
1638  break;
1639  case AV_CODEC_ID_MPEG4:
1640  put_bits(&s->pb, 16, 0);
1641  put_bits(&s->pb, 16, 0x1C3);
1642  stuffing_count -= 4;
1643  while (stuffing_count--) {
1644  put_bits(&s->pb, 8, 0xFF);
1645  }
1646  break;
1647  default:
1648  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1649  }
1650  flush_put_bits(&s->pb);
1651  s->frame_bits = put_bits_count(&s->pb);
1652  }
1653 
1654  /* update mpeg1/2 vbv_delay for CBR */
1655  if (s->avctx->rc_max_rate &&
1656  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1657  s->out_format == FMT_MPEG1 &&
1658  90000LL * (avctx->rc_buffer_size - 1) <=
1659  s->avctx->rc_max_rate * 0xFFFFLL) {
1660  int vbv_delay, min_delay;
1661  double inbits = s->avctx->rc_max_rate *
1662  av_q2d(s->avctx->time_base);
1663  int minbits = s->frame_bits - 8 *
1664  (s->vbv_delay_ptr - s->pb.buf - 1);
1665  double bits = s->rc_context.buffer_index + minbits - inbits;
1666 
1667  if (bits < 0)
1668  av_log(s->avctx, AV_LOG_ERROR,
1669  "Internal error, negative bits\n");
1670 
1671  assert(s->repeat_first_field == 0);
1672 
1673  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
1674  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
1675  s->avctx->rc_max_rate;
1676 
1677  vbv_delay = FFMAX(vbv_delay, min_delay);
1678 
1679  av_assert0(vbv_delay < 0xFFFF);
1680 
1681  s->vbv_delay_ptr[0] &= 0xF8;
1682  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
1683  s->vbv_delay_ptr[1] = vbv_delay >> 5;
1684  s->vbv_delay_ptr[2] &= 0x07;
1685  s->vbv_delay_ptr[2] |= vbv_delay << 3;
1686  avctx->vbv_delay = vbv_delay * 300;
1687  }
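The 16-bit vbv_delay field starts three bits into the byte that vbv_delay_ptr points at, hence the 3 + 8 + 5 split above; avctx->vbv_delay is then reported in 27 MHz units (the 90 kHz header value times 300). A stand-alone round trip of that packing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t hdr[3] = { 0xF8, 0x00, 0x07 };   /* surrounding header bits, preserved */
        unsigned vbv_delay = 0x1234;             /* any value below 0xFFFF */

        /* pack: top 3, middle 8, low 5 bits, as in the encoder above */
        hdr[0] = (hdr[0] & 0xF8) | (vbv_delay >> 13);
        hdr[1] = (uint8_t)(vbv_delay >> 5);
        hdr[2] = (hdr[2] & 0x07) | (uint8_t)(vbv_delay << 3);

        /* unpack to verify */
        unsigned back = ((hdr[0] & 0x07) << 13) | (hdr[1] << 5) | (hdr[2] >> 3);
        printf("%04X -> %04X\n", vbv_delay, back);
        return 0;
    }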
1688  s->total_bits += s->frame_bits;
1689  avctx->frame_bits = s->frame_bits;
1690 
1691  pkt->pts = s->current_picture.f.pts;
1692  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
1693  if (!s->current_picture.f.coded_picture_number)
1694  pkt->dts = pkt->pts - s->dts_delta;
1695  else
1696  pkt->dts = s->reordered_pts;
1697  s->reordered_pts = pkt->pts;
1698  } else
1699  pkt->dts = pkt->pts;
1700  if (s->current_picture.f.key_frame)
1701  pkt->flags |= AV_PKT_FLAG_KEY;
1702  if (s->mb_info)
1703  av_packet_shrink_side_data(pkt, AV_PKT_DATA_H263_MB_INFO, s->mb_info_size);
1704  } else {
1705  s->frame_bits = 0;
1706  }
1707  assert((s->frame_bits & 7) == 0);
1708 
1709  pkt->size = s->frame_bits / 8;
1710  *got_packet = !!pkt->size;
1711  return 0;
1712 }
1713 
1714 static void dct_single_coeff_elimination(MpegEncContext *s,
1715  int n, int threshold)
1716 {
1717  static const char tab[64] = {
1718  3, 2, 2, 1, 1, 1, 1, 1,
1719  1, 1, 1, 1, 1, 1, 1, 1,
1720  1, 1, 1, 1, 1, 1, 1, 1,
1721  0, 0, 0, 0, 0, 0, 0, 0,
1722  0, 0, 0, 0, 0, 0, 0, 0,
1723  0, 0, 0, 0, 0, 0, 0, 0,
1724  0, 0, 0, 0, 0, 0, 0, 0,
1725  0, 0, 0, 0, 0, 0, 0, 0
1726  };
1727  int score = 0;
1728  int run = 0;
1729  int i;
1730  DCTELEM *block = s->block[n];
1731  const int last_index = s->block_last_index[n];
1732  int skip_dc;
1733 
1734  if (threshold < 0) {
1735  skip_dc = 0;
1736  threshold = -threshold;
1737  } else
1738  skip_dc = 1;
1739 
1740  /* Are all we could set to zero already zero? */
1741  if (last_index <= skip_dc - 1)
1742  return;
1743 
1744  for (i = 0; i <= last_index; i++) {
1745  const int j = s->intra_scantable.permutated[i];
1746  const int level = FFABS(block[j]);
1747  if (level == 1) {
1748  if (skip_dc && i == 0)
1749  continue;
1750  score += tab[run];
1751  run = 0;
1752  } else if (level > 1) {
1753  return;
1754  } else {
1755  run++;
1756  }
1757  }
1758  if (score >= threshold)
1759  return;
1760  for (i = skip_dc; i <= last_index; i++) {
1761  const int j = s->intra_scantable.permutated[i];
1762  block[j] = 0;
1763  }
1764  if (block[0])
1765  s->block_last_index[n] = 0;
1766  else
1767  s->block_last_index[n] = -1;
1768 }
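The tab[] lookup above prices each surviving +-1 coefficient by the zero-run in front of it: 3 for run 0, 2 for runs 1-2, 1 up to run 23 and 0 beyond, while any coefficient larger than 1 aborts the elimination entirely. Example: with a threshold of 5, a block whose only AC content is a +-1 right after the DC (run 0, cost 3) plus another +-1 twenty zeros later (cost 1) scores 4 and is therefore zeroed out.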
1769 
1770 static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block,
1771  int last_index)
1772 {
1773  int i;
1774  const int maxlevel = s->max_qcoeff;
1775  const int minlevel = s->min_qcoeff;
1776  int overflow = 0;
1777 
1778  if (s->mb_intra) {
1779  i = 1; // skip clipping of intra dc
1780  } else
1781  i = 0;
1782 
1783  for (; i <= last_index; i++) {
1784  const int j = s->intra_scantable.permutated[i];
1785  int level = block[j];
1786 
1787  if (level > maxlevel) {
1788  level = maxlevel;
1789  overflow++;
1790  } else if (level < minlevel) {
1791  level = minlevel;
1792  overflow++;
1793  }
1794 
1795  block[j] = level;
1796  }
1797 
1798  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
1799  av_log(s->avctx, AV_LOG_INFO,
1800  "warning, clipping %d dct coefficients to %d..%d\n",
1801  overflow, minlevel, maxlevel);
1802 }
1803 
1804 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
1805 {
1806  int x, y;
1807  // FIXME optimize
1808  for (y = 0; y < 8; y++) {
1809  for (x = 0; x < 8; x++) {
1810  int x2, y2;
1811  int sum = 0;
1812  int sqr = 0;
1813  int count = 0;
1814 
1815  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
1816  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
1817  int v = ptr[x2 + y2 * stride];
1818  sum += v;
1819  sqr += v * v;
1820  count++;
1821  }
1822  }
1823  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
1824  }
1825  }
1826 }
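For each pixel the 3x3-neighbourhood sums satisfy count * sqr - sum * sum = count^2 * variance, so the stored value is 36 * sqrt(count^2 * sigma^2) / count = 36 * sigma, i.e. 36 times the local standard deviation. Flat regions get small weights and busy regions large ones, which the noise-shaping quantization uses to hide coefficient errors where they are least visible.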
1827 
1828 static av_always_inline void encode_mb_internal(MpegEncContext *s,
1829  int motion_x, int motion_y,
1830  int mb_block_height,
1831  int mb_block_width,
1832  int mb_block_count)
1833 {
1834  int16_t weight[12][64];
1835  DCTELEM orig[12][64];
1836  const int mb_x = s->mb_x;
1837  const int mb_y = s->mb_y;
1838  int i;
1839  int skip_dct[12];
1840  int dct_offset = s->linesize * 8; // default for progressive frames
1841  int uv_dct_offset = s->uvlinesize * 8;
1842  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1843  int wrap_y, wrap_c;
1844 
1845  for (i = 0; i < mb_block_count; i++)
1846  skip_dct[i] = s->skipdct;
1847 
1848  if (s->adaptive_quant) {
1849  const int last_qp = s->qscale;
1850  const int mb_xy = mb_x + mb_y * s->mb_stride;
1851 
1852  s->lambda = s->lambda_table[mb_xy];
1853  update_qscale(s);
1854 
1855  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
1856  s->qscale = s->current_picture_ptr->f.qscale_table[mb_xy];
1857  s->dquant = s->qscale - last_qp;
1858 
1859  if (s->out_format == FMT_H263) {
1860  s->dquant = av_clip(s->dquant, -2, 2);
1861 
1862  if (s->codec_id == AV_CODEC_ID_MPEG4) {
1863  if (!s->mb_intra) {
1864  if (s->pict_type == AV_PICTURE_TYPE_B) {
1865  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
1866  s->dquant = 0;
1867  }
1868  if (s->mv_type == MV_TYPE_8X8)
1869  s->dquant = 0;
1870  }
1871  }
1872  }
1873  }
1874  ff_set_qscale(s, last_qp + s->dquant);
1875  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
1876  ff_set_qscale(s, s->qscale + s->dquant);
1877 
1878  wrap_y = s->linesize;
1879  wrap_c = s->uvlinesize;
1880  ptr_y = s->new_picture.f.data[0] +
1881  (mb_y * 16 * wrap_y) + mb_x * 16;
1882  ptr_cb = s->new_picture.f.data[1] +
1883  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1884  ptr_cr = s->new_picture.f.data[2] +
1885  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
1886 
1887  if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
1888  uint8_t *ebuf = s->edge_emu_buffer + 32;
1889  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
1890  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
1891  s->vdsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
1892  mb_y * 16, s->width, s->height);
1893  ptr_y = ebuf;
1894  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width,
1895  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1896  cw, ch);
1897  ptr_cb = ebuf + 18 * wrap_y;
1898  s->vdsp.emulated_edge_mc(ebuf + 18 * wrap_y + 16, ptr_cr, wrap_c, mb_block_width,
1899  mb_block_height, mb_x * mb_block_width, mb_y * mb_block_height,
1900  cw, ch);
1901  ptr_cr = ebuf + 18 * wrap_y + 16;
1902  }
1903 
1904  if (s->mb_intra) {
1905  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1906  int progressive_score, interlaced_score;
1907 
1908  s->interlaced_dct = 0;
1909  progressive_score = s->dsp.ildct_cmp[4](s, ptr_y,
1910  NULL, wrap_y, 8) +
1911  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y * 8,
1912  NULL, wrap_y, 8) - 400;
1913 
1914  if (progressive_score > 0) {
1915  interlaced_score = s->dsp.ildct_cmp[4](s, ptr_y,
1916  NULL, wrap_y * 2, 8) +
1917  s->dsp.ildct_cmp[4](s, ptr_y + wrap_y,
1918  NULL, wrap_y * 2, 8);
1919  if (progressive_score > interlaced_score) {
1920  s->interlaced_dct = 1;
1921 
1922  dct_offset = wrap_y;
1923  uv_dct_offset = wrap_c;
1924  wrap_y <<= 1;
1925  if (s->chroma_format == CHROMA_422 ||
1926  s->chroma_format == CHROMA_444)
1927  wrap_c <<= 1;
1928  }
1929  }
1930  }
1931 
1932  s->dsp.get_pixels(s->block[0], ptr_y , wrap_y);
1933  s->dsp.get_pixels(s->block[1], ptr_y + 8 , wrap_y);
1934  s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y);
1935  s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8 , wrap_y);
1936 
1937  if (s->flags & CODEC_FLAG_GRAY) {
1938  skip_dct[4] = 1;
1939  skip_dct[5] = 1;
1940  } else {
1941  s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
1942  s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
1943  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
1944  s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
1945  s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
1946  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
1947  s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
1948  s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
1949  s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
1950  s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
1951  s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
1952  s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
1953  }
1954  }
1955  } else {
1956  op_pixels_func (*op_pix)[4];
1957  qpel_mc_func (*op_qpix)[16];
1958  uint8_t *dest_y, *dest_cb, *dest_cr;
1959 
1960  dest_y = s->dest[0];
1961  dest_cb = s->dest[1];
1962  dest_cr = s->dest[2];
1963 
1964  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1965  op_pix = s->dsp.put_pixels_tab;
1966  op_qpix = s->dsp.put_qpel_pixels_tab;
1967  } else {
1968  op_pix = s->dsp.put_no_rnd_pixels_tab;
1969  op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
1970  }
1971 
1972  if (s->mv_dir & MV_DIR_FORWARD) {
1973  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
1974  s->last_picture.f.data,
1975  op_pix, op_qpix);
1976  op_pix = s->dsp.avg_pixels_tab;
1977  op_qpix = s->dsp.avg_qpel_pixels_tab;
1978  }
1979  if (s->mv_dir & MV_DIR_BACKWARD) {
1980  ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
1981  s->next_picture.f.data,
1982  op_pix, op_qpix);
1983  }
1984 
1985  if (s->flags & CODEC_FLAG_INTERLACED_DCT) {
1986  int progressive_score, interlaced_score;
1987 
1988  s->interlaced_dct = 0;
1989  progressive_score = s->dsp.ildct_cmp[0](s, dest_y,
1990  ptr_y, wrap_y,
1991  8) +
1992  s->dsp.ildct_cmp[0](s, dest_y + wrap_y * 8,
1993  ptr_y + wrap_y * 8, wrap_y,
1994  8) - 400;
1995 
1996  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
1997  progressive_score -= 400;
1998 
1999  if (progressive_score > 0) {
2000  interlaced_score = s->dsp.ildct_cmp[0](s, dest_y,
2001  ptr_y,
2002  wrap_y * 2, 8) +
2003  s->dsp.ildct_cmp[0](s, dest_y + wrap_y,
2004  ptr_y + wrap_y,
2005  wrap_y * 2, 8);
2006 
2007  if (progressive_score > interlaced_score) {
2008  s->interlaced_dct = 1;
2009 
2010  dct_offset = wrap_y;
2011  uv_dct_offset = wrap_c;
2012  wrap_y <<= 1;
2013  if (s->chroma_format == CHROMA_422)
2014  wrap_c <<= 1;
2015  }
2016  }
2017  }
2018 
2019  s->dsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2020  s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2021  s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2022  dest_y + dct_offset, wrap_y);
2023  s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2024  dest_y + dct_offset + 8, wrap_y);
2025 
2026  if (s->flags & CODEC_FLAG_GRAY) {
2027  skip_dct[4] = 1;
2028  skip_dct[5] = 1;
2029  } else {
2030  s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2031  s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2032  if (!s->chroma_y_shift) { /* 422 */
2033  s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2034  dest_cb + uv_dct_offset, wrap_c);
2035  s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2036  dest_cr + uv_dct_offset, wrap_c);
2037  }
2038  }
2039  /* pre quantization */
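 /* If the motion-compensated variance of this MB is small, drop individual
  * blocks whose SAD against the prediction is below 20*qscale: they are
  * marked in skip_dct[] and end up coded as all-zero blocks. */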
2040  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2041  2 * s->qscale * s->qscale) {
2042  // FIXME optimize
2043  if (s->dsp.sad[1](NULL, ptr_y , dest_y,
2044  wrap_y, 8) < 20 * s->qscale)
2045  skip_dct[0] = 1;
2046  if (s->dsp.sad[1](NULL, ptr_y + 8,
2047  dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2048  skip_dct[1] = 1;
2049  if (s->dsp.sad[1](NULL, ptr_y + dct_offset,
2050  dest_y + dct_offset, wrap_y, 8) < 20 * s->qscale)
2051  skip_dct[2] = 1;
2052  if (s->dsp.sad[1](NULL, ptr_y + dct_offset + 8,
2053  dest_y + dct_offset + 8,
2054  wrap_y, 8) < 20 * s->qscale)
2055  skip_dct[3] = 1;
2056  if (s->dsp.sad[1](NULL, ptr_cb, dest_cb,
2057  wrap_c, 8) < 20 * s->qscale)
2058  skip_dct[4] = 1;
2059  if (s->dsp.sad[1](NULL, ptr_cr, dest_cr,
2060  wrap_c, 8) < 20 * s->qscale)
2061  skip_dct[5] = 1;
2062  if (!s->chroma_y_shift) { /* 422 */
2063  if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
2064  dest_cb + uv_dct_offset,
2065  wrap_c, 8) < 20 * s->qscale)
2066  skip_dct[6] = 1;
2067  if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
2068  dest_cr + uv_dct_offset,
2069  wrap_c, 8) < 20 * s->qscale)
2070  skip_dct[7] = 1;
2071  }
2072  }
2073  }
2074 
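 /* With quantizer noise shaping, per-coefficient visual weights are computed
  * for every block that is still going to be coded, and the original
  * (spatial-domain) blocks are saved in orig[] so dct_quantize_refine()
  * can rework the result of the first quantization pass below. */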
2075  if (s->quantizer_noise_shaping) {
2076  if (!skip_dct[0])
2077  get_visual_weight(weight[0], ptr_y , wrap_y);
2078  if (!skip_dct[1])
2079  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2080  if (!skip_dct[2])
2081  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2082  if (!skip_dct[3])
2083  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2084  if (!skip_dct[4])
2085  get_visual_weight(weight[4], ptr_cb , wrap_c);
2086  if (!skip_dct[5])
2087  get_visual_weight(weight[5], ptr_cr , wrap_c);
2088  if (!s->chroma_y_shift) { /* 422 */
2089  if (!skip_dct[6])
2090  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2091  wrap_c);
2092  if (!skip_dct[7])
2093  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2094  wrap_c);
2095  }
2096  memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
2097  }
2098 
2099  /* DCT & quantize */
2100  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2101  {
2102  for (i = 0; i < mb_block_count; i++) {
2103  if (!skip_dct[i]) {
2104  int overflow;
2105  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
 2106  // FIXME we could decide to change the quantizer instead of
 2107  // clipping
 2108  // JS: I don't think that would be a good idea, it could lower
 2109  // quality instead of improving it. Only INTRADC clipping
 2110  // deserves changes in the quantizer.
2111  if (overflow)
2112  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2113  } else
2114  s->block_last_index[i] = -1;
2115  }
2116  if (s->quantizer_noise_shaping) {
2117  for (i = 0; i < mb_block_count; i++) {
2118  if (!skip_dct[i]) {
2119  s->block_last_index[i] =
2120  dct_quantize_refine(s, s->block[i], weight[i],
2121  orig[i], i, s->qscale);
2122  }
2123  }
2124  }
2125 
2126  if (s->luma_elim_threshold && !s->mb_intra)
2127  for (i = 0; i < 4; i++)
2129  if (s->chroma_elim_threshold && !s->mb_intra)
2130  for (i = 4; i < mb_block_count; i++)
2132 
2133  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2134  for (i = 0; i < mb_block_count; i++) {
2135  if (s->block_last_index[i] == -1)
2136  s->coded_score[i] = INT_MAX / 256;
2137  }
2138  }
2139  }
2140 
2141  if ((s->flags & CODEC_FLAG_GRAY) && s->mb_intra) {
2142  s->block_last_index[4] =
2143  s->block_last_index[5] = 0;
2144  s->block[4][0] =
2145  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2146  }
2147 
 2148  // FIXME: the non-C quantize code returns an incorrect block_last_index
2149  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2150  for (i = 0; i < mb_block_count; i++) {
2151  int j;
2152  if (s->block_last_index[i] > 0) {
2153  for (j = 63; j > 0; j--) {
2154  if (s->block[i][s->intra_scantable.permutated[j]])
2155  break;
2156  }
2157  s->block_last_index[i] = j;
2158  }
2159  }
2160  }
2161 
2162  /* huffman encode */
 2163  switch(s->codec_id){ //FIXME a function pointer could be slightly faster
 2164  case AV_CODEC_ID_MPEG1VIDEO:
 2165  case AV_CODEC_ID_MPEG2VIDEO:
 2166  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2167  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2168  break;
2169  case AV_CODEC_ID_MPEG4:
2170  if (CONFIG_MPEG4_ENCODER)
2171  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2172  break;
2173  case AV_CODEC_ID_MSMPEG4V2:
2174  case AV_CODEC_ID_MSMPEG4V3:
2175  case AV_CODEC_ID_WMV1:
 2176  if (CONFIG_MSMPEG4_ENCODER)
 2177  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2178  break;
2179  case AV_CODEC_ID_WMV2:
2180  if (CONFIG_WMV2_ENCODER)
2181  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2182  break;
2183  case AV_CODEC_ID_H261:
2184  if (CONFIG_H261_ENCODER)
2185  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2186  break;
2187  case AV_CODEC_ID_H263:
2188  case AV_CODEC_ID_H263P:
2189  case AV_CODEC_ID_FLV1:
2190  case AV_CODEC_ID_RV10:
2191  case AV_CODEC_ID_RV20:
2192  if (CONFIG_H263_ENCODER)
2193  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2194  break;
2195  case AV_CODEC_ID_MJPEG:
2196  case AV_CODEC_ID_AMV:
2197  if (CONFIG_MJPEG_ENCODER)
2198  ff_mjpeg_encode_mb(s, s->block);
2199  break;
2200  default:
2201  av_assert1(0);
2202  }
2203 }
2204 
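 /* encode_mb_internal() is specialized on the chroma layout: 4:2:0 covers an
  * 8x8 chroma area per MB (6 coded blocks), 4:2:2 an 8x16 area (8 blocks),
  * 4:4:4 a 16x16 area (12 blocks). */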
2205 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2206 {
2207  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2208  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2209  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2210 }
2211 
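 /* The rate-distortion macroblock decision below encodes each candidate MB
  * type experimentally; these two helpers snapshot and restore the encoder
  * state (skip run, DC predictors, bit statistics, bitstream writers, block
  * pointers) around every trial. */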
2212 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
2213  int i;
2214 
2215  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2216 
2217  /* mpeg1 */
2218  d->mb_skip_run= s->mb_skip_run;
2219  for(i=0; i<3; i++)
2220  d->last_dc[i] = s->last_dc[i];
2221 
2222  /* statistics */
2223  d->mv_bits= s->mv_bits;
2224  d->i_tex_bits= s->i_tex_bits;
2225  d->p_tex_bits= s->p_tex_bits;
2226  d->i_count= s->i_count;
2227  d->f_count= s->f_count;
2228  d->b_count= s->b_count;
2229  d->skip_count= s->skip_count;
2230  d->misc_bits= s->misc_bits;
2231  d->last_bits= 0;
2232 
2233  d->mb_skipped= 0;
2234  d->qscale= s->qscale;
2235  d->dquant= s->dquant;
2236 
2238 }
2239 
2240 static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
2241  int i;
2242 
2243  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2244  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2245 
2246  /* mpeg1 */
2247  d->mb_skip_run= s->mb_skip_run;
2248  for(i=0; i<3; i++)
2249  d->last_dc[i] = s->last_dc[i];
2250 
2251  /* statistics */
2252  d->mv_bits= s->mv_bits;
2253  d->i_tex_bits= s->i_tex_bits;
2254  d->p_tex_bits= s->p_tex_bits;
2255  d->i_count= s->i_count;
2256  d->f_count= s->f_count;
2257  d->b_count= s->b_count;
2258  d->skip_count= s->skip_count;
2259  d->misc_bits= s->misc_bits;
2260 
2261  d->mb_intra= s->mb_intra;
2262  d->mb_skipped= s->mb_skipped;
2263  d->mv_type= s->mv_type;
2264  d->mv_dir= s->mv_dir;
2265  d->pb= s->pb;
2266  if(s->data_partitioning){
2267  d->pb2= s->pb2;
2268  d->tex_pb= s->tex_pb;
2269  }
2270  d->block= s->block;
2271  for(i=0; i<8; i++)
2272  d->block_last_index[i]= s->block_last_index[i];
2274  d->qscale= s->qscale;
2275 
2277 }
2278 
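 /* Trial-encode one MB with the given candidate type into one of two scratch
  * bitstreams. The score is the number of bits produced, or bits*lambda2 plus
  * the reconstruction SSE when mb_decision is FF_MB_DECISION_RD; if it beats
  * *dmin the context is copied into 'best' and the scratch buffers are
  * swapped via *next_block. */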
2279 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2281  int *dmin, int *next_block, int motion_x, int motion_y)
2282 {
2283  int score;
2284  uint8_t *dest_backup[3];
2285 
2286  copy_context_before_encode(s, backup, type);
2287 
2288  s->block= s->blocks[*next_block];
2289  s->pb= pb[*next_block];
2290  if(s->data_partitioning){
2291  s->pb2 = pb2 [*next_block];
2292  s->tex_pb= tex_pb[*next_block];
2293  }
2294 
2295  if(*next_block){
2296  memcpy(dest_backup, s->dest, sizeof(s->dest));
2297  s->dest[0] = s->rd_scratchpad;
2298  s->dest[1] = s->rd_scratchpad + 16*s->linesize;
2299  s->dest[2] = s->rd_scratchpad + 16*s->linesize + 8;
2300  assert(s->linesize >= 32); //FIXME
2301  }
2302 
2303  encode_mb(s, motion_x, motion_y);
2304 
2305  score= put_bits_count(&s->pb);
2306  if(s->data_partitioning){
2307  score+= put_bits_count(&s->pb2);
2308  score+= put_bits_count(&s->tex_pb);
2309  }
2310 
2311  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2312  ff_MPV_decode_mb(s, s->block);
2313 
2314  score *= s->lambda2;
2315  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2316  }
2317 
2318  if(*next_block){
2319  memcpy(s->dest, dest_backup, sizeof(s->dest));
2320  }
2321 
2322  if(score<*dmin){
2323  *dmin= score;
2324  *next_block^=1;
2325 
2326  copy_context_after_encode(best, s, type);
2327  }
2328 }
2329 
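 /* Sum of squared errors between source and reconstruction: full 16x16 and
  * 8x8 blocks go through the dsp sse (or noise-preserving nsse) functions,
  * partial blocks at the right/bottom border use the scalar fallback loop. */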
2330 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2331  uint32_t *sq = ff_squareTbl + 256;
2332  int acc=0;
2333  int x,y;
2334 
2335  if(w==16 && h==16)
2336  return s->dsp.sse[0](NULL, src1, src2, stride, 16);
2337  else if(w==8 && h==8)
2338  return s->dsp.sse[1](NULL, src1, src2, stride, 8);
2339 
2340  for(y=0; y<h; y++){
2341  for(x=0; x<w; x++){
2342  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2343  }
2344  }
2345 
2346  av_assert2(acc>=0);
2347 
2348  return acc;
2349 }
2350 
2351 static int sse_mb(MpegEncContext *s){
2352  int w= 16;
2353  int h= 16;
2354 
2355  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2356  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2357 
2358  if(w==16 && h==16)
2359  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2360  return s->dsp.nsse[0](s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2361  +s->dsp.nsse[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2362  +s->dsp.nsse[1](s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2363  }else{
2364  return s->dsp.sse[0](NULL, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize, 16)
2365  +s->dsp.sse[1](NULL, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize, 8)
2366  +s->dsp.sse[1](NULL, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize, 8);
2367  }
2368  else
2369  return sse(s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2370  +sse(s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2371  +sse(s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2372 }
2373 
 2374 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
 2375  MpegEncContext *s= *(void**)arg;
2376 
2377 
2378  s->me.pre_pass=1;
2379  s->me.dia_size= s->avctx->pre_dia_size;
2380  s->first_slice_line=1;
2381  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2382  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2384  }
2385  s->first_slice_line=0;
2386  }
2387 
2388  s->me.pre_pass=0;
2389 
2390  return 0;
2391 }
2392 
 2393 static int estimate_motion_thread(AVCodecContext *c, void *arg){
 2394  MpegEncContext *s= *(void**)arg;
2395 
2397 
2398  s->me.dia_size= s->avctx->dia_size;
2399  s->first_slice_line=1;
2400  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2401  s->mb_x=0; //for block init below
2403  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2404  s->block_index[0]+=2;
2405  s->block_index[1]+=2;
2406  s->block_index[2]+=2;
2407  s->block_index[3]+=2;
2408 
2409  /* compute motion vector & mb_type and store in context */
2412  else
2414  }
2415  s->first_slice_line=0;
2416  }
2417  return 0;
2418 }
2419 
2420 static int mb_var_thread(AVCodecContext *c, void *arg){
2421  MpegEncContext *s= *(void**)arg;
2422  int mb_x, mb_y;
2423 
2425 
2426  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2427  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2428  int xx = mb_x * 16;
2429  int yy = mb_y * 16;
2430  uint8_t *pix = s->new_picture.f.data[0] + (yy * s->linesize) + xx;
2431  int varc;
2432  int sum = s->dsp.pix_sum(pix, s->linesize);
2433 
2434  varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)sum*sum)>>8) + 500 + 128)>>8;
2435 
2436  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2437  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2438  s->me.mb_var_sum_temp += varc;
2439  }
2440  }
2441  return 0;
2442 }
2443 
 2444 static void write_slice_end(MpegEncContext *s){
 2445  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2446  if(s->partitioned_frame){
2448  }
2449 
2450  ff_mpeg4_stuffing(&s->pb);
2451  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2453  }
2454 
2456  flush_put_bits(&s->pb);
2457 
2458  if((s->flags&CODEC_FLAG_PASS1) && !s->partitioned_frame)
2459  s->misc_bits+= get_bits_diff(s);
2460 }
2461 
 2462 static void write_mb_info(MpegEncContext *s)
 2463 {
2464  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2465  int offset = put_bits_count(&s->pb);
2466  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2467  int gobn = s->mb_y / s->gob_index;
2468  int pred_x, pred_y;
2469  if (CONFIG_H263_ENCODER)
2470  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2471  bytestream_put_le32(&ptr, offset);
2472  bytestream_put_byte(&ptr, s->qscale);
2473  bytestream_put_byte(&ptr, gobn);
2474  bytestream_put_le16(&ptr, mba);
2475  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2476  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2477  /* 4MV not implemented */
2478  bytestream_put_byte(&ptr, 0); /* hmv2 */
2479  bytestream_put_byte(&ptr, 0); /* vmv2 */
2480 }
2481 
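 /* Every s->mb_info bytes of coded output a 12-byte record is appended to the
  * macroblock-info side data: bit offset of the MB, qscale, GOB number, MB
  * address and the predicted motion vector, so that e.g. an RTP packetizer
  * can split the H.263 stream at MB boundaries. */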
2482 static void update_mb_info(MpegEncContext *s, int startcode)
2483 {
2484  if (!s->mb_info)
2485  return;
2486  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2487  s->mb_info_size += 12;
2488  s->prev_mb_info = s->last_mb_info;
2489  }
2490  if (startcode) {
2491  s->prev_mb_info = put_bits_count(&s->pb)/8;
 2492  /* This might have incremented mb_info_size above, and we return without
 2493  * actually writing any info into that slot yet. But in that case,
 2494  * this will be called again right after the start code has been written,
 2495  * and the mb info will be written then. */
2496  return;
2497  }
2498 
2499  s->last_mb_info = put_bits_count(&s->pb)/8;
2500  if (!s->mb_info_size)
2501  s->mb_info_size += 12;
2502  write_mb_info(s);
2503 }
2504 
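 /* encode_thread() is the per-slice worker: it sets up two scratch
  * PutBitContexts for the RD trials, resets the statistics and DC predictors,
  * then walks the MB rows, emitting resync/GOB headers in rtp_mode and either
  * picking the best of several candidate MB types (RD / QP_RD) or directly
  * encoding the single possible type. */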
2505 static int encode_thread(AVCodecContext *c, void *arg){
2506  MpegEncContext *s= *(void**)arg;
2507  int mb_x, mb_y, pdif = 0;
2508  int chr_h= 16>>s->chroma_y_shift;
2509  int i, j;
2510  MpegEncContext best_s, backup_s;
2511  uint8_t bit_buf[2][MAX_MB_BYTES];
2512  uint8_t bit_buf2[2][MAX_MB_BYTES];
2513  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2514  PutBitContext pb[2], pb2[2], tex_pb[2];
2515 
2517 
2518  for(i=0; i<2; i++){
2519  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2520  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2521  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2522  }
2523 
2524  s->last_bits= put_bits_count(&s->pb);
2525  s->mv_bits=0;
2526  s->misc_bits=0;
2527  s->i_tex_bits=0;
2528  s->p_tex_bits=0;
2529  s->i_count=0;
2530  s->f_count=0;
2531  s->b_count=0;
2532  s->skip_count=0;
2533 
2534  for(i=0; i<3; i++){
2535  /* init last dc values */
2536  /* note: quant matrix value (8) is implied here */
2537  s->last_dc[i] = 128 << s->intra_dc_precision;
2538 
2539  s->current_picture.f.error[i] = 0;
2540  }
2541  if(s->codec_id==AV_CODEC_ID_AMV){
2542  s->last_dc[0] = 128*8/13;
2543  s->last_dc[1] = 128*8/14;
2544  s->last_dc[2] = 128*8/14;
2545  }
2546  s->mb_skip_run = 0;
2547  memset(s->last_mv, 0, sizeof(s->last_mv));
2548 
2549  s->last_mv_dir = 0;
2550 
2551  switch(s->codec_id){
2552  case AV_CODEC_ID_H263:
2553  case AV_CODEC_ID_H263P:
2554  case AV_CODEC_ID_FLV1:
2555  if (CONFIG_H263_ENCODER)
2557  break;
2558  case AV_CODEC_ID_MPEG4:
2559  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2561  break;
2562  }
2563 
2564  s->resync_mb_x=0;
2565  s->resync_mb_y=0;
2566  s->first_slice_line = 1;
2567  s->ptr_lastgob = s->pb.buf;
2568  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2569  s->mb_x=0;
2570  s->mb_y= mb_y;
2571 
2572  ff_set_qscale(s, s->qscale);
2574 
2575  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2576  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2577  int mb_type= s->mb_type[xy];
2578 // int d;
2579  int dmin= INT_MAX;
2580  int dir;
2581 
2582  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2583  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2584  return -1;
2585  }
2586  if(s->data_partitioning){
2587  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
2588  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
2589  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
2590  return -1;
2591  }
2592  }
2593 
2594  s->mb_x = mb_x;
2595  s->mb_y = mb_y; // moved into loop, can get changed by H.261
2597 
2598  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
2600  xy= s->mb_y*s->mb_stride + s->mb_x;
2601  mb_type= s->mb_type[xy];
2602  }
2603 
2604  /* write gob / video packet header */
2605  if(s->rtp_mode){
2606  int current_packet_size, is_gob_start;
2607 
2608  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
2609 
2610  is_gob_start= s->avctx->rtp_payload_size && current_packet_size >= s->avctx->rtp_payload_size && mb_y + mb_x>0;
2611 
2612  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2613 
2614  switch(s->codec_id){
2615  case AV_CODEC_ID_H263:
2616  case AV_CODEC_ID_H263P:
2617  if(!s->h263_slice_structured)
2618  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
2619  break;
2621  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2623  if(s->mb_skip_run) is_gob_start=0;
2624  break;
2625  case AV_CODEC_ID_MJPEG:
2626  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2627  break;
2628  }
2629 
2630  if(is_gob_start){
2631  if(s->start_mb_y != mb_y || mb_x!=0){
2632  write_slice_end(s);
2633  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
2635  }
2636  }
2637 
2638  av_assert2((put_bits_count(&s->pb)&7) == 0);
2639  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
2640 
2641  if(s->avctx->error_rate && s->resync_mb_x + s->resync_mb_y > 0){
2642  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
2643  int d= 100 / s->avctx->error_rate;
2644  if(r % d == 0){
2645  current_packet_size=0;
2646  s->pb.buf_ptr= s->ptr_lastgob;
2647  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
2648  }
2649  }
2650 
2651  if (s->avctx->rtp_callback){
2652  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
2653  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
2654  }
2655  update_mb_info(s, 1);
2656 
2657  switch(s->codec_id){
2658  case AV_CODEC_ID_MPEG4:
2659  if (CONFIG_MPEG4_ENCODER) {
2662  }
2663  break;
 2664  case AV_CODEC_ID_MPEG1VIDEO:
 2665  case AV_CODEC_ID_MPEG2VIDEO:
 2666  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
2669  }
2670  break;
2671  case AV_CODEC_ID_H263:
2672  case AV_CODEC_ID_H263P:
2673  if (CONFIG_H263_ENCODER)
2674  ff_h263_encode_gob_header(s, mb_y);
2675  break;
2676  }
2677 
2678  if(s->flags&CODEC_FLAG_PASS1){
2679  int bits= put_bits_count(&s->pb);
2680  s->misc_bits+= bits - s->last_bits;
2681  s->last_bits= bits;
2682  }
2683 
2684  s->ptr_lastgob += current_packet_size;
2685  s->first_slice_line=1;
2686  s->resync_mb_x=mb_x;
2687  s->resync_mb_y=mb_y;
2688  }
2689  }
2690 
2691  if( (s->resync_mb_x == s->mb_x)
2692  && s->resync_mb_y+1 == s->mb_y){
2693  s->first_slice_line=0;
2694  }
2695 
2696  s->mb_skipped=0;
2697  s->dquant=0; //only for QP_RD
2698 
2699  update_mb_info(s, 0);
2700 
2701  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
2702  int next_block=0;
2703  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2704 
2705  copy_context_before_encode(&backup_s, s, -1);
2706  backup_s.pb= s->pb;
2709  if(s->data_partitioning){
2710  backup_s.pb2= s->pb2;
2711  backup_s.tex_pb= s->tex_pb;
2712  }
2713 
2714  if(mb_type&CANDIDATE_MB_TYPE_INTER){
2715  s->mv_dir = MV_DIR_FORWARD;
2716  s->mv_type = MV_TYPE_16X16;
2717  s->mb_intra= 0;
2718  s->mv[0][0][0] = s->p_mv_table[xy][0];
2719  s->mv[0][0][1] = s->p_mv_table[xy][1];
2720  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2721  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2722  }
2723  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
2724  s->mv_dir = MV_DIR_FORWARD;
2725  s->mv_type = MV_TYPE_FIELD;
2726  s->mb_intra= 0;
2727  for(i=0; i<2; i++){
2728  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2729  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2730  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2731  }
2732  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2733  &dmin, &next_block, 0, 0);
2734  }
2735  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
2736  s->mv_dir = MV_DIR_FORWARD;
2737  s->mv_type = MV_TYPE_16X16;
2738  s->mb_intra= 0;
2739  s->mv[0][0][0] = 0;
2740  s->mv[0][0][1] = 0;
2741  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2742  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2743  }
2744  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
2745  s->mv_dir = MV_DIR_FORWARD;
2746  s->mv_type = MV_TYPE_8X8;
2747  s->mb_intra= 0;
2748  for(i=0; i<4; i++){
2749  s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
2750  s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
2751  }
2752  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2753  &dmin, &next_block, 0, 0);
2754  }
2755  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
2756  s->mv_dir = MV_DIR_FORWARD;
2757  s->mv_type = MV_TYPE_16X16;
2758  s->mb_intra= 0;
2759  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
2760  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
2761  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
2762  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2763  }
2764  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
2765  s->mv_dir = MV_DIR_BACKWARD;
2766  s->mv_type = MV_TYPE_16X16;
2767  s->mb_intra= 0;
2768  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
2769  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
2770  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
2771  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
2772  }
2773  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
2775  s->mv_type = MV_TYPE_16X16;
2776  s->mb_intra= 0;
2777  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
2778  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
2779  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
2780  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
2781  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
2782  &dmin, &next_block, 0, 0);
2783  }
2784  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
2785  s->mv_dir = MV_DIR_FORWARD;
2786  s->mv_type = MV_TYPE_FIELD;
2787  s->mb_intra= 0;
2788  for(i=0; i<2; i++){
2789  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
2790  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
2791  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
2792  }
2793  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
2794  &dmin, &next_block, 0, 0);
2795  }
2796  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
2797  s->mv_dir = MV_DIR_BACKWARD;
2798  s->mv_type = MV_TYPE_FIELD;
2799  s->mb_intra= 0;
2800  for(i=0; i<2; i++){
2801  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
2802  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
2803  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
2804  }
2805  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
2806  &dmin, &next_block, 0, 0);
2807  }
2808  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
2810  s->mv_type = MV_TYPE_FIELD;
2811  s->mb_intra= 0;
2812  for(dir=0; dir<2; dir++){
2813  for(i=0; i<2; i++){
2814  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
2815  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
2816  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
2817  }
2818  }
2819  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
2820  &dmin, &next_block, 0, 0);
2821  }
2822  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
2823  s->mv_dir = 0;
2824  s->mv_type = MV_TYPE_16X16;
2825  s->mb_intra= 1;
2826  s->mv[0][0][0] = 0;
2827  s->mv[0][0][1] = 0;
2828  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
2829  &dmin, &next_block, 0, 0);
2830  if(s->h263_pred || s->h263_aic){
2831  if(best_s.mb_intra)
2832  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
2833  else
2834  ff_clean_intra_table_entries(s); //old mode?
2835  }
2836  }
2837 
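 /* QP_RD: re-try the winning 16x16 mode with the quantizer deltas from
  * dquant_tab (B-frames only try +-2), saving and restoring the DC/AC
  * prediction state whenever a trial quantizer is not adopted. */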
2838  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
2839  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
2840  const int last_qp= backup_s.qscale;
2841  int qpi, qp, dc[6];
2842  DCTELEM ac[6][16];
2843  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
2844  static const int dquant_tab[4]={-1,1,-2,2};
2845 
2846  av_assert2(backup_s.dquant == 0);
2847 
2848  //FIXME intra
2849  s->mv_dir= best_s.mv_dir;
2850  s->mv_type = MV_TYPE_16X16;
2851  s->mb_intra= best_s.mb_intra;
2852  s->mv[0][0][0] = best_s.mv[0][0][0];
2853  s->mv[0][0][1] = best_s.mv[0][0][1];
2854  s->mv[1][0][0] = best_s.mv[1][0][0];
2855  s->mv[1][0][1] = best_s.mv[1][0][1];
2856 
2857  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
2858  for(; qpi<4; qpi++){
2859  int dquant= dquant_tab[qpi];
2860  qp= last_qp + dquant;
2861  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
2862  continue;
2863  backup_s.dquant= dquant;
2864  if(s->mb_intra && s->dc_val[0]){
2865  for(i=0; i<6; i++){
2866  dc[i]= s->dc_val[0][ s->block_index[i] ];
2867  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(DCTELEM)*16);
2868  }
2869  }
2870 
2871  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2872  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
2873  if(best_s.qscale != qp){
2874  if(s->mb_intra && s->dc_val[0]){
2875  for(i=0; i<6; i++){
2876  s->dc_val[0][ s->block_index[i] ]= dc[i];
2877  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(DCTELEM)*16);
2878  }
2879  }
2880  }
2881  }
2882  }
2883  }
2884  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
2885  int mx= s->b_direct_mv_table[xy][0];
2886  int my= s->b_direct_mv_table[xy][1];
2887 
2888  backup_s.dquant = 0;
2890  s->mb_intra= 0;
2891  ff_mpeg4_set_direct_mv(s, mx, my);
2892  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2893  &dmin, &next_block, mx, my);
2894  }
2895  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
2896  backup_s.dquant = 0;
2898  s->mb_intra= 0;
2899  ff_mpeg4_set_direct_mv(s, 0, 0);
2900  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
2901  &dmin, &next_block, 0, 0);
2902  }
2903  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
2904  int coded=0;
2905  for(i=0; i<6; i++)
2906  coded |= s->block_last_index[i];
2907  if(coded){
2908  int mx,my;
2909  memcpy(s->mv, best_s.mv, sizeof(s->mv));
2910  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
2911  mx=my=0; //FIXME find the one we actually used
2912  ff_mpeg4_set_direct_mv(s, mx, my);
2913  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
2914  mx= s->mv[1][0][0];
2915  my= s->mv[1][0][1];
2916  }else{
2917  mx= s->mv[0][0][0];
2918  my= s->mv[0][0][1];
2919  }
2920 
2921  s->mv_dir= best_s.mv_dir;
2922  s->mv_type = best_s.mv_type;
2923  s->mb_intra= 0;
2924 /* s->mv[0][0][0] = best_s.mv[0][0][0];
2925  s->mv[0][0][1] = best_s.mv[0][0][1];
2926  s->mv[1][0][0] = best_s.mv[1][0][0];
2927  s->mv[1][0][1] = best_s.mv[1][0][1];*/
2928  backup_s.dquant= 0;
2929  s->skipdct=1;
2930  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
2931  &dmin, &next_block, mx, my);
2932  s->skipdct=0;
2933  }
2934  }
2935 
2936  s->current_picture.f.qscale_table[xy] = best_s.qscale;
2937 
2938  copy_context_after_encode(s, &best_s, -1);
2939 
2940  pb_bits_count= put_bits_count(&s->pb);
2941  flush_put_bits(&s->pb);
2942  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
2943  s->pb= backup_s.pb;
2944 
2945  if(s->data_partitioning){
2946  pb2_bits_count= put_bits_count(&s->pb2);
2947  flush_put_bits(&s->pb2);
2948  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
2949  s->pb2= backup_s.pb2;
2950 
2951  tex_pb_bits_count= put_bits_count(&s->tex_pb);
2952  flush_put_bits(&s->tex_pb);
2953  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
2954  s->tex_pb= backup_s.tex_pb;
2955  }
2956  s->last_bits= put_bits_count(&s->pb);
2957 
2958  if (CONFIG_H263_ENCODER &&
2961 
2962  if(next_block==0){ //FIXME 16 vs linesize16
2963  s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
2964  s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
2965  s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
2966  }
2967 
2969  ff_MPV_decode_mb(s, s->block);
2970  } else {
2971  int motion_x = 0, motion_y = 0;
2973  // only one MB-Type possible
2974 
2975  switch(mb_type){
2977  s->mv_dir = 0;
2978  s->mb_intra= 1;
2979  motion_x= s->mv[0][0][0] = 0;
2980  motion_y= s->mv[0][0][1] = 0;
2981  break;
2983  s->mv_dir = MV_DIR_FORWARD;
2984  s->mb_intra= 0;
2985  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
2986  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
2987  break;
2989  s->mv_dir = MV_DIR_FORWARD;
2990  s->mv_type = MV_TYPE_FIELD;
2991  s->mb_intra= 0;
2992  for(i=0; i<2; i++){
2993  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
2994  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
2995  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
2996  }
2997  break;
2999  s->mv_dir = MV_DIR_FORWARD;
3000  s->mv_type = MV_TYPE_8X8;
3001  s->mb_intra= 0;
3002  for(i=0; i<4; i++){
3003  s->mv[0][i][0] = s->current_picture.f.motion_val[0][s->block_index[i]][0];
3004  s->mv[0][i][1] = s->current_picture.f.motion_val[0][s->block_index[i]][1];
3005  }
3006  break;
3008  if (CONFIG_MPEG4_ENCODER) {
3010  s->mb_intra= 0;
3011  motion_x=s->b_direct_mv_table[xy][0];
3012  motion_y=s->b_direct_mv_table[xy][1];
3013  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3014  }
3015  break;
3017  if (CONFIG_MPEG4_ENCODER) {
3019  s->mb_intra= 0;
3020  ff_mpeg4_set_direct_mv(s, 0, 0);
3021  }
3022  break;
3025  s->mb_intra= 0;
3026  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3027  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3028  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3029  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3030  break;
3032  s->mv_dir = MV_DIR_BACKWARD;
3033  s->mb_intra= 0;
3034  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3035  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3036  break;
3038  s->mv_dir = MV_DIR_FORWARD;
3039  s->mb_intra= 0;
3040  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3041  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3042  break;
3044  s->mv_dir = MV_DIR_FORWARD;
3045  s->mv_type = MV_TYPE_FIELD;
3046  s->mb_intra= 0;
3047  for(i=0; i<2; i++){
3048  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3049  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3050  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3051  }
3052  break;
3054  s->mv_dir = MV_DIR_BACKWARD;
3055  s->mv_type = MV_TYPE_FIELD;
3056  s->mb_intra= 0;
3057  for(i=0; i<2; i++){
3058  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3059  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3060  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3061  }
3062  break;
3065  s->mv_type = MV_TYPE_FIELD;
3066  s->mb_intra= 0;
3067  for(dir=0; dir<2; dir++){
3068  for(i=0; i<2; i++){
3069  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3070  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3071  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3072  }
3073  }
3074  break;
3075  default:
3076  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3077  }
3078 
3079  encode_mb(s, motion_x, motion_y);
3080 
3081  // RAL: Update last macroblock type
3082  s->last_mv_dir = s->mv_dir;
3083 
3084  if (CONFIG_H263_ENCODER &&
3087 
3088  ff_MPV_decode_mb(s, s->block);
3089  }
3090 
 3091  /* zero the MV table entry of intra MBs in I/P/S frames, since direct mode in B-frames reads it */
3092  if(s->mb_intra /* && I,P,S_TYPE */){
3093  s->p_mv_table[xy][0]=0;
3094  s->p_mv_table[xy][1]=0;
3095  }
3096 
3097  if(s->flags&CODEC_FLAG_PSNR){
3098  int w= 16;
3099  int h= 16;
3100 
3101  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3102  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3103 
3104  s->current_picture.f.error[0] += sse(
3105  s, s->new_picture.f.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3106  s->dest[0], w, h, s->linesize);
3107  s->current_picture.f.error[1] += sse(
3108  s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3109  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3110  s->current_picture.f.error[2] += sse(
3111  s, s->new_picture.f.data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3112  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3113  }
3114  if(s->loop_filter){
3115  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3117  }
3118  av_dlog(s->avctx, "MB %d %d bits\n",
3119  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3120  }
3121  }
3122 
 3123  // not pretty, but this must be written before the bitstream is flushed, so it has to stay here
3126 
3127  write_slice_end(s);
3128 
3129  /* Send the last GOB if RTP */
3130  if (s->avctx->rtp_callback) {
3131  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3132  pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3133  /* Call the RTP callback to send the last GOB */
3134  emms_c();
3135  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3136  }
3137 
3138  return 0;
3139 }
3140 
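 /* Each slice thread works on its own MpegEncContext copy; after the threads
  * finish, their statistics are folded back into the main context. MERGE adds
  * the field of src into dst and clears it in src. */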
3141 #define MERGE(field) dst->field += src->field; src->field=0
 3142 static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
 3143  MERGE(me.scene_change_score);
3144  MERGE(me.mc_mb_var_sum_temp);
3145  MERGE(me.mb_var_sum_temp);
3146 }
3147 
 3148 static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
 3149  int i;
3150 
3151  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3152  MERGE(dct_count[1]);
3153  MERGE(mv_bits);
3154  MERGE(i_tex_bits);
3155  MERGE(p_tex_bits);
3156  MERGE(i_count);
3157  MERGE(f_count);
3158  MERGE(b_count);
3159  MERGE(skip_count);
3160  MERGE(misc_bits);
3161  MERGE(error_count);
3166 
3167  if(dst->avctx->noise_reduction){
3168  for(i=0; i<64; i++){
3169  MERGE(dct_error_sum[0][i]);
3170  MERGE(dct_error_sum[1][i]);
3171  }
3172  }
3173 
3174  assert(put_bits_count(&src->pb) % 8 ==0);
3175  assert(put_bits_count(&dst->pb) % 8 ==0);
3176  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3177  flush_put_bits(&dst->pb);
3178 }
3179 
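 /* Pick the frame quantizer: use a pending next_lambda if one is queued,
  * otherwise ask the rate control (unless the qscale is fixed). With adaptive
  * quantization a per-MB lambda/qscale table is filled by the codec-specific
  * helper; update_qscale() then derives qscale from lambda. */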
3180 static int estimate_qp(MpegEncContext *s, int dry_run){
3181  if (s->next_lambda){
3184  if(!dry_run) s->next_lambda= 0;
3185  } else if (!s->fixed_qscale) {
3188  if (s->current_picture.f.quality < 0)
3189  return -1;
3190  }
3191 
3192  if(s->adaptive_quant){
3193  switch(s->codec_id){
3194  case AV_CODEC_ID_MPEG4:
3195  if (CONFIG_MPEG4_ENCODER)
3197  break;
3198  case AV_CODEC_ID_H263:
3199  case AV_CODEC_ID_H263P:
3200  case AV_CODEC_ID_FLV1:
3201  if (CONFIG_H263_ENCODER)
3203  break;
3204  default:
3205  ff_init_qscale_tab(s);
3206  }
3207 
3208  s->lambda= s->lambda_table[0];
3209  //FIXME broken
3210  }else
3211  s->lambda = s->current_picture.f.quality;
3212  update_qscale(s);
3213  return 0;
3214 }
3215 
3216 /* must be called before writing the header */
 3217 static void set_frame_distances(MpegEncContext * s){
 3218  assert(s->current_picture_ptr->f.pts != AV_NOPTS_VALUE);
3219  s->time = s->current_picture_ptr->f.pts * s->avctx->time_base.num;
3220 
3221  if(s->pict_type==AV_PICTURE_TYPE_B){
3222  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3223  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3224  }else{
3225  s->pp_time= s->time - s->last_non_b_time;
3226  s->last_non_b_time= s->time;
3227  assert(s->picture_number==0 || s->pp_time > 0);
3228  }
3229 }
3230 
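 /* encode_picture(): prepares motion estimation and rate control for one
  * frame, runs the ME / variance threads over all slice contexts, switches to
  * an intra frame when a scene change is detected, clamps long motion vectors
  * and selects f_code/b_code, writes the codec-specific picture header, and
  * finally runs encode_thread() for every slice context and merges the
  * results. */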
 3231 static int encode_picture(MpegEncContext *s, int picture_number)
 3232 {
3233  int i, ret;
3234  int bits;
3235  int context_count = s->slice_context_count;
3236 
3238 
3239  /* Reset the average MB variance */
3240  s->me.mb_var_sum_temp =
3241  s->me.mc_mb_var_sum_temp = 0;
3242 
3243  /* we need to initialize some time vars before we can encode b-frames */
3244  // RAL: Condition added for MPEG1VIDEO
3247  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3248  ff_set_mpeg4_time(s);
3249 
3250  s->me.scene_change_score=0;
3251 
3252 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3253 
3254  if(s->pict_type==AV_PICTURE_TYPE_I){
3255  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3256  else s->no_rounding=0;
3257  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3259  s->no_rounding ^= 1;
3260  }
3261 
3262  if(s->flags & CODEC_FLAG_PASS2){
3263  if (estimate_qp(s,1) < 0)
3264  return -1;
3265  ff_get_2pass_fcode(s);
3266  }else if(!(s->flags & CODEC_FLAG_QSCALE)){
3268  s->lambda= s->last_lambda_for[s->pict_type];
3269  else
3271  update_qscale(s);
3272  }
3273 
3274  if(s->codec_id != AV_CODEC_ID_AMV){
3279  }
3280 
3281  s->mb_intra=0; //for the rate distortion & bit compare functions
3282  for(i=1; i<context_count; i++){
3284  if (ret < 0)
3285  return ret;
3286  }
3287 
3288  if(ff_init_me(s)<0)
3289  return -1;
3290 
3291  /* Estimate motion for every MB */
3292  if(s->pict_type != AV_PICTURE_TYPE_I){
3293  s->lambda = (s->lambda * s->avctx->me_penalty_compensation + 128)>>8;
3294  s->lambda2= (s->lambda2* (int64_t)s->avctx->me_penalty_compensation + 128)>>8;
3295  if(s->pict_type != AV_PICTURE_TYPE_B && s->avctx->me_threshold==0){
3296  if((s->avctx->pre_me && s->last_non_b_pict_type==AV_PICTURE_TYPE_I) || s->avctx->pre_me==2){
3297  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3298  }
3299  }
3300 
3301  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3302  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3303  /* I-Frame */
3304  for(i=0; i<s->mb_stride*s->mb_height; i++)
3306 
3307  if(!s->fixed_qscale){
3308  /* finding spatial complexity for I-frame rate control */
3309  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3310  }
3311  }
3312  for(i=1; i<context_count; i++){
3314  }
 3315  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
 3316  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3317  emms_c();
3318 
3321  for(i=0; i<s->mb_stride*s->mb_height; i++)
3323  if(s->msmpeg4_version >= 3)
3324  s->no_rounding=1;
3325  av_dlog(s, "Scene change detected, encoding as I Frame %d %d\n",
3327  }
3328 
3329  if(!s->umvplus){
3332 
3334  int a,b;
3335  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3337  s->f_code= FFMAX3(s->f_code, a, b);
3338  }
3339 
3340  ff_fix_long_p_mvs(s);
3343  int j;
3344  for(i=0; i<2; i++){
3345  for(j=0; j<2; j++)
3348  }
3349  }
3350  }
3351 
3352  if(s->pict_type==AV_PICTURE_TYPE_B){
3353  int a, b;
3354 
3357  s->f_code = FFMAX(a, b);
3358 
3361  s->b_code = FFMAX(a, b);
3362 
3368  int dir, j;
3369  for(dir=0; dir<2; dir++){
3370  for(i=0; i<2; i++){
3371  for(j=0; j<2; j++){
3374  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3375  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3376  }
3377  }
3378  }
3379  }
3380  }
3381  }
3382 
3383  if (estimate_qp(s, 0) < 0)
3384  return -1;
3385 
3386  if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==AV_PICTURE_TYPE_I && !(s->flags & CODEC_FLAG_QSCALE))
3387  s->qscale= 3; //reduce clipping problems
3388 
3389  if (s->out_format == FMT_MJPEG) {
3390  /* for mjpeg, we do include qscale in the matrix */
3391  for(i=1;i<64;i++){
3392  int j= s->dsp.idct_permutation[i];
3393 
3394  s->intra_matrix[j] = av_clip_uint8((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3395  }
3396  s->y_dc_scale_table=
3400  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3401  s->qscale= 8;
3402  }
3403  if(s->codec_id == AV_CODEC_ID_AMV){
3404  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3405  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3406  for(i=1;i<64;i++){
3407  int j= s->dsp.idct_permutation[ff_zigzag_direct[i]];
3408 
3409  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3410  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3411  }
3412  s->y_dc_scale_table= y;
3413  s->c_dc_scale_table= c;
3414  s->intra_matrix[0] = 13;
3415  s->chroma_intra_matrix[0] = 14;
3417  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3419  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3420  s->qscale= 8;
3421  }
3422 
3423  //FIXME var duplication
3425  s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3428 
3429  if (s->current_picture.f.key_frame)
3430  s->picture_in_gop_number=0;
3431 
3432  s->mb_x = s->mb_y = 0;
3433  s->last_bits= put_bits_count(&s->pb);
3434  switch(s->out_format) {
3435  case FMT_MJPEG:
3436  if (CONFIG_MJPEG_ENCODER)
3438  break;
3439  case FMT_H261:
3440  if (CONFIG_H261_ENCODER)
3441  ff_h261_encode_picture_header(s, picture_number);
3442  break;
3443  case FMT_H263:
3444  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3445  ff_wmv2_encode_picture_header(s, picture_number);
3446  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3447  ff_msmpeg4_encode_picture_header(s, picture_number);
3448  else if (CONFIG_MPEG4_ENCODER && s->h263_pred)
3449  ff_mpeg4_encode_picture_header(s, picture_number);
3450  else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10)
3451  ff_rv10_encode_picture_header(s, picture_number);
3452  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3453  ff_rv20_encode_picture_header(s, picture_number);
3454  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3455  ff_flv_encode_picture_header(s, picture_number);
3456  else if (CONFIG_H263_ENCODER)
3457  ff_h263_encode_picture_header(s, picture_number);
3458  break;
3459  case FMT_MPEG1:
3460  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3461  ff_mpeg1_encode_picture_header(s, picture_number);
3462  break;
3463  case FMT_H264:
3464  break;
3465  default:
3466  av_assert0(0);
3467  }
3468  bits= put_bits_count(&s->pb);
3469  s->header_bits= bits - s->last_bits;
3470 
3471  for(i=1; i<context_count; i++){
3473  }
3474  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3475  for(i=1; i<context_count; i++){
3477  }
3478  emms_c();
3479  return 0;
3480 }
3481 
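 /* Noise reduction: accumulate the magnitude of each coefficient into
  * dct_error_sum and pull the coefficient towards zero by dct_offset, never
  * crossing zero. */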
 3482 static void denoise_dct_c(MpegEncContext *s, DCTELEM *block){
 3483  const int intra= s->mb_intra;
3484  int i;
3485 
3486  s->dct_count[intra]++;
3487 
3488  for(i=0; i<64; i++){
3489  int level= block[i];
3490 
3491  if(level){
3492  if(level>0){
3493  s->dct_error_sum[intra][i] += level;
3494  level -= s->dct_offset[intra][i];
3495  if(level<0) level=0;
3496  }else{
3497  s->dct_error_sum[intra][i] -= level;
3498  level += s->dct_offset[intra][i];
3499  if(level>0) level=0;
3500  }
3501  block[i]= level;
3502  }
3503  }
3504 }
3505 
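 /* Trellis quantization: after the FDCT every scan position gets at most two
  * candidate levels; a dynamic-programming search over (run, level) pairs
  * keeps a survivor list and selects the sequence that minimizes
  * distortion + lambda * rate, with rates taken from the VLC length tables. */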
 3506 static int dct_quantize_trellis_c(MpegEncContext *s,
 3507  DCTELEM *block, int n,
3508  int qscale, int *overflow){
3509  const int *qmat;
3510  const uint8_t *scantable= s->intra_scantable.scantable;
3511  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3512  int max=0;
3513  unsigned int threshold1, threshold2;
3514  int bias=0;
3515  int run_tab[65];
3516  int level_tab[65];
3517  int score_tab[65];
3518  int survivor[65];
3519  int survivor_count;
3520  int last_run=0;
3521  int last_level=0;
3522  int last_score= 0;
3523  int last_i;
3524  int coeff[2][64];
3525  int coeff_count[64];
3526  int qmul, qadd, start_i, last_non_zero, i, dc;
3527  const int esc_length= s->ac_esc_length;
3528  uint8_t * length;
3529  uint8_t * last_length;
3530  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3531 
3532  s->dsp.fdct (block);
3533 
3534  if(s->dct_error_sum)
3535  s->denoise_dct(s, block);
3536  qmul= qscale*16;
3537  qadd= ((qscale-1)|1)*8;
3538 
3539  if (s->mb_intra) {
3540  int q;
3541  if (!s->h263_aic) {
3542  if (n < 4)
3543  q = s->y_dc_scale;
3544  else
3545  q = s->c_dc_scale;
3546  q = q << 3;
3547  } else{
3548  /* For AIC we skip quant/dequant of INTRADC */
3549  q = 1 << 3;
3550  qadd=0;
3551  }
3552 
3553  /* note: block[0] is assumed to be positive */
3554  block[0] = (block[0] + (q >> 1)) / q;
3555  start_i = 1;
3556  last_non_zero = 0;
3557  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
3558  if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3559  bias= 1<<(QMAT_SHIFT-1);
3560  length = s->intra_ac_vlc_length;
3561  last_length= s->intra_ac_vlc_last_length;
3562  } else {
3563  start_i = 0;
3564  last_non_zero = -1;
3565  qmat = s->q_inter_matrix[qscale];
3566  length = s->inter_ac_vlc_length;
3567  last_length= s->inter_ac_vlc_last_length;
3568  }
3569  last_i= start_i;
3570 
3571  threshold1= (1<<QMAT_SHIFT) - bias - 1;
3572  threshold2= (threshold1<<1);
3573 
3574  for(i=63; i>=start_i; i--) {
3575  const int j = scantable[i];
3576  int level = block[j] * qmat[j];
3577 
3578  if(((unsigned)(level+threshold1))>threshold2){
3579  last_non_zero = i;
3580  break;
3581  }
3582  }
3583 
3584  for(i=start_i; i<=last_non_zero; i++) {
3585  const int j = scantable[i];
3586  int level = block[j] * qmat[j];
3587 
3588 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
3589 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
3590  if(((unsigned)(level+threshold1))>threshold2){
3591  if(level>0){
3592  level= (bias + level)>>QMAT_SHIFT;
3593  coeff[0][i]= level;
3594  coeff[1][i]= level-1;
3595 // coeff[2][k]= level-2;
3596  }else{
3597  level= (bias - level)>>QMAT_SHIFT;
3598  coeff[0][i]= -level;
3599  coeff[1][i]= -level+1;
3600 // coeff[2][k]= -level+2;
3601  }
3602  coeff_count[i]= FFMIN(level, 2);
3603  av_assert2(coeff_count[i]);
3604  max |=level;
3605  }else{
3606  coeff[0][i]= (level>>31)|1;
3607  coeff_count[i]= 1;
3608  }
3609  }
3610 
3611  *overflow= s->max_qcoeff < max; //overflow might have happened
3612 
3613  if(last_non_zero < start_i){
3614  memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3615  return last_non_zero;
3616  }
3617 
3618  score_tab[start_i]= 0;
3619  survivor[0]= start_i;
3620  survivor_count= 1;
3621 
3622  for(i=start_i; i<=last_non_zero; i++){
3623  int level_index, j, zero_distortion;
3624  int dct_coeff= FFABS(block[ scantable[i] ]);
3625  int best_score=256*256*256*120;
3626 
3627  if (s->dsp.fdct == ff_fdct_ifast)
3628  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
3629  zero_distortion= dct_coeff*dct_coeff;
3630 
3631  for(level_index=0; level_index < coeff_count[i]; level_index++){
3632  int distortion;
3633  int level= coeff[level_index][i];
3634  const int alevel= FFABS(level);
3635  int unquant_coeff;
3636 
3637  av_assert2(level);
3638 
3639  if(s->out_format == FMT_H263){
3640  unquant_coeff= alevel*qmul + qadd;
3641  }else{ //MPEG1
3642  j= s->dsp.idct_permutation[ scantable[i] ]; //FIXME optimize
3643  if(s->mb_intra){
3644  unquant_coeff = (int)( alevel * qscale * s->intra_matrix[j]) >> 3;
3645  unquant_coeff = (unquant_coeff - 1) | 1;
3646  }else{
3647  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
3648  unquant_coeff = (unquant_coeff - 1) | 1;
3649  }
3650  unquant_coeff<<= 3;
3651  }
3652 
3653  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3654  level+=64;
3655  if((level&(~127)) == 0){
3656  for(j=survivor_count-1; j>=0; j--){
3657  int run= i - survivor[j];
3658  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3659  score += score_tab[i-run];
3660 
3661  if(score < best_score){
3662  best_score= score;
3663  run_tab[i+1]= run;
3664  level_tab[i+1]= level-64;
3665  }
3666  }
3667 
3668  if(s->out_format == FMT_H263){
3669  for(j=survivor_count-1; j>=0; j--){
3670  int run= i - survivor[j];
3671  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
3672  score += score_tab[i-run];
3673  if(score < last_score){
3674  last_score= score;
3675  last_run= run;
3676  last_level= level-64;
3677  last_i= i+1;
3678  }
3679  }
3680  }
3681  }else{
3682  distortion += esc_length*lambda;
3683  for(j=survivor_count-1; j>=0; j--){
3684  int run= i - survivor[j];
3685  int score= distortion + score_tab[i-run];
3686 
3687  if(score < best_score){
3688  best_score= score;
3689  run_tab[i+1]= run;
3690  level_tab[i+1]= level-64;
3691  }
3692  }
3693 
3694  if(s->out_format == FMT_H263){
3695  for(j=survivor_count-1; j>=0; j--){
3696  int run= i - survivor[j];
3697  int score= distortion + score_tab[i-run];
3698  if(score < last_score){
3699  last_score= score;
3700  last_run= run;
3701  last_level= level-64;
3702  last_i= i+1;
3703  }
3704  }
3705  }
3706  }
3707  }
3708 
3709  score_tab[i+1]= best_score;
3710 
 3711  //Note: there is a VLC code in MPEG-4 which is 1 bit shorter than another one with a shorter run and the same level
3712  if(last_non_zero <= 27){
3713  for(; survivor_count; survivor_count--){
3714  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3715  break;
3716  }
3717  }else{
3718  for(; survivor_count; survivor_count--){
3719  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3720  break;
3721  }
3722  }
3723 
3724  survivor[ survivor_count++ ]= i+1;
3725  }
3726 
3727  if(s->out_format != FMT_H263){
3728  last_score= 256*256*256*120;
3729  for(i= survivor[0]; i<=last_non_zero + 1; i++){
3730  int score= score_tab[i];
 3731  if(i) score += lambda*2; //FIXME be more exact?
3732 
3733  if(score < last_score){
3734  last_score= score;
3735  last_i= i;
3736  last_level= level_tab[i];
3737  last_run= run_tab[i];
3738  }
3739  }
3740  }
3741 
3742  s->coded_score[n] = last_score;
3743 
3744  dc= FFABS(block[0]);
3745  last_non_zero= last_i - 1;
3746  memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
3747 
3748  if(last_non_zero < start_i)
3749  return last_non_zero;
3750 
3751  if(last_non_zero == 0 && start_i == 0){
3752  int best_level= 0;
3753  int best_score= dc * dc;
3754 
3755  for(i=0; i<coeff_count[0]; i++){
3756  int level= coeff[i][0];
3757  int alevel= FFABS(level);
3758  int unquant_coeff, score, distortion;
3759 
3760  if(s->out_format == FMT_H263){
3761  unquant_coeff= (alevel*qmul + qadd)>>3;
3762  }else{ //MPEG1
3763  unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
3764  unquant_coeff = (unquant_coeff - 1) | 1;
3765  }
3766  unquant_coeff = (unquant_coeff + 4) >> 3;
3767  unquant_coeff<<= 3 + 3;
3768 
3769  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
3770  level+=64;
3771  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
3772  else score= distortion + esc_length*lambda;
3773 
3774  if(score < best_score){
3775  best_score= score;
3776  best_level= level - 64;
3777  }
3778  }
3779  block[0]= best_level;
3780  s->coded_score[n] = best_score - dc*dc;
3781  if(best_level == 0) return -1;
3782  else return last_non_zero;
3783  }
3784 
3785  i= last_i;
3786  av_assert2(last_level);
3787 
3788  block[ perm_scantable[last_non_zero] ]= last_level;
3789  i -= last_run + 1;
3790 
3791  for(; i>start_i; i -= run_tab[i] + 1){
3792  block[ perm_scantable[i-1] ]= level_tab[i];
3793  }
3794 
3795  return last_non_zero;
3796 }
3797 
3798 //#define REFINE_STATS 1
3799 static int16_t basis[64][64];
3800 
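 /* build_basis() fills basis[][] with the 8x8 DCT basis functions, scaled by
  * BASIS_SHIFT and stored in the given coefficient permutation order, so that
  * dct_quantize_refine() can apply coefficient changes directly in the
  * spatial domain via add_8x8basis()/try_8x8basis(). */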
3801 static void build_basis(uint8_t *perm){
3802  int i, j, x, y;
3803  emms_c();
3804  for(i=0; i<8; i++){
3805  for(j=0; j<8; j++){
3806  for(y=0; y<8; y++){
3807  for(x=0; x<8; x++){
3808  double s= 0.25*(1<<BASIS_SHIFT);
3809  int index= 8*i + j;
3810  int perm_index= perm[index];
3811  if(i==0) s*= sqrt(0.5);
3812  if(j==0) s*= sqrt(0.5);
3813  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
3814  }
3815  }
3816  }
3817  }
3818 }
3819 
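 /* dct_quantize_refine(): greedy improvement of an already quantized block.
  * Starting from the spatial-domain residual rem[], it repeatedly evaluates
  * +-1 changes of individual coefficients, scoring each as weighted
  * spatial-domain error plus the VLC rate difference, and applies the best
  * change until no change improves the score. */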
3820 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
3821  DCTELEM *block, int16_t *weight, DCTELEM *orig,
3822  int n, int qscale){
3823  int16_t rem[64];
3824  LOCAL_ALIGNED_16(DCTELEM, d1, [64]);
3825  const uint8_t *scantable= s->intra_scantable.scantable;
3826  const uint8_t *perm_scantable= s->intra_scantable.permutated;
3827 // unsigned int threshold1, threshold2;
3828 // int bias=0;
3829  int run_tab[65];
3830  int prev_run=0;
3831  int prev_level=0;
3832  int qmul, qadd, start_i, last_non_zero, i, dc;
3833  uint8_t * length;
3834  uint8_t * last_length;
3835  int lambda;
3836  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
3837 #ifdef REFINE_STATS
3838 static int count=0;
3839 static int after_last=0;
3840 static int to_zero=0;
3841 static int from_zero=0;
3842 static int raise=0;
3843 static int lower=0;
3844 static int messed_sign=0;
3845 #endif
3846 
3847  if(basis[0][0] == 0)
3849 
3850  qmul= qscale*2;
3851  qadd= (qscale-1)|1;
3852  if (s->mb_intra) {
3853  if (!s->h263_aic) {
3854  if (n < 4)
3855  q = s->y_dc_scale;
3856  else
3857  q = s->c_dc_scale;
3858  } else{
3859  /* For AIC we skip quant/dequant of INTRADC */
3860  q = 1;
3861  qadd=0;
3862  }
3863  q <<= RECON_SHIFT-3;
3864  /* note: block[0] is assumed to be positive */
3865  dc= block[0]*q;
3866 // block[0] = (block[0] + (q >> 1)) / q;
3867  start_i = 1;
3868 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
3869 // bias= 1<<(QMAT_SHIFT-1);
3870  length = s->intra_ac_vlc_length;
3871  last_length= s->intra_ac_vlc_last_length;
3872  } else {
3873  dc= 0;
3874  start_i = 0;
3875  length = s->inter_ac_vlc_length;
3876  last_length= s->inter_ac_vlc_last_length;
3877  }
3878  last_non_zero = s->block_last_index[n];
3879 
3880 #ifdef REFINE_STATS
3881 {START_TIMER
3882 #endif
3883  dc += (1<<(RECON_SHIFT-1));
3884  for(i=0; i<64; i++){
 3885  rem[i]= dc - (orig[i]<<RECON_SHIFT); //FIXME use orig directly instead of copying to rem[]
3886  }
3887 #ifdef REFINE_STATS
3888 STOP_TIMER("memset rem[]")}
3889 #endif
3890  sum=0;
3891  for(i=0; i<64; i++){
3892  int one= 36;
3893  int qns=4;
3894  int w;
3895 
3896  w= FFABS(weight[i]) + qns*one;
3897  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
3898 
3899  weight[i] = w;
3900 // w=weight[i] = (63*qns + (w/2)) / w;
3901 
3902  av_assert2(w>0);
3903  av_assert2(w<(1<<6));
3904  sum += w*w;
3905  }
3906  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
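 /* The weights were just mapped into the 16..63 range; lambda is rescaled by
  * the sum of their squares so that the rate term (VLC bits * lambda) and the
  * distortion values returned by try_8x8basis() stay on a comparable scale. */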
3907 #ifdef REFINE_STATS
3908 {START_TIMER
3909 #endif
3910  run=0;
3911  rle_index=0;
3912  for(i=start_i; i<=last_non_zero; i++){
3913  int j= perm_scantable[i];
3914  const int level= block[j];
3915  int coeff;
3916 
3917  if(level){
3918  if(level<0) coeff= qmul*level - qadd;
3919  else coeff= qmul*level + qadd;
3920  run_tab[rle_index++]=run;
3921  run=0;
3922 
3923  s->dsp.add_8x8basis(rem, basis[j], coeff);
3924  }else{
3925  run++;
3926  }
3927  }
3928 #ifdef REFINE_STATS
3929 if(last_non_zero>0){
3930 STOP_TIMER("init rem[]")
3931 }
3932 }
3933 
3934 {START_TIMER
3935 #endif
3936  for(;;){
3937  int best_score=s->dsp.try_8x8basis(rem, weight, basis[0], 0);
3938  int best_coeff=0;
3939  int best_change=0;
3940  int run2, best_unquant_change=0, analyze_gradient;
3941 #ifdef REFINE_STATS
3942 {START_TIMER
3943 #endif
3944  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
3945 
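 /* With gradient analysis enabled, d1[] receives the DCT of the weighted
  * spatial error. Further below, a candidate that would create a new +-1
  * coefficient is skipped when its sign matches that of the corresponding
  * gradient entry, since such a change would move the reconstruction further
  * away from the original block. */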
3946  if(analyze_gradient){
3947 #ifdef REFINE_STATS
3948 {START_TIMER
3949 #endif
3950  for(i=0; i<64; i++){
3951  int w= weight[i];
3952 
3953  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
3954  }
3955 #ifdef REFINE_STATS
3956 STOP_TIMER("rem*w*w")}
3957 {START_TIMER
3958 #endif
3959  s->dsp.fdct(d1);
3960 #ifdef REFINE_STATS
3961 STOP_TIMER("dct")}
3962 #endif
3963  }
3964 
3965  if(start_i){
3966  const int level= block[0];
3967  int change, old_coeff;
3968 
3969  av_assert2(s->mb_intra);
3970 
3971  old_coeff= q*level;
3972 
3973  for(change=-1; change<=1; change+=2){
3974  int new_level= level + change;
3975  int score, new_coeff;
3976 
3977  new_coeff= q*new_level;
3978  if(new_coeff >= 2048 || new_coeff < 0)
3979  continue;
3980 
3981  score= s->dsp.try_8x8basis(rem, weight, basis[0], new_coeff - old_coeff);
3982  if(score<best_score){
3983  best_score= score;
3984  best_coeff= 0;
3985  best_change= change;
3986  best_unquant_change= new_coeff - old_coeff;
3987  }
3988  }
3989  }
3990 
3991  run=0;
3992  rle_index=0;
3993  run2= run_tab[rle_index++];
3994  prev_level=0;
3995  prev_run=0;
3996 
3997  for(i=start_i; i<64; i++){
3998  int j= perm_scantable[i];
3999  const int level= block[j];
4000  int change, old_coeff;
4001 
4002  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4003  break;
4004 
4005  if(level){
4006  if(level<0) old_coeff= qmul*level - qadd;
4007  else old_coeff= qmul*level + qadd;
4008  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4009  }else{
4010  old_coeff=0;
4011  run2--;
4012  av_assert2(run2>=0 || i >= last_non_zero );
4013  }
4014 
4015  for(change=-1; change<=1; change+=2){
4016  int new_level= level + change;
4017  int score, new_coeff, unquant_change;
4018 
4019  score=0;
4020  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4021  continue;
4022 
4023  if(new_level){
4024  if(new_level<0) new_coeff= qmul*new_level - qadd;
4025  else new_coeff= qmul*new_level + qadd;
4026  if(new_coeff >= 2048 || new_coeff <= -2048)
4027  continue;
4028  //FIXME check for overflow
4029 
4030  if(level){
4031  if(level < 63 && level > -63){
4032  if(i < last_non_zero)
4033  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4034  - length[UNI_AC_ENC_INDEX(run, level+64)];
4035  else
4036  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4037  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4038  }
4039  }else{
4040  av_assert2(FFABS(new_level)==1);
4041 
4042  if(analyze_gradient){
4043  int g= d1[ scantable[i] ];
4044  if(g && (g^new_level) >= 0)
4045  continue;
4046  }
4047 
4048  if(i < last_non_zero){
4049  int next_i= i + run2 + 1;
4050  int next_level= block[ perm_scantable[next_i] ] + 64;
4051 
4052  if(next_level&(~127))
4053  next_level= 0;
4054 
4055  if(next_i < last_non_zero)
4056  score += length[UNI_AC_ENC_INDEX(run, 65)]
4057  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4058  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4059  else
4060  score += length[UNI_AC_ENC_INDEX(run, 65)]
4061  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4062  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4063  }else{
4064  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4065  if(prev_level){
4066  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4067  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4068  }
4069  }
4070  }
4071  }else{
4072  new_coeff=0;
4073  av_assert2(FFABS(level)==1);
4074 
4075  if(i < last_non_zero){
4076  int next_i= i + run2 + 1;
4077  int next_level= block[ perm_scantable[next_i] ] + 64;
4078 
4079  if(next_level&(~127))
4080  next_level= 0;
4081 
4082  if(next_i < last_non_zero)
4083  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4084  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4085  - length[UNI_AC_ENC_INDEX(run, 65)];
4086  else
4087  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4088  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4089  - length[UNI_AC_ENC_INDEX(run, 65)];
4090  }else{
4091  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4092  if(prev_level){
4093  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4094  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4095  }
4096  }
4097  }
4098 
4099  score *= lambda;
4100 
4101  unquant_change= new_coeff - old_coeff;
4102  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4103 
4104  score+= s->dsp.try_8x8basis(rem, weight, basis[j], unquant_change);
4105  if(score<best_score){
4106  best_score= score;
4107  best_coeff= i;
4108  best_change= change;
4109  best_unquant_change= unquant_change;
4110  }
4111  }
4112  if(level){
4113  prev_level= level + 64;
4114  if(prev_level&(~127))
4115  prev_level= 0;
4116  prev_run= run;
4117  run=0;
4118  }else{
4119  run++;
4120  }
4121  }
4122 #ifdef REFINE_STATS
4123 STOP_TIMER("iterative step")}
4124 #endif
4125 
4126  if(best_change){
4127  int j= perm_scantable[ best_coeff ];
4128 
4129  block[j] += best_change;
4130 
4131  if(best_coeff > last_non_zero){
4132  last_non_zero= best_coeff;
4133  av_assert2(block[j]);
4134 #ifdef REFINE_STATS
4135 after_last++;
4136 #endif
4137  }else{
4138 #ifdef REFINE_STATS
4139 if(block[j]){
4140  if(block[j] - best_change){
4141  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4142  raise++;
4143  }else{
4144  lower++;
4145  }
4146  }else{
4147  from_zero++;
4148  }
4149 }else{
4150  to_zero++;
4151 }
4152 #endif
4153  for(; last_non_zero>=start_i; last_non_zero--){
4154  if(block[perm_scantable[last_non_zero]])
4155  break;
4156  }
4157  }
4158 #ifdef REFINE_STATS
4159 count++;
4160 if(256*256*256*64 % count == 0){
4161  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4162 }
4163 #endif
4164  run=0;
4165  rle_index=0;
4166  for(i=start_i; i<=last_non_zero; i++){
4167  int j= perm_scantable[i];
4168  const int level= block[j];
4169 
4170  if(level){
4171  run_tab[rle_index++]=run;
4172  run=0;
4173  }else{
4174  run++;
4175  }
4176  }
4177 
4178  s->dsp.add_8x8basis(rem, basis[j], best_unquant_change);
4179  }else{
4180  break;
4181  }
4182  }
4183 #ifdef REFINE_STATS
4184 if(last_non_zero>0){
4185 STOP_TIMER("iterative search")
4186 }
4187 }
4188 #endif
4189 
4190  return last_non_zero;
4191 }
4192 
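/* Default (non-trellis) quantizer: forward DCT, optional DCT-domain denoising,
 * separate handling of the intra DC coefficient, then dead-zone quantization
 * of the remaining coefficients. The backwards scan only locates the last
 * coefficient that survives quantization; the forward scan then performs the
 * actual bias-and-shift division. *overflow reports whether any quantized
 * magnitude exceeded s->max_qcoeff, and the surviving coefficients are finally
 * permuted into the order expected by the selected IDCT. */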
4193 int ff_dct_quantize_c(MpegEncContext *s,
4194  DCTELEM *block, int n,
4195  int qscale, int *overflow)
4196 {
4197  int i, j, level, last_non_zero, q, start_i;
4198  const int *qmat;
4199  const uint8_t *scantable= s->intra_scantable.scantable;
4200  int bias;
4201  int max=0;
4202  unsigned int threshold1, threshold2;
4203 
4204  s->dsp.fdct (block);
4205 
4206  if(s->dct_error_sum)
4207  s->denoise_dct(s, block);
4208 
4209  if (s->mb_intra) {
4210  if (!s->h263_aic) {
4211  if (n < 4)
4212  q = s->y_dc_scale;
4213  else
4214  q = s->c_dc_scale;
4215  q = q << 3;
4216  } else
4217  /* For AIC we skip quant/dequant of INTRADC */
4218  q = 1 << 3;
4219 
4220  /* note: block[0] is assumed to be positive */
4221  block[0] = (block[0] + (q >> 1)) / q;
4222  start_i = 1;
4223  last_non_zero = 0;
4224  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4225  bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4226  } else {
4227  start_i = 0;
4228  last_non_zero = -1;
4229  qmat = s->q_inter_matrix[qscale];
4230  bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
4231  }
4232  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4233  threshold2= (threshold1<<1);
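 /* The unsigned comparisons below are a branch-free test of
  * |level| > threshold1: since threshold2 == 2*threshold1, level+threshold1
  * falls inside [0, threshold2] exactly when level lies in
  * [-threshold1, threshold1]; anything outside wraps around or overshoots. */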
4234  for(i=63;i>=start_i;i--) {
4235  j = scantable[i];
4236  level = block[j] * qmat[j];
4237 
4238  if(((unsigned)(level+threshold1))>threshold2){
4239  last_non_zero = i;
4240  break;
4241  }else{
4242  block[j]=0;
4243  }
4244  }
4245  for(i=start_i; i<=last_non_zero; i++) {
4246  j = scantable[i];
4247  level = block[j] * qmat[j];
4248 
4249 // if( bias+level >= (1<<QMAT_SHIFT)
4250 // || bias-level >= (1<<QMAT_SHIFT)){
4251  if(((unsigned)(level+threshold1))>threshold2){
4252  if(level>0){
4253  level= (bias + level)>>QMAT_SHIFT;
4254  block[j]= level;
4255  }else{
4256  level= (bias - level)>>QMAT_SHIFT;
4257  block[j]= -level;
4258  }
4259  max |=level;
4260  }else{
4261  block[j]=0;
4262  }
4263  }
4264  *overflow= s->max_qcoeff < max; //overflow might have happened
4265 
4266  /* we need this permutation so that we correct the IDCT; we only permute the nonzero elements */
4267  if (s->idct_permutation_type != FF_NO_IDCT_PERM)
4268  ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);
4269 
4270  return last_non_zero;
4271 }
4272 
4273 #define OFFSET(x) offsetof(MpegEncContext, x)
4274 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4275 static const AVOption h263_options[] = {
4276  { "obmc", "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4277  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4278  { "mb_info", "Emit macroblock info for RFC 2190 packetization; the parameter value is the maximum payload size.", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4279  FF_MPV_COMMON_OPTS
4280  { NULL },
4281 };
4282 
4283 static const AVClass h263_class = {
4284  .class_name = "H.263 encoder",
4285  .item_name = av_default_item_name,
4286  .option = h263_options,
4287  .version = LIBAVUTIL_VERSION_INT,
4288 };
4289 
4290 AVCodec ff_h263_encoder = {
4291  .name = "h263",
4292  .type = AVMEDIA_TYPE_VIDEO,
4293  .id = AV_CODEC_ID_H263,
4294  .priv_data_size = sizeof(MpegEncContext),
4295  .init = ff_MPV_encode_init,
4296  .encode2 = ff_MPV_encode_picture,
4297  .close = ff_MPV_encode_end,
4298  .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
4299  .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4300  .priv_class = &h263_class,
4301 };
4302 
4303 static const AVOption h263p_options[] = {
4304  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4305  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4306  { "obmc", "Use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4307  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
4308  FF_MPV_COMMON_OPTS
4309  { NULL },
4310 };
4311 static const AVClass h263p_class = {
4312  .class_name = "H.263p encoder",
4313  .item_name = av_default_item_name,
4314  .option = h263p_options,
4315  .version = LIBAVUTIL_VERSION_INT,
4316 };
4317 
4318 AVCodec ff_h263p_encoder = {
4319  .name = "h263p",
4320  .type = AVMEDIA_TYPE_VIDEO,
4321  .id = AV_CODEC_ID_H263P,
4322  .priv_data_size = sizeof(MpegEncContext),
4323  .init = ff_MPV_encode_init,
4324  .encode2 = ff_MPV_encode_picture,
4325  .close = ff_MPV_encode_end,
4326  .capabilities = CODEC_CAP_SLICE_THREADS,
4327  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4328  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4329  .priv_class = &h263p_class,
4330 };
4331 
4332 FF_MPV_GENERIC_CLASS(msmpeg4v2)
4333 
4334 AVCodec ff_msmpeg4v2_encoder = {
4335  .name = "msmpeg4v2",
4336  .type = AVMEDIA_TYPE_VIDEO,
4337  .id = AV_CODEC_ID_MSMPEG4V2,
4338  .priv_data_size = sizeof(MpegEncContext),
4339  .init = ff_MPV_encode_init,
4340  .encode2 = ff_MPV_encode_picture,
4341  .close = ff_MPV_encode_end,
4342  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4343  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4344  .priv_class = &msmpeg4v2_class,
4345 };
4346 
4347 FF_MPV_GENERIC_CLASS(msmpeg4v3)
4348 
4349 AVCodec ff_msmpeg4v3_encoder = {
4350  .name = "msmpeg4",
4351  .type = AVMEDIA_TYPE_VIDEO,
4352  .id = AV_CODEC_ID_MSMPEG4V3,
4353  .priv_data_size = sizeof(MpegEncContext),
4354  .init = ff_MPV_encode_init,
4355  .encode2 = ff_MPV_encode_picture,
4356  .close = ff_MPV_encode_end,
4357  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4358  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4359  .priv_class = &msmpeg4v3_class,
4360 };
4361 
4362 FF_MPV_GENERIC_CLASS(wmv1)
4363 
4364 AVCodec ff_wmv1_encoder = {
4365  .name = "wmv1",
4366  .type = AVMEDIA_TYPE_VIDEO,
4367  .id = AV_CODEC_ID_WMV1,
4368  .priv_data_size = sizeof(MpegEncContext),
4369  .init = ff_MPV_encode_init,
4370  .encode2 = ff_MPV_encode_picture,
4371  .close = ff_MPV_encode_end,
4372  .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
4373  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4374  .priv_class = &wmv1_class,
4375 };
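 
/*
 * Illustrative sketch (assumptions, not part of this file): how one of the
 * encoders registered above could be driven through the public libavcodec API
 * of this era. "frame" is assumed to be a valid AV_PIX_FMT_YUV420P AVFrame;
 * error handling is omitted for brevity.
 *
 *     avcodec_register_all();
 *
 *     AVCodec *codec = avcodec_find_encoder_by_name("h263p");
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *
 *     avctx->width     = 352;
 *     avctx->height    = 288;
 *     avctx->time_base = (AVRational){ 1, 25 };
 *     avctx->pix_fmt   = AV_PIX_FMT_YUV420P;
 *     avctx->bit_rate  = 400000;
 *
 *     // Private options declared in h263p_options[] above:
 *     av_opt_set_int(avctx->priv_data, "umv", 1, 0);
 *     av_opt_set_int(avctx->priv_data, "obmc", 1, 0);
 *
 *     avcodec_open2(avctx, codec, NULL);
 *
 *     AVPacket pkt;
 *     int got_packet = 0;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL;
 *     pkt.size = 0;
 *
 *     avcodec_encode_video2(avctx, &pkt, frame, &got_packet);
 *     if (got_packet)
 *         av_free_packet(&pkt);
 *
 *     avcodec_close(avctx);
 *     av_free(avctx);
 */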