FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
43 #include "avcodec.h"
44 #include "dct.h"
45 #include "idctdsp.h"
46 #include "mpeg12.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
49 #include "h261.h"
50 #include "h263.h"
51 #include "h263data.h"
52 #include "mjpegenc_common.h"
53 #include "mathops.h"
54 #include "mpegutils.h"
55 #include "mjpegenc.h"
56 #include "speedhqenc.h"
57 #include "msmpeg4.h"
58 #include "pixblockdsp.h"
59 #include "qpeldsp.h"
60 #include "faandct.h"
61 #include "thread.h"
62 #include "aandcttab.h"
63 #include "flv.h"
64 #include "mpeg4video.h"
65 #include "internal.h"
66 #include "bytestream.h"
67 #include "wmv2.h"
68 #include "rv10.h"
69 #include "packet_internal.h"
70 #include <limits.h>
71 #include "sp5x.h"
72 
73 #define QUANT_BIAS_SHIFT 8
74 
75 #define QMAT_SHIFT_MMX 16
76 #define QMAT_SHIFT 21
77 
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
83 
86 
89  { NULL },
90 };
91 
/**
 * Build per-qscale quantization multiplier tables for the encoder.
 *
 * For each qscale in [qmin, qmax] this fills qmat[qscale] with 32-bit
 * reciprocal multipliers (used by the C and trellis quantizers) and, on
 * the generic-FDCT path, qmat16[qscale][0]/[1] with 16-bit multiplier /
 * bias pairs.  Which layout is produced depends on the forward DCT in
 * use, because ff_fdct_ifast leaves the AAN scale factors folded into
 * its output (see the ff_aanscales factor in the second branch).
 *
 * NOTE(review): this extraction is missing the original line closing the
 * first fdct comparison (orig line 112, the ff_jpeg_fdct_islow_10 case in
 * upstream FFmpeg — verify against the repository); the code below is
 * reproduced exactly as extracted.
 *
 * @param qmat         [out] 32-bit quantizer multipliers, indexed [qscale][coef]
 * @param qmat16       [out] 16-bit multiplier ([0]) and bias ([1]) tables,
 *                     filled only on the generic-FDCT path
 * @param quant_matrix quantization matrix, read through idct_permutation
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param qmin         first qscale index to fill
 * @param qmax         last qscale index to fill (inclusive)
 * @param intra        nonzero to skip coefficient 0 (DC) in the overflow scan
 */
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
 93  uint16_t (*qmat16)[2][64],
 94  const uint16_t *quant_matrix,
 95  int bias, int qmin, int qmax, int intra)
 96 {
 97  FDCTDSPContext *fdsp = &s->fdsp;
 98  int qscale;
 99  int shift = 0;
 100 
 101  for (qscale = qmin; qscale <= qmax; qscale++) {
 102  int i;
 103  int qscale2;
 104 
/* MPEG-2 non-linear mode maps the qscale index through a lookup table;
 * otherwise the effective quantizer step is simply 2 * qscale. */
 105  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
 106  else qscale2 = qscale << 1;
 107 
 108  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
 109 #if CONFIG_FAANDCT
 110  fdsp->fdct == ff_faandct ||
 111 #endif /* CONFIG_FAANDCT */
 113  for (i = 0; i < 64; i++) {
 114  const int j = s->idsp.idct_permutation[i];
 115  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 116  /* 16 <= qscale * quant_matrix[i] <= 7905
 117  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 118  * 19952 <= x <= 249205026
 119  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 120  * 3444240 >= (1 << 36) / (x) >= 275 */
 121 
 122  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 123  }
 124  } else if (fdsp->fdct == ff_fdct_ifast) {
 125  for (i = 0; i < 64; i++) {
 126  const int j = s->idsp.idct_permutation[i];
 127  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
 128  /* 16 <= qscale * quant_matrix[i] <= 7905
 129  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 130  * 19952 <= x <= 249205026
 131  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 132  * 3444240 >= (1 << 36) / (x) >= 275 */
 133 
 134  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
 135  }
 136  } else {
 137  for (i = 0; i < 64; i++) {
 138  const int j = s->idsp.idct_permutation[i];
 139  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 140  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
 141  * Assume x = qscale * quant_matrix[i]
 142  * So 16 <= x <= 7905
 143  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
 144  * so 32768 >= (1 << 19) / (x) >= 67 */
 145  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 146  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
 147  // (qscale * quant_matrix[i]);
 148  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
 149 
/* Clamp degenerate 16-bit multipliers (0 or exactly 128*256) so the
 * bias division below stays valid and the value fits the SIMD path. */
 150  if (qmat16[qscale][0][i] == 0 ||
 151  qmat16[qscale][0][i] == 128 * 256)
 152  qmat16[qscale][0][i] = 128 * 256 - 1;
 153  qmat16[qscale][1][i] =
 154  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
 155  qmat16[qscale][0][i]);
 156  }
 157  }
 158 
/* Scan for 32-bit overflow at the largest representable coefficient
 * (8191, pre-scaled by ff_aanscales for the ifast DCT); 'shift' records
 * how many extra bits would be needed and only triggers the warning. */
 159  for (i = intra; i < 64; i++) {
 160  int64_t max = 8191;
 161  if (fdsp->fdct == ff_fdct_ifast) {
 162  max = (8191LL * ff_aanscales[i]) >> 14;
 163  }
 164  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
 165  shift++;
 166  }
 167  }
 168  }
 169  if (shift) {
 170  av_log(s->avctx, AV_LOG_INFO,
 171  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
 172  QMAT_SHIFT - shift);
 173  }
 174 }
175 
/**
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 *
 * The first branch — a nearest-match search through the MPEG-2
 * non-linear qscale table — is deliberately disabled by the "&& 0" in
 * its condition, so the linear mapping in the else branch is always
 * taken: qscale = lambda * 139 / 2^(FF_LAMBDA_SHIFT+7) (rounded), then
 * clipped to [qmin, qmax] (qmax widened to 31 when vbv_ignore_qmax is
 * set).
 *
 * NOTE(review): orig line 201 (the shift amount terminating the lambda2
 * expression, FF_LAMBDA_SHIFT in upstream FFmpeg — verify) is missing
 * from this extraction; code reproduced as-is.
 */
176 static inline void update_qscale(MpegEncContext *s)
 177 {
 178  if (s->q_scale_type == 1 && 0) {
 179  int i;
 180  int bestdiff=INT_MAX;
 181  int best = 1;
 182 
/* Pick the table index whose non-linear qscale best matches lambda,
 * skipping entries outside the configured qmin/qmax window. */
 183  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
 184  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
 185  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
 186  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
 187  continue;
 188  if (diff < bestdiff) {
 189  bestdiff = diff;
 190  best = i;
 191  }
 192  }
 193  s->qscale = best;
 194  } else {
 195  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
 196  (FF_LAMBDA_SHIFT + 7);
 197  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
 198  }
 199 
/* Keep the squared-lambda companion value in sync with lambda. */
 200  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
 202 }
203 
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
205 {
206  int i;
207 
208  if (matrix) {
209  put_bits(pb, 1, 1);
210  for (i = 0; i < 64; i++) {
211  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
212  }
213  } else
214  put_bits(pb, 1, 0);
215 }
216 
/* NOTE(review): the function signature line (orig line 220; presumably
 * "void ff_init_qscale_tab(MpegEncContext *s)" — verify against upstream
 * FFmpeg) is missing from this extraction; body reproduced as-is.
 *
 * Converts each macroblock's lambda from s->lambda_table into a qscale
 * value (same lambda -> qp mapping as update_qscale) clipped to
 * [qmin, qmax], and stores it in the current picture's qscale_table. */
217 /**
 218  * init s->current_picture.qscale_table from s->lambda_table
 219  */
 221 {
 222  int8_t * const qscale_table = s->current_picture.qscale_table;
 223  int i;
 224 
 225  for (i = 0; i < s->mb_num; i++) {
 226  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
 227  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
 228  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
 229  s->avctx->qmax);
 230  }
 231 }
232 
/* NOTE(review): the function signature (orig lines 233-234; presumably
 * "static void update_duplicate_context_after_me(MpegEncContext *dst,
 * MpegEncContext *src)" — verify against upstream FFmpeg) and orig lines
 * 238/244-245 (additional COPY entries) are missing from this
 * extraction; body reproduced as-is.
 *
 * Copies per-frame coding parameters from src to dst via a temporary
 * COPY() macro, which is #undef'd immediately after use. */
235 {
 236 #define COPY(a) dst->a= src->a
 237  COPY(pict_type);
 239  COPY(f_code);
 240  COPY(b_code);
 241  COPY(qscale);
 242  COPY(lambda);
 243  COPY(lambda2);
 246  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
 247  COPY(progressive_frame); // FIXME don't set in encode_header
 248  COPY(partitioned_frame); // FIXME don't set in encode_header
 249 #undef COPY
 250 }
251 
252 static void mpv_encode_init_static(void)
253 {
254  for (int i = -16; i < 16; i++)
255  default_fcode_tab[i + MAX_MV] = 1;
256 }
257 
/* NOTE(review): the function signature (orig line 262; presumably
 * "void ff_mpv_encode_defaults(MpegEncContext *s)" — verify against
 * upstream FFmpeg) and orig line 266 are missing from this extraction;
 * body reproduced as-is. */
258 /**
 259  * Set the given MpegEncContext to defaults for encoding.
 260  * the changed fields will not depend upon the prior state of the MpegEncContext.
 261  */
 263 {
 264  static AVOnce init_static_once = AV_ONCE_INIT;
 265 
 267 
/* Initialize the shared static tables exactly once, process-wide. */
 268  ff_thread_once(&init_static_once, mpv_encode_init_static);
 269 
 270  s->me.mv_penalty = default_mv_penalty;
 271  s->fcode_tab = default_fcode_tab;
 272 
 273  s->input_picture_number = 0;
 274  s->picture_in_gop_number = 0;
 275 }
276 
/* NOTE(review): the function signature (orig line 277; presumably
 * "av_cold int ff_dct_encode_init(MpegEncContext *s)" — verify against
 * upstream FFmpeg) and orig line 280 (the x86 init call guarded by the
 * ARCH_X86 check) are missing from this extraction; body reproduced
 * as-is.
 *
 * Selects the DCT quantizer implementations: falls back to the C
 * quantizer and denoiser when no arch-specific override was installed,
 * keeps the plain quantizer reachable as fast_dct_quantize, and swaps
 * in the trellis quantizer when avctx->trellis is enabled. */
278 {
 279  if (ARCH_X86)
 281 
 282  if (CONFIG_H263_ENCODER)
 283  ff_h263dsp_init(&s->h263dsp);
 284  if (!s->dct_quantize)
 285  s->dct_quantize = ff_dct_quantize_c;
 286  if (!s->denoise_dct)
 287  s->denoise_dct = denoise_dct_c;
 288  s->fast_dct_quantize = s->dct_quantize;
 289  if (s->avctx->trellis)
 290  s->dct_quantize = dct_quantize_trellis_c;
 291 
 292  return 0;
 293 }
294 
295 /* init video encoder */
297 {
299  AVCPBProperties *cpb_props;
300  int i, ret, format_supported;
301 
303 
304  switch (avctx->codec_id) {
306  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
309  "only YUV420 and YUV422 are supported\n");
310  return AVERROR(EINVAL);
311  }
312  break;
313  case AV_CODEC_ID_MJPEG:
314  case AV_CODEC_ID_AMV:
315  format_supported = 0;
316  /* JPEG color space */
324  format_supported = 1;
325  /* MPEG color space */
330  format_supported = 1;
331 
332  if (!format_supported) {
333  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334  return AVERROR(EINVAL);
335  }
336  break;
337  case AV_CODEC_ID_SPEEDHQ:
338  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
342  "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343  return AVERROR(EINVAL);
344  }
345  break;
346  default:
347  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349  return AVERROR(EINVAL);
350  }
351  }
352 
353  switch (avctx->pix_fmt) {
354  case AV_PIX_FMT_YUVJ444P:
355  case AV_PIX_FMT_YUV444P:
356  s->chroma_format = CHROMA_444;
357  break;
358  case AV_PIX_FMT_YUVJ422P:
359  case AV_PIX_FMT_YUV422P:
360  s->chroma_format = CHROMA_422;
361  break;
362  case AV_PIX_FMT_YUVJ420P:
363  case AV_PIX_FMT_YUV420P:
364  default:
365  s->chroma_format = CHROMA_420;
366  break;
367  }
368 
370 
371 #if FF_API_PRIVATE_OPT
373  if (avctx->rtp_payload_size)
374  s->rtp_payload_size = avctx->rtp_payload_size;
376  s->me_penalty_compensation = avctx->me_penalty_compensation;
377  if (avctx->pre_me)
378  s->me_pre = avctx->pre_me;
380 #endif
381 
382  s->bit_rate = avctx->bit_rate;
383  s->width = avctx->width;
384  s->height = avctx->height;
385  if (avctx->gop_size > 600 &&
388  "keyframe interval too large!, reducing it from %d to %d\n",
389  avctx->gop_size, 600);
390  avctx->gop_size = 600;
391  }
392  s->gop_size = avctx->gop_size;
393  s->avctx = avctx;
395  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396  "is %d.\n", MAX_B_FRAMES);
398  }
399  s->max_b_frames = avctx->max_b_frames;
400  s->codec_id = avctx->codec->id;
401  s->strict_std_compliance = avctx->strict_std_compliance;
402  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403  s->rtp_mode = !!s->rtp_payload_size;
404  s->intra_dc_precision = avctx->intra_dc_precision;
405 
406  // workaround some differences between how applications specify dc precision
407  if (s->intra_dc_precision < 0) {
408  s->intra_dc_precision += 8;
409  } else if (s->intra_dc_precision >= 8)
410  s->intra_dc_precision -= 8;
411 
412  if (s->intra_dc_precision < 0) {
414  "intra dc precision must be positive, note some applications use"
415  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416  return AVERROR(EINVAL);
417  }
418 
420  s->huffman = 0;
421 
422  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424  return AVERROR(EINVAL);
425  }
426  s->user_specified_pts = AV_NOPTS_VALUE;
427 
428  if (s->gop_size <= 1) {
429  s->intra_only = 1;
430  s->gop_size = 12;
431  } else {
432  s->intra_only = 0;
433  }
434 
435  /* Fixed QSCALE */
436  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
437 
438  s->adaptive_quant = (avctx->lumi_masking ||
439  avctx->dark_masking ||
442  avctx->p_masking ||
443  s->border_masking ||
444  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
445  !s->fixed_qscale;
446 
447  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
448 
450  switch(avctx->codec_id) {
453  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
454  break;
455  case AV_CODEC_ID_MPEG4:
459  if (avctx->rc_max_rate >= 15000000) {
460  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461  } else if(avctx->rc_max_rate >= 2000000) {
462  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463  } else if(avctx->rc_max_rate >= 384000) {
464  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
465  } else
466  avctx->rc_buffer_size = 40;
467  avctx->rc_buffer_size *= 16384;
468  break;
469  }
470  if (avctx->rc_buffer_size) {
471  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
472  }
473  }
474 
475  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477  return AVERROR(EINVAL);
478  }
479 
482  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
483  }
484 
486  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487  return AVERROR(EINVAL);
488  }
489 
491  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492  return AVERROR(EINVAL);
493  }
494 
495  if (avctx->rc_max_rate &&
499  "impossible bitrate constraints, this will fail\n");
500  }
501 
502  if (avctx->rc_buffer_size &&
503  avctx->bit_rate * (int64_t)avctx->time_base.num >
504  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506  return AVERROR(EINVAL);
507  }
508 
509  if (!s->fixed_qscale &&
512  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
514  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
515  if (nbt <= INT_MAX) {
516  avctx->bit_rate_tolerance = nbt;
517  } else
518  avctx->bit_rate_tolerance = INT_MAX;
519  }
520 
521  if (avctx->rc_max_rate &&
523  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
524  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
525  90000LL * (avctx->rc_buffer_size - 1) >
526  avctx->rc_max_rate * 0xFFFFLL) {
528  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
529  "specified vbv buffer is too large for the given bitrate!\n");
530  }
531 
532  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
533  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
534  s->codec_id != AV_CODEC_ID_FLV1) {
535  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
536  return AVERROR(EINVAL);
537  }
538 
539  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
541  "OBMC is only supported with simple mb decision\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
546  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
547  return AVERROR(EINVAL);
548  }
549 
550  if (s->max_b_frames &&
551  s->codec_id != AV_CODEC_ID_MPEG4 &&
552  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
553  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557  if (s->max_b_frames < 0) {
559  "max b frames must be 0 or positive for mpegvideo based encoders\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
564  s->codec_id == AV_CODEC_ID_H263 ||
565  s->codec_id == AV_CODEC_ID_H263P) &&
566  (avctx->sample_aspect_ratio.num > 255 ||
567  avctx->sample_aspect_ratio.den > 255)) {
569  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
573  }
574 
575  if ((s->codec_id == AV_CODEC_ID_H263 ||
576  s->codec_id == AV_CODEC_ID_H263P) &&
577  (avctx->width > 2048 ||
578  avctx->height > 1152 )) {
579  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
580  return AVERROR(EINVAL);
581  }
582  if ((s->codec_id == AV_CODEC_ID_H263 ||
583  s->codec_id == AV_CODEC_ID_H263P) &&
584  ((avctx->width &3) ||
585  (avctx->height&3) )) {
586  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
587  return AVERROR(EINVAL);
588  }
589 
590  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
591  (avctx->width > 4095 ||
592  avctx->height > 4095 )) {
593  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
594  return AVERROR(EINVAL);
595  }
596 
597  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
598  (avctx->width > 16383 ||
599  avctx->height > 16383 )) {
600  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
601  return AVERROR(EINVAL);
602  }
603 
604  if (s->codec_id == AV_CODEC_ID_RV10 &&
605  (avctx->width &15 ||
606  avctx->height&15 )) {
607  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
608  return AVERROR(EINVAL);
609  }
610 
611  if (s->codec_id == AV_CODEC_ID_RV20 &&
612  (avctx->width &3 ||
613  avctx->height&3 )) {
614  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
615  return AVERROR(EINVAL);
616  }
617 
618  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
619  s->codec_id == AV_CODEC_ID_WMV2) &&
620  avctx->width & 1) {
621  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
622  return AVERROR(EINVAL);
623  }
624 
626  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
627  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
628  return AVERROR(EINVAL);
629  }
630 
631 #if FF_API_PRIVATE_OPT
633  if (avctx->mpeg_quant)
634  s->mpeg_quant = 1;
636 #endif
637 
638  // FIXME mpeg2 uses that too
639  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
640  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
642  "mpeg2 style quantization not supported by codec\n");
643  return AVERROR(EINVAL);
644  }
645 
646  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
647  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
648  return AVERROR(EINVAL);
649  }
650 
651  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
653  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
654  return AVERROR(EINVAL);
655  }
656 
657  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
658  (s->codec_id == AV_CODEC_ID_AMV ||
659  s->codec_id == AV_CODEC_ID_MJPEG)) {
660  // Used to produce garbage with MJPEG.
662  "QP RD is no longer compatible with MJPEG or AMV\n");
663  return AVERROR(EINVAL);
664  }
665 
666 #if FF_API_PRIVATE_OPT
669  s->scenechange_threshold = avctx->scenechange_threshold;
671 #endif
672 
673  if (s->scenechange_threshold < 1000000000 &&
676  "closed gop with scene change detection are not supported yet, "
677  "set threshold to 1000000000\n");
678  return AVERROR_PATCHWELCOME;
679  }
680 
682  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
683  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
685  "low delay forcing is only available for mpeg2, "
686  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
687  return AVERROR(EINVAL);
688  }
689  if (s->max_b_frames != 0) {
691  "B-frames cannot be used with low delay\n");
692  return AVERROR(EINVAL);
693  }
694  }
695 
696  if (s->q_scale_type == 1) {
697  if (avctx->qmax > 28) {
699  "non linear quant only supports qmax <= 28 currently\n");
700  return AVERROR_PATCHWELCOME;
701  }
702  }
703 
704  if (avctx->slices > 1 &&
706  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
707  return AVERROR(EINVAL);
708  }
709 
710  if (avctx->thread_count > 1 &&
711  s->codec_id != AV_CODEC_ID_MPEG4 &&
712  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
713  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
714  s->codec_id != AV_CODEC_ID_MJPEG &&
715  (s->codec_id != AV_CODEC_ID_H263P)) {
717  "multi threaded encoding not supported by codec\n");
718  return AVERROR_PATCHWELCOME;
719  }
720 
721  if (avctx->thread_count < 1) {
723  "automatic thread number detection not supported by codec, "
724  "patch welcome\n");
725  return AVERROR_PATCHWELCOME;
726  }
727 
728  if (!avctx->time_base.den || !avctx->time_base.num) {
729  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
730  return AVERROR(EINVAL);
731  }
732 
733 #if FF_API_PRIVATE_OPT
735  if (avctx->b_frame_strategy)
736  s->b_frame_strategy = avctx->b_frame_strategy;
737  if (avctx->b_sensitivity != 40)
738  s->b_sensitivity = avctx->b_sensitivity;
740 #endif
741 
742  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
744  "notice: b_frame_strategy only affects the first pass\n");
745  s->b_frame_strategy = 0;
746  }
747 
749  if (i > 1) {
750  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
751  avctx->time_base.den /= i;
752  avctx->time_base.num /= i;
753  //return -1;
754  }
755 
756  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
757  // (a + x * 3 / 8) / x
758  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
759  s->inter_quant_bias = 0;
760  } else {
761  s->intra_quant_bias = 0;
762  // (a - x / 4) / x
763  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
764  }
765 
766  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
767  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
768  return AVERROR(EINVAL);
769  }
770 
771  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
772 
773  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
774  avctx->time_base.den > (1 << 16) - 1) {
776  "timebase %d/%d not supported by MPEG 4 standard, "
777  "the maximum admitted value for the timebase denominator "
778  "is %d\n", avctx->time_base.num, avctx->time_base.den,
779  (1 << 16) - 1);
780  return AVERROR(EINVAL);
781  }
782  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
783 
784  switch (avctx->codec->id) {
786  s->out_format = FMT_MPEG1;
787  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
788  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
789  break;
791  s->out_format = FMT_MPEG1;
792  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
793  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
794  s->rtp_mode = 1;
795  break;
796  case AV_CODEC_ID_MJPEG:
797  case AV_CODEC_ID_AMV:
798  s->out_format = FMT_MJPEG;
799  s->intra_only = 1; /* force intra only for jpeg */
800  if (!CONFIG_MJPEG_ENCODER)
802  if ((ret = ff_mjpeg_encode_init(s)) < 0)
803  return ret;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  case AV_CODEC_ID_SPEEDHQ:
808  s->out_format = FMT_SPEEDHQ;
809  s->intra_only = 1; /* force intra only for SHQ */
810  if (!CONFIG_SPEEDHQ_ENCODER)
812  if ((ret = ff_speedhq_encode_init(s)) < 0)
813  return ret;
814  avctx->delay = 0;
815  s->low_delay = 1;
816  break;
817  case AV_CODEC_ID_H261:
818  if (!CONFIG_H261_ENCODER)
820  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
822  "The specified picture size of %dx%d is not valid for the "
823  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
824  s->width, s->height);
825  return AVERROR(EINVAL);
826  }
827  s->out_format = FMT_H261;
828  avctx->delay = 0;
829  s->low_delay = 1;
830  s->rtp_mode = 0; /* Sliced encoding not supported */
831  break;
832  case AV_CODEC_ID_H263:
833  if (!CONFIG_H263_ENCODER)
836  s->width, s->height) == 8) {
838  "The specified picture size of %dx%d is not valid for "
839  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
840  "352x288, 704x576, and 1408x1152. "
841  "Try H.263+.\n", s->width, s->height);
842  return AVERROR(EINVAL);
843  }
844  s->out_format = FMT_H263;
845  avctx->delay = 0;
846  s->low_delay = 1;
847  break;
848  case AV_CODEC_ID_H263P:
849  s->out_format = FMT_H263;
850  s->h263_plus = 1;
851  /* Fx */
852  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
853  s->modified_quant = s->h263_aic;
854  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
855  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
856 
857  /* /Fx */
858  /* These are just to be sure */
859  avctx->delay = 0;
860  s->low_delay = 1;
861  break;
862  case AV_CODEC_ID_FLV1:
863  s->out_format = FMT_H263;
864  s->h263_flv = 2; /* format = 1; 11-bit codes */
865  s->unrestricted_mv = 1;
866  s->rtp_mode = 0; /* don't allow GOB */
867  avctx->delay = 0;
868  s->low_delay = 1;
869  break;
870  case AV_CODEC_ID_RV10:
871  s->out_format = FMT_H263;
872  avctx->delay = 0;
873  s->low_delay = 1;
874  break;
875  case AV_CODEC_ID_RV20:
876  s->out_format = FMT_H263;
877  avctx->delay = 0;
878  s->low_delay = 1;
879  s->modified_quant = 1;
880  s->h263_aic = 1;
881  s->h263_plus = 1;
882  s->loop_filter = 1;
883  s->unrestricted_mv = 0;
884  break;
885  case AV_CODEC_ID_MPEG4:
886  s->out_format = FMT_H263;
887  s->h263_pred = 1;
888  s->unrestricted_mv = 1;
889  s->low_delay = s->max_b_frames ? 0 : 1;
890  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
891  break;
893  s->out_format = FMT_H263;
894  s->h263_pred = 1;
895  s->unrestricted_mv = 1;
896  s->msmpeg4_version = 2;
897  avctx->delay = 0;
898  s->low_delay = 1;
899  break;
901  s->out_format = FMT_H263;
902  s->h263_pred = 1;
903  s->unrestricted_mv = 1;
904  s->msmpeg4_version = 3;
905  s->flipflop_rounding = 1;
906  avctx->delay = 0;
907  s->low_delay = 1;
908  break;
909  case AV_CODEC_ID_WMV1:
910  s->out_format = FMT_H263;
911  s->h263_pred = 1;
912  s->unrestricted_mv = 1;
913  s->msmpeg4_version = 4;
914  s->flipflop_rounding = 1;
915  avctx->delay = 0;
916  s->low_delay = 1;
917  break;
918  case AV_CODEC_ID_WMV2:
919  s->out_format = FMT_H263;
920  s->h263_pred = 1;
921  s->unrestricted_mv = 1;
922  s->msmpeg4_version = 5;
923  s->flipflop_rounding = 1;
924  avctx->delay = 0;
925  s->low_delay = 1;
926  break;
927  default:
928  return AVERROR(EINVAL);
929  }
930 
931 #if FF_API_PRIVATE_OPT
933  if (avctx->noise_reduction)
934  s->noise_reduction = avctx->noise_reduction;
936 #endif
937 
938  avctx->has_b_frames = !s->low_delay;
939 
940  s->encoding = 1;
941 
942  s->progressive_frame =
943  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
945  s->alternate_scan);
946 
947  /* init */
949  if ((ret = ff_mpv_common_init(s)) < 0)
950  return ret;
951 
952  ff_fdctdsp_init(&s->fdsp, avctx);
953  ff_me_cmp_init(&s->mecc, avctx);
954  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
955  ff_pixblockdsp_init(&s->pdsp, avctx);
956  ff_qpeldsp_init(&s->qdsp);
957 
958  if (s->msmpeg4_version) {
959  int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
960  if (!(s->ac_stats = av_mallocz(ac_stats_size)))
961  return AVERROR(ENOMEM);
962  }
963 
964  if (!(avctx->stats_out = av_mallocz(256)) ||
965  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
966  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
967  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
968  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
969  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
970  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
971  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
972  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
973  return AVERROR(ENOMEM);
974 
975  if (s->noise_reduction) {
976  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
977  return AVERROR(ENOMEM);
978  }
979 
981 
982  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
983  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
984 
985  if (s->slice_context_count > 1) {
986  s->rtp_mode = 1;
987 
989  s->h263_slice_structured = 1;
990  }
991 
992  s->quant_precision = 5;
993 
994 #if FF_API_PRIVATE_OPT
997  s->frame_skip_threshold = avctx->frame_skip_threshold;
999  s->frame_skip_factor = avctx->frame_skip_factor;
1000  if (avctx->frame_skip_exp)
1001  s->frame_skip_exp = avctx->frame_skip_exp;
1003  s->frame_skip_cmp = avctx->frame_skip_cmp;
1005 #endif
1006 
1007  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1008  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1009 
1010  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1012  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1014  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1016  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1017  && s->out_format == FMT_MPEG1)
1019 
1020  /* init q matrix */
1021  for (i = 0; i < 64; i++) {
1022  int j = s->idsp.idct_permutation[i];
1023  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1024  s->mpeg_quant) {
1025  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1026  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1027  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1028  s->intra_matrix[j] =
1029  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1030  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1031  s->intra_matrix[j] =
1032  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1033  } else {
1034  /* MPEG-1/2 */
1035  s->chroma_intra_matrix[j] =
1036  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1037  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1038  }
1039  if (avctx->intra_matrix)
1040  s->intra_matrix[j] = avctx->intra_matrix[i];
1041  if (avctx->inter_matrix)
1042  s->inter_matrix[j] = avctx->inter_matrix[i];
1043  }
1044 
1045  /* precompute matrix */
1046  /* for mjpeg, we do include qscale in the matrix */
1047  if (s->out_format != FMT_MJPEG) {
1048  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1049  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1050  31, 1);
1051  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1052  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1053  31, 0);
1054  }
1055 
1056  if ((ret = ff_rate_control_init(s)) < 0)
1057  return ret;
1058 
1059 #if FF_API_PRIVATE_OPT
1061  if (avctx->brd_scale)
1062  s->brd_scale = avctx->brd_scale;
1063 
1064  if (avctx->prediction_method)
1065  s->pred = avctx->prediction_method + 1;
1067 #endif
1068 
1069  if (s->b_frame_strategy == 2) {
1070  for (i = 0; i < s->max_b_frames + 2; i++) {
1071  s->tmp_frames[i] = av_frame_alloc();
1072  if (!s->tmp_frames[i])
1073  return AVERROR(ENOMEM);
1074 
1075  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1076  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1077  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1078 
1079  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1080  if (ret < 0)
1081  return ret;
1082  }
1083  }
1084 
1085  cpb_props = ff_add_cpb_side_data(avctx);
1086  if (!cpb_props)
1087  return AVERROR(ENOMEM);
1088  cpb_props->max_bitrate = avctx->rc_max_rate;
1089  cpb_props->min_bitrate = avctx->rc_min_rate;
1090  cpb_props->avg_bitrate = avctx->bit_rate;
1091  cpb_props->buffer_size = avctx->rc_buffer_size;
1092 
1093  return 0;
1094 }
1095 
/* NOTE(review): the function signature (orig lines 1096-1098; presumably
 * "av_cold int ff_mpv_encode_end(AVCodecContext *avctx)" with the
 * MpegEncContext fetched from avctx->priv_data — verify against upstream
 * FFmpeg) and several interior lines (orig 1101-1103, 1106, 1108, 1115)
 * are missing from this extraction; body reproduced as-is.
 *
 * Encoder teardown: frees the B-frame-strategy temp frames, the
 * new_picture tables/reference, the MSMPEG4 AC statistics, and all
 * quantizer matrix and picture-list allocations made at init time.
 * The chroma intra matrices may alias the luma ones, hence the pointer
 * comparison before freeing them. */
1097 {
 1099  int i;
 1100 
 1102 
 1104  if (CONFIG_MJPEG_ENCODER &&
 1105  s->out_format == FMT_MJPEG)
 1107 
 1109 
 1110  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
 1111  av_frame_free(&s->tmp_frames[i]);
 1112 
 1113  ff_free_picture_tables(&s->new_picture);
 1114  ff_mpeg_unref_picture(avctx, &s->new_picture);
 1116 
 1117  av_freep(&s->ac_stats);
 1118 
 1119  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
 1120  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
 1121  s->q_chroma_intra_matrix= NULL;
 1122  s->q_chroma_intra_matrix16= NULL;
 1123  av_freep(&s->q_intra_matrix);
 1124  av_freep(&s->q_inter_matrix);
 1125  av_freep(&s->q_intra_matrix16);
 1126  av_freep(&s->q_inter_matrix16);
 1127  av_freep(&s->input_picture);
 1128  av_freep(&s->reordered_input_picture);
 1129  av_freep(&s->dct_offset);
 1130 
 1131  return 0;
 1132 }
1133 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left of the 16x16 block
 * @param ref    constant value each pixel is compared against
 * @param stride distance in bytes between the starts of consecutive rows
 * @return sum over the 256 pixels of |src[pixel] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int acc = 0;
    int y, x;

    for (y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (x = 0; x < 16; x++) {
            const int diff = row[x] - ref;
            acc += diff >= 0 ? diff : -diff;
        }
    }

    return acc;
}
1147 
                          /* NOTE(review): first signature line elided by the
                           * extraction; presumably
                           * static int get_intra_count(MpegEncContext *s,
                           *                            uint8_t *src, ...) */
                          uint8_t *ref, int stride)
{
    int x, y, w, h;
    int acc = 0;

    /* Only consider whole 16x16 macroblocks; trailing partial rows/cols
     * are ignored. */
    w = s->width & ~15;
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            /* Inter cost: SAD of the block against the reference picture. */
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
                                     stride, 16);
            /* Intra cost proxy: deviation from the block mean
             * (pix_sum over 256 pixels, rounded). */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae = get_sae(src + offset, mean, stride);

            /* Count blocks where intra (plus a 500 bias) is cheaper. */
            acc += sae + 500 < sad;
        }
    }
    return acc;
}
1170 
1171 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1172 {
1173  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1174  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1175  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1176  &s->linesize, &s->uvlinesize);
1177 }
1178 
/* Take one user-supplied frame (or NULL to start flushing) into the encoder's
 * input queue. Validates/guesses pts, decides whether the frame can be used
 * "direct" (just referenced) or must be copied into an internal buffer, and
 * shifts the s->input_picture[] reorder buffer.
 * Returns 0 on success, a negative AVERROR on failure. */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* pts must be strictly increasing */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* remember input frame spacing for later dts generation */
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts given: extrapolate from the previous one, or fall back
             * to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" means the user frame can be referenced as-is: matching
         * strides, macroblock-aligned dimensions, and aligned pointers. */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                /* copy the user frame plane by plane into our buffer */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* interlaced MPEG-2 may need 32-line bottom padding */
                    if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    /* pad the bottom/right edge of non-aligned dimensions */
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1329 
/* Decide whether a frame is similar enough to the previous coded one to be
 * skipped entirely (frame-skip rate-control feature). Returns 1 to skip,
 * 0 to encode.
 * NOTE(review): signature elided by the extraction; presumably
 * static int skip_check(MpegEncContext *s, Picture *p, Picture *ref). */
{
    int x, y, plane;
    int score = 0;
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        /* luma plane is scanned in 8x8 blocks at 2x the macroblock grid */
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                /* non-shared pictures carry an INPLACE-style 16-byte offset */
                int off = p->shared ? 0 : 16;
                uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                /* accumulate per-block scores according to |frame_skip_exp| */
                switch (FFABS(s->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    if (score)
        score64 = score;
    /* negative exponent: normalize by picture size and take the inverse power */
    if (s->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
                      -1.0/s->frame_skip_exp);

    if (score64 < s->frame_skip_threshold)
        return 1;
    if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1370 
/* Encode one frame with an auxiliary encoder context and return the total
 * encoded size in bytes (summed over all packets), or a negative AVERROR.
 * NOTE(review): the signature and the actual send/receive calls are elided
 * by this extraction — presumably avcodec_send_frame() before the first
 * check, avcodec_receive_packet() at the top of the loop, and
 * av_packet_unref() after the size is accumulated. Confirm against the
 * full source; as shown, `ret` would be read uninitialized. */
{
    int ret;
    int size = 0;

    if (ret < 0)
        return ret;

    do {
        if (ret >= 0) {
            size += pkt->size;
        /* EAGAIN/EOF terminate draining normally; other errors propagate */
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    } while (ret >= 0);

    return size;
}
1391 
/* b_frame_strategy == 2: estimate the best number of B-frames by actually
 * encoding downscaled candidate GOP structures with a scratch encoder and
 * comparing rate-distortion costs. Returns the best B-frame count, or a
 * negative error code.
 * NOTE(review): signature elided; presumably
 * static int estimate_best_b_count(MpegEncContext *s). Several statements
 * are also elided (see inline NOTEs). */
{
    const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
    AVPacket *pkt;
    const int scale = s->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
    int i, j, out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;

    av_assert0(scale >= 0 && scale <= 3);

    pkt = av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);

    //emms_c();
    //s->next_picture_ptr->quality;
    p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
    //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
    b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
    if (!b_lambda) // FIXME we should do this somewhere else
        b_lambda = p_lambda;
    /* NOTE(review): the shift amount (presumably FF_LAMBDA_SHIFT) is elided */
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>

    /* downscale the reference + candidate input pictures into tmp_frames */
    for (i = 0; i < s->max_b_frames + 2; i++) {
        Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
                                                s->next_picture_ptr;
        uint8_t *data[4];

        if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
            pre_input = *pre_input_ptr;
            memcpy(data, pre_input_ptr->f->data, sizeof(data));

            if (!pre_input.shared && i) {
                data[0] += INPLACE_OFFSET;
                data[1] += INPLACE_OFFSET;
                data[2] += INPLACE_OFFSET;
            }

            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
                                       s->tmp_frames[i]->linesize[0],
                                       data[0],
                                       pre_input.f->linesize[0],
                                       width, height);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
                                       s->tmp_frames[i]->linesize[1],
                                       data[1],
                                       pre_input.f->linesize[1],
                                       width >> 1, height >> 1);
            s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
                                       s->tmp_frames[i]->linesize[2],
                                       data[2],
                                       pre_input.f->linesize[2],
                                       width >> 1, height >> 1);
        }
    }

    /* try j B-frames between references, for j = 0 .. max_b_frames */
    for (j = 0; j < s->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;

        if (!s->input_picture[j])
            break;

        /* NOTE(review): the allocation of `c` (presumably
         * avcodec_alloc_context3()) is elided here */
        if (!c) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        c->width = width;
        c->height = height;
        c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
        c->mb_decision = s->avctx->mb_decision;
        c->me_cmp = s->avctx->me_cmp;
        c->mb_cmp = s->avctx->mb_cmp;
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
        c->max_b_frames = s->max_b_frames;

        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0)
            goto fail;


        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;

        out_size = encode_frame(c, s->tmp_frames[0], pkt);
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }

        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;

        for (i = 0; i < s->max_b_frames + 1; i++) {
            int is_p = i % (j + 1) == j || i == s->max_b_frames;

            /* NOTE(review): the type expression continuation (presumably
             * AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;) is elided */
            s->tmp_frames[i + 1]->pict_type = is_p ?
            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;

            out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
            if (out_size < 0) {
                ret = out_size;
                goto fail;
            }

            rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
        }

        /* get the delayed frames */
        /* NOTE(review): the flush call (presumably
         * out_size = encode_frame(c, NULL, pkt);) is elided */
        if (out_size < 0) {
            ret = out_size;
            goto fail;
        }
        rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);

        rd += c->error[0] + c->error[1] + c->error[2];

        if (rd < best_rd) {
            best_rd = rd;
            best_b_count = j;
        }

fail:
        /* NOTE(review): the context cleanup (presumably
         * avcodec_free_context(&c);) is elided */
        if (ret < 0) {
            best_b_count = ret;
            break;
        }
    }

    av_packet_free(&pkt);

    return best_b_count;
}
1538 
/* Pick the next picture to encode: apply frame skipping, decide the number
 * of B-frames (per b_frame_strategy), assign picture types and coded order,
 * and set up s->new_picture / s->current_picture(_ptr).
 * NOTE(review): signature elided; presumably
 * static int select_input_picture(MpegEncContext *s). */
{
    int i, ret;

    /* shift the reorder buffer down by one */
    for (i = 1; i < MAX_PICTURE_COUNT; i++)
        s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
    s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;

    /* set next picture type & ordering */
    if (!s->reordered_input_picture[0] && s->input_picture[0]) {
        if (s->frame_skip_threshold || s->frame_skip_factor) {
            if (s->picture_in_gop_number < s->gop_size &&
                s->next_picture_ptr &&
                skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
                // FIXME check that the gop check above is +-1 correct
                av_frame_unref(s->input_picture[0]->f);

                ff_vbv_update(s, 0);

                goto no_output_pic;
            }
        }

        if (/*s->picture_in_gop_number >= s->gop_size ||*/
            !s->next_picture_ptr || s->intra_only) {
            /* no reference yet (or intra-only): code as I */
            s->reordered_input_picture[0] = s->input_picture[0];
            s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
        } else {
            int b_frames = 0;

            /* pass 2: reuse the picture types recorded in the stats file */
            if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    int pict_num = s->input_picture[0]->f->display_picture_number + i;

                    if (pict_num >= s->rc_context.num_entries)
                        break;
                    if (!s->input_picture[i]) {
                        s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
                        break;
                    }

                    s->input_picture[i]->f->pict_type =
                        s->rc_context.entry[pict_num].new_pict_type;
                }
            }

            if (s->b_frame_strategy == 0) {
                /* fixed: as many B-frames as are queued, up to max */
                b_frames = s->max_b_frames;
                while (b_frames && !s->input_picture[b_frames])
                    b_frames--;
            } else if (s->b_frame_strategy == 1) {
                /* heuristic: score each candidate by intra-block count */
                for (i = 1; i < s->max_b_frames + 1; i++) {
                    if (s->input_picture[i] &&
                        s->input_picture[i]->b_frame_score == 0) {
                        /* NOTE(review): the scoring call head (presumably
                         * get_intra_count(s,) is elided here */
                        s->input_picture[i]->b_frame_score =
                            s->input_picture[i ]->f->data[0],
                            s->input_picture[i - 1]->f->data[0],
                            s->linesize) + 1;
                    }
                }
                for (i = 0; i < s->max_b_frames + 1; i++) {
                    if (!s->input_picture[i] ||
                        s->input_picture[i]->b_frame_score - 1 >
                            s->mb_num / s->b_sensitivity)
                        break;
                }

                b_frames = FFMAX(0, i - 1);

                /* reset scores */
                for (i = 0; i < b_frames + 1; i++) {
                    s->input_picture[i]->b_frame_score = 0;
                }
            } else if (s->b_frame_strategy == 2) {
                /* exhaustive: trial-encode downscaled candidates */
                b_frames = estimate_best_b_count(s);
                if (b_frames < 0)
                    return b_frames;
            }

            emms_c();

            /* a forced non-B type inside the run truncates it */
            for (i = b_frames - 1; i >= 0; i--) {
                int type = s->input_picture[i]->f->pict_type;
                if (type && type != AV_PICTURE_TYPE_B)
                    b_frames = i;
            }
            if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
                b_frames == s->max_b_frames) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "warning, too many B-frames in a row\n");
            }

            /* GOP boundary handling */
            if (s->picture_in_gop_number + b_frames >= s->gop_size) {
                if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
                    s->gop_size > s->picture_in_gop_number) {
                    b_frames = s->gop_size - s->picture_in_gop_number - 1;
                } else {
                    if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
                        b_frames = 0;
                    s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
                }
            }

            if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
                s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
                b_frames--;

            /* the reference frame is coded first, the B-frames after it */
            s->reordered_input_picture[0] = s->input_picture[b_frames];
            if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
                s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
            s->reordered_input_picture[0]->f->coded_picture_number =
                s->coded_picture_number++;
            for (i = 0; i < b_frames; i++) {
                s->reordered_input_picture[i + 1] = s->input_picture[i];
                /* NOTE(review): the type assignment continuation (presumably
                 * AV_PICTURE_TYPE_B;) is elided here */
                s->reordered_input_picture[i + 1]->f->pict_type =
                s->reordered_input_picture[i + 1]->f->coded_picture_number =
                    s->coded_picture_number++;
            }
        }
    }
no_output_pic:
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);

    if (s->reordered_input_picture[0]) {
        s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;

        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
            return ret;

        if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
            // input is a shared pix, so we can't modify it -> allocate a new
            // one & ensure that the shared one is reuseable

            Picture *pic;
            int i = ff_find_unused_picture(s->avctx, s->picture, 0);
            if (i < 0)
                return i;
            pic = &s->picture[i];

            pic->reference = s->reordered_input_picture[0]->reference;
            if (alloc_picture(s, pic, 0) < 0) {
                return -1;
            }

            ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
            if (ret < 0)
                return ret;

            /* mark us unused / free shared pic */
            av_frame_unref(s->reordered_input_picture[0]->f);
            s->reordered_input_picture[0]->shared = 0;

            s->current_picture_ptr = pic;
        } else {
            // input is not a shared pix -> reuse buffer for current_pix
            s->current_picture_ptr = s->reordered_input_picture[0];
            for (i = 0; i < 4; i++) {
                if (s->new_picture.f->data[i])
                    s->new_picture.f->data[i] += INPLACE_OFFSET;
            }
        }
        ff_mpeg_unref_picture(s->avctx, &s->current_picture);
        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                       s->current_picture_ptr)) < 0)
            return ret;

        s->picture_number = s->new_picture.f->display_picture_number;
    }
    return 0;
}
1715 
/* Per-frame postprocessing after encoding: pad the reconstructed reference
 * picture's edges for unrestricted motion vectors and record per-type stats.
 * NOTE(review): signature elided; presumably
 * static void frame_end(MpegEncContext *s). */
{
    if (s->unrestricted_mv &&
        s->current_picture.reference &&
        !s->intra_only) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        /* NOTE(review): the luma edge-width arguments (presumably
         * EDGE_WIDTH, EDGE_WIDTH,) are elided from this first call */
        s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
                                s->current_picture.f->linesize[0],
                                s->h_edge_pos, s->v_edge_pos,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
                                s->current_picture.f->linesize[1],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
        s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
                                s->current_picture.f->linesize[2],
                                s->h_edge_pos >> hshift,
                                s->v_edge_pos >> vshift,
                                EDGE_WIDTH >> hshift,
                                EDGE_WIDTH >> vshift,
                                EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    /* remember the lambda actually used per picture type */
    s->last_pict_type = s->pict_type;
    s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
    if (s->pict_type!= AV_PICTURE_TYPE_B)
        s->last_non_b_pict_type = s->pict_type;

#if FF_API_CODED_FRAME
    /* NOTE(review): the deprecation-warning guard macros around this
     * section appear elided by the extraction */
    av_frame_unref(s->avctx->coded_frame);
    av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
#endif
#if FF_API_ERROR_FRAME
    memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
           sizeof(s->current_picture.encoding_error));
#endif
}
1765 
/* Refresh the per-coefficient noise-reduction offsets from the accumulated
 * DCT error statistics, separately for intra and inter blocks.
 * NOTE(review): signature elided; presumably
 * static void update_noise_reduction(MpegEncContext *s). */
{
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* halve the accumulators periodically so recent frames dominate */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        /* offset = noise_reduction * count / error_sum, rounded */
        for (i = 0; i < 64; i++) {
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1786 
/* Per-frame setup before encoding: rotate last/next/current reference
 * pictures, adjust plane pointers for field pictures, and select the
 * dequantizer functions for the target format.
 * NOTE(review): signature elided; presumably
 * static int frame_start(MpegEncContext *s). */
{
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* non-B frames become the new forward reference */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* field coding: double the strides, bottom field starts one line down */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i] *= 2;
            s->next_picture.f->linesize[i] *= 2;
        }
    }

    /* pick dequantizers matching the output bitstream format */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        av_assert2(s->noise_reduction && s->encoding);
        /* NOTE(review): the call updating the NR offsets (presumably
         * update_noise_reduction(s);) is elided here */
    }

    return 0;
}
1858 
                          /* NOTE(review): first signature line elided;
                           * presumably
                           * int ff_mpv_encode_picture(AVCodecContext *avctx,
                           *                           AVPacket *pkt, ...) */
                          const AVFrame *pic_arg, int *got_packet)
{
    /* NOTE(review): the declaration of `s` (presumably
     * MpegEncContext *s = avctx->priv_data;) is elided here */
    int i, stuffing_count, ret;
    int context_count = s->slice_context_count;

    s->vbv_ignore_qmax = 0;

    s->picture_in_gop_number++;

    if (load_input_picture(s, pic_arg) < 0)
        return -1;

    if (select_input_picture(s) < 0) {
        return -1;
    }

    /* output? */
    if (s->new_picture.f->data[0]) {
        int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
        int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
                                      :
                                      s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
        if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
            return ret;
        if (s->mb_info) {
            /* NOTE(review): the side-data type argument (presumably
             * AV_PKT_DATA_H263_MB_INFO,) is elided here */
            s->mb_info_ptr = av_packet_new_side_data(pkt,
                                 s->mb_width*s->mb_height*12);
            s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
        }

        /* carve the packet buffer into per-slice-thread bitstream regions */
        for (i = 0; i < context_count; i++) {
            int start_y = s->thread_context[i]->start_mb_y;
            int end_y = s->thread_context[i]-> end_mb_y;
            int h = s->mb_height;
            uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
            uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);

            init_put_bits(&s->thread_context[i]->pb, start, end - start);
        }

        s->pict_type = s->new_picture.f->pict_type;
        //emms_c();
        ret = frame_start(s);
        if (ret < 0)
            return ret;
vbv_retry:
        ret = encode_picture(s, s->picture_number);
        if (growing_buffer) {
            av_assert0(s->pb.buf == avctx->internal->byte_buffer);
            pkt->data = s->pb.buf;
            /* NOTE(review): a statement (presumably updating pkt->size) is
             * elided here */
        }
        if (ret < 0)
            return -1;

#if FF_API_STAT_BITS
        /* NOTE(review): deprecation-warning guard macros elided */
        avctx->header_bits = s->header_bits;
        avctx->mv_bits = s->mv_bits;
        avctx->misc_bits = s->misc_bits;
        avctx->i_tex_bits = s->i_tex_bits;
        avctx->p_tex_bits = s->p_tex_bits;
        avctx->i_count = s->i_count;
        // FIXME f/b_count in avctx
        avctx->p_count = s->mb_num - s->i_count - s->skip_count;
        avctx->skip_count = s->skip_count;
#endif

        frame_end(s);

        if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
            ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);

        /* VBV check: if the frame is too large, raise lambda and re-encode */
        if (avctx->rc_buffer_size) {
            RateControlContext *rcc = &s->rc_context;
            int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
            int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
            int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;

            if (put_bits_count(&s->pb) > max_size &&
                s->lambda < s->lmax) {
                s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                       (s->qscale + 1) / s->qscale);
                if (s->adaptive_quant) {
                    int i;
                    for (i = 0; i < s->mb_height * s->mb_stride; i++)
                        s->lambda_table[i] =
                            FFMAX(s->lambda_table[i] + min_step,
                                  s->lambda_table[i] * (s->qscale + 1) /
                                  s->qscale);
                }
                s->mb_skipped = 0; // done in frame_start()
                // done in encode_picture() so we must undo it
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    if (s->flipflop_rounding ||
                        s->codec_id == AV_CODEC_ID_H263P ||
                        s->codec_id == AV_CODEC_ID_MPEG4)
                        s->no_rounding ^= 1;
                }
                if (s->pict_type != AV_PICTURE_TYPE_B) {
                    s->time_base = s->last_time_base;
                    s->last_non_b_time = s->time - s->pp_time;
                }
                for (i = 0; i < context_count; i++) {
                    PutBitContext *pb = &s->thread_context[i]->pb;
                    init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
                }
                s->vbv_ignore_qmax = 1;
                av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
                goto vbv_retry;
            }

        }

        /* NOTE(review): a pass-1 stats write (presumably guarded by
         * AV_CODEC_FLAG_PASS1) appears elided here */

        for (i = 0; i < 4; i++) {
            s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
            avctx->error[i] += s->current_picture_ptr->encoding_error[i];
        }
        ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
                                       s->current_picture_ptr->encoding_error,
                                       (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
                                       s->pict_type);

        assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
                                         s->misc_bits + s->i_tex_bits +
                                         s->p_tex_bits);
        flush_put_bits(&s->pb);
        s->frame_bits = put_bits_count(&s->pb);

        stuffing_count = ff_vbv_update(s, s->frame_bits);
        s->stuffing_bits = 8*stuffing_count;
        if (stuffing_count) {
            if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
                stuffing_count + 50) {
                av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
                return -1;
            }

            switch (s->codec_id) {
            /* NOTE(review): the MPEG-1/2 case labels appear elided before
             * this zero-byte stuffing loop */
            while (stuffing_count--) {
                put_bits(&s->pb, 8, 0);
            }
            break;
            case AV_CODEC_ID_MPEG4:
                /* MPEG-4 stuffing: resync marker then 0xFF filler bytes */
                put_bits(&s->pb, 16, 0);
                put_bits(&s->pb, 16, 0x1C3);
                stuffing_count -= 4;
                while (stuffing_count--) {
                    put_bits(&s->pb, 8, 0xFF);
                }
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
                s->stuffing_bits = 0;
            }
            flush_put_bits(&s->pb);
            s->frame_bits = put_bits_count(&s->pb);
        }

        /* update MPEG-1/2 vbv_delay for CBR */
        if (avctx->rc_max_rate &&
            s->out_format == FMT_MPEG1 &&
            90000LL * (avctx->rc_buffer_size - 1) <=
            avctx->rc_max_rate * 0xFFFFLL) {
            AVCPBProperties *props;
            size_t props_size;

            int vbv_delay, min_delay;
            /* NOTE(review): the continuation of this expression (presumably
             * av_q2d(avctx->time_base);) is elided */
            double inbits = avctx->rc_max_rate *
            int minbits = s->frame_bits - 8 *
                          (s->vbv_delay_ptr - s->pb.buf - 1);
            double bits = s->rc_context.buffer_index + minbits - inbits;

            /* NOTE(review): the av_log call head appears elided here */
            if (bits < 0)
                "Internal error, negative bits\n");

            av_assert1(s->repeat_first_field == 0);

            vbv_delay = bits * 90000 / avctx->rc_max_rate;
            min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
                        avctx->rc_max_rate;

            vbv_delay = FFMAX(vbv_delay, min_delay);

            av_assert0(vbv_delay < 0xFFFF);

            /* patch the 16-bit vbv_delay field already written in the header */
            s->vbv_delay_ptr[0] &= 0xF8;
            s->vbv_delay_ptr[0] |= vbv_delay >> 13;
            s->vbv_delay_ptr[1] = vbv_delay >> 5;
            s->vbv_delay_ptr[2] &= 0x07;
            s->vbv_delay_ptr[2] |= vbv_delay << 3;

            props = av_cpb_properties_alloc(&props_size);
            if (!props)
                return AVERROR(ENOMEM);
            props->vbv_delay = vbv_delay * 300;

            /* NOTE(review): the side-data attach call head (presumably
             * av_packet_add_side_data(pkt, AV_PKT_DATA_CPB_PROPERTIES,)
             * is elided here */
                                                  (uint8_t*)props, props_size);
            if (ret < 0) {
                av_freep(&props);
                return ret;
            }

#if FF_API_VBV_DELAY
            avctx->vbv_delay = vbv_delay * 300;
#endif
        }
        s->total_bits += s->frame_bits;
#if FF_API_STAT_BITS
        avctx->frame_bits = s->frame_bits;
#endif


        pkt->pts = s->current_picture.f->pts;
        if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
            if (!s->current_picture.f->coded_picture_number)
                pkt->dts = pkt->pts - s->dts_delta;
            else
                pkt->dts = s->reordered_pts;
            s->reordered_pts = pkt->pts;
        } else
            pkt->dts = pkt->pts;
        /* NOTE(review): the bodies of these two ifs (presumably setting
         * AV_PKT_FLAG_KEY and shrinking the mb_info side data) are elided */
        if (s->current_picture.f->key_frame)
        if (s->mb_info)
    } else {
        s->frame_bits = 0;
    }

    /* release non-reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(avctx, &s->picture[i]);
    }

    av_assert1((s->frame_bits & 7) == 0);

    pkt->size = s->frame_bits / 8;
    *got_packet = !!pkt->size;
    return 0;
}
2120 
                                        /* NOTE(review): first signature line
                                         * elided; presumably
                                         * static void dct_single_coeff_elimination(
                                         *     MpegEncContext *s, ...) */
                                        int n, int threshold)
{
    /* run-length dependent cost of keeping an isolated +-1 coefficient:
     * early (low-frequency) positions are more expensive to keep */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    /* negative threshold: also allow zeroing the DC coefficient */
    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* score the block: only blocks consisting solely of +-1 coefficients
     * (beyond the optionally-kept DC) are candidates for elimination */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any larger coefficient: keep the block as-is */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* cheap enough: zero everything after (optionally) the DC */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2176 
2177 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2178  int last_index)
2179 {
2180  int i;
2181  const int maxlevel = s->max_qcoeff;
2182  const int minlevel = s->min_qcoeff;
2183  int overflow = 0;
2184 
2185  if (s->mb_intra) {
2186  i = 1; // skip clipping of intra dc
2187  } else
2188  i = 0;
2189 
2190  for (; i <= last_index; i++) {
2191  const int j = s->intra_scantable.permutated[i];
2192  int level = block[j];
2193 
2194  if (level > maxlevel) {
2195  level = maxlevel;
2196  overflow++;
2197  } else if (level < minlevel) {
2198  level = minlevel;
2199  overflow++;
2200  }
2201 
2202  block[j] = level;
2203  }
2204 
2205  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2206  av_log(s->avctx, AV_LOG_INFO,
2207  "warning, clipping %d dct coefficients to %d..%d\n",
2208  overflow, minlevel, maxlevel);
2209 }
2210 
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the local
 * variance of its (edge-clipped) 3x3 neighbourhood: flat areas get small
 * weights, textured areas larger ones.
 *
 * @param weight out: 64 weights, row-major (weight[x + 8*y])
 * @param ptr    top-left of the 8x8 source block
 * @param stride distance in bytes between consecutive source rows
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y, xx, yy;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);

            for (yy = y_lo; yy < y_hi; yy++) {
                for (xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * E[v^2] - (E[v])^2) / count, integer sqrt */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2234 
2236  int motion_x, int motion_y,
2237  int mb_block_height,
2238  int mb_block_width,
2239  int mb_block_count)
2240 {
2241  int16_t weight[12][64];
2242  int16_t orig[12][64];
2243  const int mb_x = s->mb_x;
2244  const int mb_y = s->mb_y;
2245  int i;
2246  int skip_dct[12];
2247  int dct_offset = s->linesize * 8; // default for progressive frames
2248  int uv_dct_offset = s->uvlinesize * 8;
2249  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2250  ptrdiff_t wrap_y, wrap_c;
2251 
2252  for (i = 0; i < mb_block_count; i++)
2253  skip_dct[i] = s->skipdct;
2254 
2255  if (s->adaptive_quant) {
2256  const int last_qp = s->qscale;
2257  const int mb_xy = mb_x + mb_y * s->mb_stride;
2258 
2259  s->lambda = s->lambda_table[mb_xy];
2260  update_qscale(s);
2261 
2262  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2263  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2264  s->dquant = s->qscale - last_qp;
2265 
2266  if (s->out_format == FMT_H263) {
2267  s->dquant = av_clip(s->dquant, -2, 2);
2268 
2269  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2270  if (!s->mb_intra) {
2271  if (s->pict_type == AV_PICTURE_TYPE_B) {
2272  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2273  s->dquant = 0;
2274  }
2275  if (s->mv_type == MV_TYPE_8X8)
2276  s->dquant = 0;
2277  }
2278  }
2279  }
2280  }
2281  ff_set_qscale(s, last_qp + s->dquant);
2282  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2283  ff_set_qscale(s, s->qscale + s->dquant);
2284 
2285  wrap_y = s->linesize;
2286  wrap_c = s->uvlinesize;
2287  ptr_y = s->new_picture.f->data[0] +
2288  (mb_y * 16 * wrap_y) + mb_x * 16;
2289  ptr_cb = s->new_picture.f->data[1] +
2290  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2291  ptr_cr = s->new_picture.f->data[2] +
2292  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2293 
2294  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2295  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2296  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2297  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2298  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2299  wrap_y, wrap_y,
2300  16, 16, mb_x * 16, mb_y * 16,
2301  s->width, s->height);
2302  ptr_y = ebuf;
2303  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2304  wrap_c, wrap_c,
2305  mb_block_width, mb_block_height,
2306  mb_x * mb_block_width, mb_y * mb_block_height,
2307  cw, ch);
2308  ptr_cb = ebuf + 16 * wrap_y;
2309  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2310  wrap_c, wrap_c,
2311  mb_block_width, mb_block_height,
2312  mb_x * mb_block_width, mb_y * mb_block_height,
2313  cw, ch);
2314  ptr_cr = ebuf + 16 * wrap_y + 16;
2315  }
2316 
2317  if (s->mb_intra) {
2318  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2319  int progressive_score, interlaced_score;
2320 
2321  s->interlaced_dct = 0;
2322  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2323  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2324  NULL, wrap_y, 8) - 400;
2325 
2326  if (progressive_score > 0) {
2327  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2328  NULL, wrap_y * 2, 8) +
2329  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2330  NULL, wrap_y * 2, 8);
2331  if (progressive_score > interlaced_score) {
2332  s->interlaced_dct = 1;
2333 
2334  dct_offset = wrap_y;
2335  uv_dct_offset = wrap_c;
2336  wrap_y <<= 1;
2337  if (s->chroma_format == CHROMA_422 ||
2338  s->chroma_format == CHROMA_444)
2339  wrap_c <<= 1;
2340  }
2341  }
2342  }
2343 
2344  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2345  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2346  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2347  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2348 
2349  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2350  skip_dct[4] = 1;
2351  skip_dct[5] = 1;
2352  } else {
2353  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2354  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2355  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2356  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2357  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2358  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2359  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2360  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2361  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2362  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2363  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2364  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2365  }
2366  }
2367  } else {
2368  op_pixels_func (*op_pix)[4];
2369  qpel_mc_func (*op_qpix)[16];
2370  uint8_t *dest_y, *dest_cb, *dest_cr;
2371 
2372  dest_y = s->dest[0];
2373  dest_cb = s->dest[1];
2374  dest_cr = s->dest[2];
2375 
2376  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2377  op_pix = s->hdsp.put_pixels_tab;
2378  op_qpix = s->qdsp.put_qpel_pixels_tab;
2379  } else {
2380  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2381  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2382  }
2383 
2384  if (s->mv_dir & MV_DIR_FORWARD) {
2385  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2386  s->last_picture.f->data,
2387  op_pix, op_qpix);
2388  op_pix = s->hdsp.avg_pixels_tab;
2389  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2390  }
2391  if (s->mv_dir & MV_DIR_BACKWARD) {
2392  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2393  s->next_picture.f->data,
2394  op_pix, op_qpix);
2395  }
2396 
2397  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2398  int progressive_score, interlaced_score;
2399 
2400  s->interlaced_dct = 0;
2401  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2402  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2403  ptr_y + wrap_y * 8,
2404  wrap_y, 8) - 400;
2405 
2406  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2407  progressive_score -= 400;
2408 
2409  if (progressive_score > 0) {
2410  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2411  wrap_y * 2, 8) +
2412  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2413  ptr_y + wrap_y,
2414  wrap_y * 2, 8);
2415 
2416  if (progressive_score > interlaced_score) {
2417  s->interlaced_dct = 1;
2418 
2419  dct_offset = wrap_y;
2420  uv_dct_offset = wrap_c;
2421  wrap_y <<= 1;
2422  if (s->chroma_format == CHROMA_422)
2423  wrap_c <<= 1;
2424  }
2425  }
2426  }
2427 
2428  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2429  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2430  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2431  dest_y + dct_offset, wrap_y);
2432  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2433  dest_y + dct_offset + 8, wrap_y);
2434 
2435  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2436  skip_dct[4] = 1;
2437  skip_dct[5] = 1;
2438  } else {
2439  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2440  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2441  if (!s->chroma_y_shift) { /* 422 */
2442  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2443  dest_cb + uv_dct_offset, wrap_c);
2444  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2445  dest_cr + uv_dct_offset, wrap_c);
2446  }
2447  }
2448  /* pre quantization */
2449  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2450  2 * s->qscale * s->qscale) {
2451  // FIXME optimize
2452  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2453  skip_dct[0] = 1;
2454  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2455  skip_dct[1] = 1;
2456  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2457  wrap_y, 8) < 20 * s->qscale)
2458  skip_dct[2] = 1;
2459  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2460  wrap_y, 8) < 20 * s->qscale)
2461  skip_dct[3] = 1;
2462  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2463  skip_dct[4] = 1;
2464  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2465  skip_dct[5] = 1;
2466  if (!s->chroma_y_shift) { /* 422 */
2467  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2468  dest_cb + uv_dct_offset,
2469  wrap_c, 8) < 20 * s->qscale)
2470  skip_dct[6] = 1;
2471  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2472  dest_cr + uv_dct_offset,
2473  wrap_c, 8) < 20 * s->qscale)
2474  skip_dct[7] = 1;
2475  }
2476  }
2477  }
2478 
2479  if (s->quantizer_noise_shaping) {
2480  if (!skip_dct[0])
2481  get_visual_weight(weight[0], ptr_y , wrap_y);
2482  if (!skip_dct[1])
2483  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2484  if (!skip_dct[2])
2485  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2486  if (!skip_dct[3])
2487  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2488  if (!skip_dct[4])
2489  get_visual_weight(weight[4], ptr_cb , wrap_c);
2490  if (!skip_dct[5])
2491  get_visual_weight(weight[5], ptr_cr , wrap_c);
2492  if (!s->chroma_y_shift) { /* 422 */
2493  if (!skip_dct[6])
2494  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2495  wrap_c);
2496  if (!skip_dct[7])
2497  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2498  wrap_c);
2499  }
2500  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2501  }
2502 
2503  /* DCT & quantize */
2504  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2505  {
2506  for (i = 0; i < mb_block_count; i++) {
2507  if (!skip_dct[i]) {
2508  int overflow;
2509  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2510  // FIXME we could decide to change to quantizer instead of
2511  // clipping
2512  // JS: I don't think that would be a good idea it could lower
2513  // quality instead of improve it. Just INTRADC clipping
2514  // deserves changes in quantizer
2515  if (overflow)
2516  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2517  } else
2518  s->block_last_index[i] = -1;
2519  }
2520  if (s->quantizer_noise_shaping) {
2521  for (i = 0; i < mb_block_count; i++) {
2522  if (!skip_dct[i]) {
2523  s->block_last_index[i] =
2524  dct_quantize_refine(s, s->block[i], weight[i],
2525  orig[i], i, s->qscale);
2526  }
2527  }
2528  }
2529 
2530  if (s->luma_elim_threshold && !s->mb_intra)
2531  for (i = 0; i < 4; i++)
2532  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2533  if (s->chroma_elim_threshold && !s->mb_intra)
2534  for (i = 4; i < mb_block_count; i++)
2535  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2536 
2537  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2538  for (i = 0; i < mb_block_count; i++) {
2539  if (s->block_last_index[i] == -1)
2540  s->coded_score[i] = INT_MAX / 256;
2541  }
2542  }
2543  }
2544 
2545  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2546  s->block_last_index[4] =
2547  s->block_last_index[5] = 0;
2548  s->block[4][0] =
2549  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2550  if (!s->chroma_y_shift) { /* 422 / 444 */
2551  for (i=6; i<12; i++) {
2552  s->block_last_index[i] = 0;
2553  s->block[i][0] = s->block[4][0];
2554  }
2555  }
2556  }
2557 
2558  // non c quantize code returns incorrect block_last_index FIXME
2559  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2560  for (i = 0; i < mb_block_count; i++) {
2561  int j;
2562  if (s->block_last_index[i] > 0) {
2563  for (j = 63; j > 0; j--) {
2564  if (s->block[i][s->intra_scantable.permutated[j]])
2565  break;
2566  }
2567  s->block_last_index[i] = j;
2568  }
2569  }
2570  }
2571 
2572  /* huffman encode */
2573  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2576  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2577  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2578  break;
2579  case AV_CODEC_ID_MPEG4:
2580  if (CONFIG_MPEG4_ENCODER)
2581  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2582  break;
2583  case AV_CODEC_ID_MSMPEG4V2:
2584  case AV_CODEC_ID_MSMPEG4V3:
2585  case AV_CODEC_ID_WMV1:
2587  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2588  break;
2589  case AV_CODEC_ID_WMV2:
2590  if (CONFIG_WMV2_ENCODER)
2591  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2592  break;
2593  case AV_CODEC_ID_H261:
2594  if (CONFIG_H261_ENCODER)
2595  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2596  break;
2597  case AV_CODEC_ID_H263:
2598  case AV_CODEC_ID_H263P:
2599  case AV_CODEC_ID_FLV1:
2600  case AV_CODEC_ID_RV10:
2601  case AV_CODEC_ID_RV20:
2602  if (CONFIG_H263_ENCODER)
2603  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2604  break;
2605  case AV_CODEC_ID_MJPEG:
2606  case AV_CODEC_ID_AMV:
2607  if (CONFIG_MJPEG_ENCODER)
2608  ff_mjpeg_encode_mb(s, s->block);
2609  break;
2610  case AV_CODEC_ID_SPEEDHQ:
2611  if (CONFIG_SPEEDHQ_ENCODER)
2612  ff_speedhq_encode_mb(s, s->block);
2613  break;
2614  default:
2615  av_assert1(0);
2616  }
2617 }
2618 
2619 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2620 {
2621  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2622  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2623  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2624 }
2625 
2627  int i;
2628 
2629  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2630 
2631  /* MPEG-1 */
2632  d->mb_skip_run= s->mb_skip_run;
2633  for(i=0; i<3; i++)
2634  d->last_dc[i] = s->last_dc[i];
2635 
2636  /* statistics */
2637  d->mv_bits= s->mv_bits;
2638  d->i_tex_bits= s->i_tex_bits;
2639  d->p_tex_bits= s->p_tex_bits;
2640  d->i_count= s->i_count;
2641  d->f_count= s->f_count;
2642  d->b_count= s->b_count;
2643  d->skip_count= s->skip_count;
2644  d->misc_bits= s->misc_bits;
2645  d->last_bits= 0;
2646 
2647  d->mb_skipped= 0;
2648  d->qscale= s->qscale;
2649  d->dquant= s->dquant;
2650 
2651  d->esc3_level_length= s->esc3_level_length;
2652 }
2653 
2655  int i;
2656 
2657  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2658  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2659 
2660  /* MPEG-1 */
2661  d->mb_skip_run= s->mb_skip_run;
2662  for(i=0; i<3; i++)
2663  d->last_dc[i] = s->last_dc[i];
2664 
2665  /* statistics */
2666  d->mv_bits= s->mv_bits;
2667  d->i_tex_bits= s->i_tex_bits;
2668  d->p_tex_bits= s->p_tex_bits;
2669  d->i_count= s->i_count;
2670  d->f_count= s->f_count;
2671  d->b_count= s->b_count;
2672  d->skip_count= s->skip_count;
2673  d->misc_bits= s->misc_bits;
2674 
2675  d->mb_intra= s->mb_intra;
2676  d->mb_skipped= s->mb_skipped;
2677  d->mv_type= s->mv_type;
2678  d->mv_dir= s->mv_dir;
2679  d->pb= s->pb;
2680  if(s->data_partitioning){
2681  d->pb2= s->pb2;
2682  d->tex_pb= s->tex_pb;
2683  }
2684  d->block= s->block;
2685  for(i=0; i<8; i++)
2686  d->block_last_index[i]= s->block_last_index[i];
2687  d->interlaced_dct= s->interlaced_dct;
2688  d->qscale= s->qscale;
2689 
2690  d->esc3_level_length= s->esc3_level_length;
2691 }
2692 
2693 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2695  int *dmin, int *next_block, int motion_x, int motion_y)
2696 {
2697  int score;
2698  uint8_t *dest_backup[3];
2699 
2700  copy_context_before_encode(s, backup, type);
2701 
2702  s->block= s->blocks[*next_block];
2703  s->pb= pb[*next_block];
2704  if(s->data_partitioning){
2705  s->pb2 = pb2 [*next_block];
2706  s->tex_pb= tex_pb[*next_block];
2707  }
2708 
2709  if(*next_block){
2710  memcpy(dest_backup, s->dest, sizeof(s->dest));
2711  s->dest[0] = s->sc.rd_scratchpad;
2712  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2713  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2714  av_assert0(s->linesize >= 32); //FIXME
2715  }
2716 
2717  encode_mb(s, motion_x, motion_y);
2718 
2719  score= put_bits_count(&s->pb);
2720  if(s->data_partitioning){
2721  score+= put_bits_count(&s->pb2);
2722  score+= put_bits_count(&s->tex_pb);
2723  }
2724 
2725  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2726  ff_mpv_reconstruct_mb(s, s->block);
2727 
2728  score *= s->lambda2;
2729  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2730  }
2731 
2732  if(*next_block){
2733  memcpy(s->dest, dest_backup, sizeof(s->dest));
2734  }
2735 
2736  if(score<*dmin){
2737  *dmin= score;
2738  *next_block^=1;
2739 
2741  }
2742 }
2743 
2744 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2745  const uint32_t *sq = ff_square_tab + 256;
2746  int acc=0;
2747  int x,y;
2748 
2749  if(w==16 && h==16)
2750  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2751  else if(w==8 && h==8)
2752  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2753 
2754  for(y=0; y<h; y++){
2755  for(x=0; x<w; x++){
2756  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2757  }
2758  }
2759 
2760  av_assert2(acc>=0);
2761 
2762  return acc;
2763 }
2764 
2765 static int sse_mb(MpegEncContext *s){
2766  int w= 16;
2767  int h= 16;
2768 
2769  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2770  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2771 
2772  if(w==16 && h==16)
2773  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2774  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2775  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2776  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2777  }else{
2778  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2779  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2780  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2781  }
2782  else
2783  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2784  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2785  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2786 }
2787 
2789  MpegEncContext *s= *(void**)arg;
2790 
2791 
2792  s->me.pre_pass=1;
2793  s->me.dia_size= s->avctx->pre_dia_size;
2794  s->first_slice_line=1;
2795  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2796  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2797  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2798  }
2799  s->first_slice_line=0;
2800  }
2801 
2802  s->me.pre_pass=0;
2803 
2804  return 0;
2805 }
2806 
2808  MpegEncContext *s= *(void**)arg;
2809 
2810  s->me.dia_size= s->avctx->dia_size;
2811  s->first_slice_line=1;
2812  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2813  s->mb_x=0; //for block init below
2815  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2816  s->block_index[0]+=2;
2817  s->block_index[1]+=2;
2818  s->block_index[2]+=2;
2819  s->block_index[3]+=2;
2820 
2821  /* compute motion vector & mb_type and store in context */
2822  if(s->pict_type==AV_PICTURE_TYPE_B)
2823  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2824  else
2825  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2826  }
2827  s->first_slice_line=0;
2828  }
2829  return 0;
2830 }
2831 
2832 static int mb_var_thread(AVCodecContext *c, void *arg){
2833  MpegEncContext *s= *(void**)arg;
2834  int mb_x, mb_y;
2835 
2836  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2837  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2838  int xx = mb_x * 16;
2839  int yy = mb_y * 16;
2840  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2841  int varc;
2842  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2843 
2844  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2845  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2846 
2847  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2848  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2849  s->me.mb_var_sum_temp += varc;
2850  }
2851  }
2852  return 0;
2853 }
2854 
2856  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2857  if(s->partitioned_frame){
2859  }
2860 
2861  ff_mpeg4_stuffing(&s->pb);
2862  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2864  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2866  }
2867 
2868  flush_put_bits(&s->pb);
2869 
2870  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2871  s->misc_bits+= get_bits_diff(s);
2872 }
2873 
2875 {
2876  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2877  int offset = put_bits_count(&s->pb);
2878  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2879  int gobn = s->mb_y / s->gob_index;
2880  int pred_x, pred_y;
2881  if (CONFIG_H263_ENCODER)
2882  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2883  bytestream_put_le32(&ptr, offset);
2884  bytestream_put_byte(&ptr, s->qscale);
2885  bytestream_put_byte(&ptr, gobn);
2886  bytestream_put_le16(&ptr, mba);
2887  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2888  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2889  /* 4MV not implemented */
2890  bytestream_put_byte(&ptr, 0); /* hmv2 */
2891  bytestream_put_byte(&ptr, 0); /* vmv2 */
2892 }
2893 
/**
 * Track macroblock-info side-data slots while encoding.
 *
 * When mb_info export is enabled, a 12-byte info record is reserved roughly
 * every s->mb_info bytes of bitstream (the comparison below is done in bits,
 * hence the *8). Called with startcode=1 right after a resync marker is
 * emitted, and with startcode=0 per macroblock.
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* crossed another s->mb_info-byte interval since the last recorded
     * position: reserve room for one more 12-byte record */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* The check above may have reserved a slot (mb_info_size += 12)
         * without writing anything into it yet. That is fine: this function
         * is called again for the first macroblock after the start code,
         * and that call performs the actual write_mb_info(). */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2916 
2917 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2918 {
2919  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2920  && s->slice_context_count == 1
2921  && s->pb.buf == s->avctx->internal->byte_buffer) {
2922  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2923  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2924 
2925  uint8_t *new_buffer = NULL;
2926  int new_buffer_size = 0;
2927 
2928  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2929  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2930  return AVERROR(ENOMEM);
2931  }
2932 
2933  emms_c();
2934 
2935  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2936  s->avctx->internal->byte_buffer_size + size_increase);
2937  if (!new_buffer)
2938  return AVERROR(ENOMEM);
2939 
2940  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2941  av_free(s->avctx->internal->byte_buffer);
2942  s->avctx->internal->byte_buffer = new_buffer;
2943  s->avctx->internal->byte_buffer_size = new_buffer_size;
2944  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2945  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2946  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2947  }
2948  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2949  return AVERROR(EINVAL);
2950  return 0;
2951 }
2952 
2953 static int encode_thread(AVCodecContext *c, void *arg){
2954  MpegEncContext *s= *(void**)arg;
2955  int mb_x, mb_y, mb_y_order;
2956  int chr_h= 16>>s->chroma_y_shift;
2957  int i, j;
2958  MpegEncContext best_s = { 0 }, backup_s;
2959  uint8_t bit_buf[2][MAX_MB_BYTES];
2960  uint8_t bit_buf2[2][MAX_MB_BYTES];
2961  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2962  PutBitContext pb[2], pb2[2], tex_pb[2];
2963 
2964  for(i=0; i<2; i++){
2965  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2966  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2967  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2968  }
2969 
2970  s->last_bits= put_bits_count(&s->pb);
2971  s->mv_bits=0;
2972  s->misc_bits=0;
2973  s->i_tex_bits=0;
2974  s->p_tex_bits=0;
2975  s->i_count=0;
2976  s->f_count=0;
2977  s->b_count=0;
2978  s->skip_count=0;
2979 
2980  for(i=0; i<3; i++){
2981  /* init last dc values */
2982  /* note: quant matrix value (8) is implied here */
2983  s->last_dc[i] = 128 << s->intra_dc_precision;
2984 
2985  s->current_picture.encoding_error[i] = 0;
2986  }
2987  if(s->codec_id==AV_CODEC_ID_AMV){
2988  s->last_dc[0] = 128*8/13;
2989  s->last_dc[1] = 128*8/14;
2990  s->last_dc[2] = 128*8/14;
2991  }
2992  s->mb_skip_run = 0;
2993  memset(s->last_mv, 0, sizeof(s->last_mv));
2994 
2995  s->last_mv_dir = 0;
2996 
2997  switch(s->codec_id){
2998  case AV_CODEC_ID_H263:
2999  case AV_CODEC_ID_H263P:
3000  case AV_CODEC_ID_FLV1:
3001  if (CONFIG_H263_ENCODER)
3002  s->gob_index = H263_GOB_HEIGHT(s->height);
3003  break;
3004  case AV_CODEC_ID_MPEG4:
3005  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3007  break;
3008  }
3009 
3010  s->resync_mb_x=0;
3011  s->resync_mb_y=0;
3012  s->first_slice_line = 1;
3013  s->ptr_lastgob = s->pb.buf;
3014  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3015  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3016  int first_in_slice;
3017  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3018  if (first_in_slice && mb_y_order != s->start_mb_y)
3020  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3021  } else {
3022  mb_y = mb_y_order;
3023  }
3024  s->mb_x=0;
3025  s->mb_y= mb_y;
3026 
3027  ff_set_qscale(s, s->qscale);
3029 
3030  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3031  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3032  int mb_type= s->mb_type[xy];
3033 // int d;
3034  int dmin= INT_MAX;
3035  int dir;
3036  int size_increase = s->avctx->internal->byte_buffer_size/4
3037  + s->mb_width*MAX_MB_BYTES;
3038 
3040  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3041  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3042  return -1;
3043  }
3044  if(s->data_partitioning){
3045  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3046  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3047  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3048  return -1;
3049  }
3050  }
3051 
3052  s->mb_x = mb_x;
3053  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3055 
3056  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3058  xy= s->mb_y*s->mb_stride + s->mb_x;
3059  mb_type= s->mb_type[xy];
3060  }
3061 
3062  /* write gob / video packet header */
3063  if(s->rtp_mode){
3064  int current_packet_size, is_gob_start;
3065 
3066  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3067 
3068  is_gob_start = s->rtp_payload_size &&
3069  current_packet_size >= s->rtp_payload_size &&
3070  mb_y + mb_x > 0;
3071 
3072  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3073 
3074  switch(s->codec_id){
3075  case AV_CODEC_ID_H263:
3076  case AV_CODEC_ID_H263P:
3077  if(!s->h263_slice_structured)
3078  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3079  break;
3081  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3083  if(s->mb_skip_run) is_gob_start=0;
3084  break;
3085  case AV_CODEC_ID_MJPEG:
3086  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3087  break;
3088  }
3089 
3090  if(is_gob_start){
3091  if(s->start_mb_y != mb_y || mb_x!=0){
3092  write_slice_end(s);
3093 
3094  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3096  }
3097  }
3098 
3099  av_assert2((put_bits_count(&s->pb)&7) == 0);
3100  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3101 
3102  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3103  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3104  int d = 100 / s->error_rate;
3105  if(r % d == 0){
3106  current_packet_size=0;
3107  s->pb.buf_ptr= s->ptr_lastgob;
3108  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3109  }
3110  }
3111 
3112 #if FF_API_RTP_CALLBACK
3114  if (s->avctx->rtp_callback){
3115  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3116  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3117  }
3119 #endif
3120  update_mb_info(s, 1);
3121 
3122  switch(s->codec_id){
3123  case AV_CODEC_ID_MPEG4:
3124  if (CONFIG_MPEG4_ENCODER) {
3127  }
3128  break;
3131  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3134  }
3135  break;
3136  case AV_CODEC_ID_H263:
3137  case AV_CODEC_ID_H263P:
3138  if (CONFIG_H263_ENCODER)
3140  break;
3141  }
3142 
3143  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3144  int bits= put_bits_count(&s->pb);
3145  s->misc_bits+= bits - s->last_bits;
3146  s->last_bits= bits;
3147  }
3148 
3149  s->ptr_lastgob += current_packet_size;
3150  s->first_slice_line=1;
3151  s->resync_mb_x=mb_x;
3152  s->resync_mb_y=mb_y;
3153  }
3154  }
3155 
3156  if( (s->resync_mb_x == s->mb_x)
3157  && s->resync_mb_y+1 == s->mb_y){
3158  s->first_slice_line=0;
3159  }
3160 
3161  s->mb_skipped=0;
3162  s->dquant=0; //only for QP_RD
3163 
3164  update_mb_info(s, 0);
3165 
3166  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3167  int next_block=0;
3168  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3169 
3170  copy_context_before_encode(&backup_s, s, -1);
3171  backup_s.pb= s->pb;
3172  best_s.data_partitioning= s->data_partitioning;
3173  best_s.partitioned_frame= s->partitioned_frame;
3174  if(s->data_partitioning){
3175  backup_s.pb2= s->pb2;
3176  backup_s.tex_pb= s->tex_pb;
3177  }
3178 
3180  s->mv_dir = MV_DIR_FORWARD;
3181  s->mv_type = MV_TYPE_16X16;
3182  s->mb_intra= 0;
3183  s->mv[0][0][0] = s->p_mv_table[xy][0];
3184  s->mv[0][0][1] = s->p_mv_table[xy][1];
3185  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3186  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3187  }
3189  s->mv_dir = MV_DIR_FORWARD;
3190  s->mv_type = MV_TYPE_FIELD;
3191  s->mb_intra= 0;
3192  for(i=0; i<2; i++){
3193  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3194  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3195  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3196  }
3197  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3198  &dmin, &next_block, 0, 0);
3199  }
3201  s->mv_dir = MV_DIR_FORWARD;
3202  s->mv_type = MV_TYPE_16X16;
3203  s->mb_intra= 0;
3204  s->mv[0][0][0] = 0;
3205  s->mv[0][0][1] = 0;
3206  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3207  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3208  }
3210  s->mv_dir = MV_DIR_FORWARD;
3211  s->mv_type = MV_TYPE_8X8;
3212  s->mb_intra= 0;
3213  for(i=0; i<4; i++){
3214  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3215  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3216  }
3217  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3218  &dmin, &next_block, 0, 0);
3219  }
3221  s->mv_dir = MV_DIR_FORWARD;
3222  s->mv_type = MV_TYPE_16X16;
3223  s->mb_intra= 0;
3224  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3225  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3226  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3227  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3228  }
3230  s->mv_dir = MV_DIR_BACKWARD;
3231  s->mv_type = MV_TYPE_16X16;
3232  s->mb_intra= 0;
3233  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3234  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3235  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3236  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3237  }
3239  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3240  s->mv_type = MV_TYPE_16X16;
3241  s->mb_intra= 0;
3242  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3243  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3244  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3245  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3246  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3247  &dmin, &next_block, 0, 0);
3248  }
3250  s->mv_dir = MV_DIR_FORWARD;
3251  s->mv_type = MV_TYPE_FIELD;
3252  s->mb_intra= 0;
3253  for(i=0; i<2; i++){
3254  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3255  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3256  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3257  }
3258  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3259  &dmin, &next_block, 0, 0);
3260  }
3262  s->mv_dir = MV_DIR_BACKWARD;
3263  s->mv_type = MV_TYPE_FIELD;
3264  s->mb_intra= 0;
3265  for(i=0; i<2; i++){
3266  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3267  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3268  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3269  }
3270  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3271  &dmin, &next_block, 0, 0);
3272  }
3274  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3275  s->mv_type = MV_TYPE_FIELD;
3276  s->mb_intra= 0;
3277  for(dir=0; dir<2; dir++){
3278  for(i=0; i<2; i++){
3279  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3280  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3281  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3282  }
3283  }
3284  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3285  &dmin, &next_block, 0, 0);
3286  }
3288  s->mv_dir = 0;
3289  s->mv_type = MV_TYPE_16X16;
3290  s->mb_intra= 1;
3291  s->mv[0][0][0] = 0;
3292  s->mv[0][0][1] = 0;
3293  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3294  &dmin, &next_block, 0, 0);
3295  if(s->h263_pred || s->h263_aic){
3296  if(best_s.mb_intra)
3297  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3298  else
3299  ff_clean_intra_table_entries(s); //old mode?
3300  }
3301  }
3302 
3303  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3304  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3305  const int last_qp= backup_s.qscale;
3306  int qpi, qp, dc[6];
3307  int16_t ac[6][16];
3308  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3309  static const int dquant_tab[4]={-1,1,-2,2};
3310  int storecoefs = s->mb_intra && s->dc_val[0];
3311 
3312  av_assert2(backup_s.dquant == 0);
3313 
3314  //FIXME intra
3315  s->mv_dir= best_s.mv_dir;
3316  s->mv_type = MV_TYPE_16X16;
3317  s->mb_intra= best_s.mb_intra;
3318  s->mv[0][0][0] = best_s.mv[0][0][0];
3319  s->mv[0][0][1] = best_s.mv[0][0][1];
3320  s->mv[1][0][0] = best_s.mv[1][0][0];
3321  s->mv[1][0][1] = best_s.mv[1][0][1];
3322 
3323  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3324  for(; qpi<4; qpi++){
3325  int dquant= dquant_tab[qpi];
3326  qp= last_qp + dquant;
3327  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3328  continue;
3329  backup_s.dquant= dquant;
3330  if(storecoefs){
3331  for(i=0; i<6; i++){
3332  dc[i]= s->dc_val[0][ s->block_index[i] ];
3333  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3334  }
3335  }
3336 
3337  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3338  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3339  if(best_s.qscale != qp){
3340  if(storecoefs){
3341  for(i=0; i<6; i++){
3342  s->dc_val[0][ s->block_index[i] ]= dc[i];
3343  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3344  }
3345  }
3346  }
3347  }
3348  }
3349  }
3350  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3351  int mx= s->b_direct_mv_table[xy][0];
3352  int my= s->b_direct_mv_table[xy][1];
3353 
3354  backup_s.dquant = 0;
3355  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3356  s->mb_intra= 0;
3357  ff_mpeg4_set_direct_mv(s, mx, my);
3358  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3359  &dmin, &next_block, mx, my);
3360  }
3361  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3362  backup_s.dquant = 0;
3363  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3364  s->mb_intra= 0;
3365  ff_mpeg4_set_direct_mv(s, 0, 0);
3366  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3367  &dmin, &next_block, 0, 0);
3368  }
3369  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3370  int coded=0;
3371  for(i=0; i<6; i++)
3372  coded |= s->block_last_index[i];
3373  if(coded){
3374  int mx,my;
3375  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3376  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3377  mx=my=0; //FIXME find the one we actually used
3378  ff_mpeg4_set_direct_mv(s, mx, my);
3379  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3380  mx= s->mv[1][0][0];
3381  my= s->mv[1][0][1];
3382  }else{
3383  mx= s->mv[0][0][0];
3384  my= s->mv[0][0][1];
3385  }
3386 
3387  s->mv_dir= best_s.mv_dir;
3388  s->mv_type = best_s.mv_type;
3389  s->mb_intra= 0;
3390 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3391  s->mv[0][0][1] = best_s.mv[0][0][1];
3392  s->mv[1][0][0] = best_s.mv[1][0][0];
3393  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3394  backup_s.dquant= 0;
3395  s->skipdct=1;
3396  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3397  &dmin, &next_block, mx, my);
3398  s->skipdct=0;
3399  }
3400  }
3401 
3402  s->current_picture.qscale_table[xy] = best_s.qscale;
3403 
3404  copy_context_after_encode(s, &best_s, -1);
3405 
3406  pb_bits_count= put_bits_count(&s->pb);
3407  flush_put_bits(&s->pb);
3408  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3409  s->pb= backup_s.pb;
3410 
3411  if(s->data_partitioning){
3412  pb2_bits_count= put_bits_count(&s->pb2);
3413  flush_put_bits(&s->pb2);
3414  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3415  s->pb2= backup_s.pb2;
3416 
3417  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3418  flush_put_bits(&s->tex_pb);
3419  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3420  s->tex_pb= backup_s.tex_pb;
3421  }
3422  s->last_bits= put_bits_count(&s->pb);
3423 
3424  if (CONFIG_H263_ENCODER &&
3425  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3427 
3428  if(next_block==0){ //FIXME 16 vs linesize16
3429  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3430  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3431  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3432  }
3433 
3434  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3435  ff_mpv_reconstruct_mb(s, s->block);
3436  } else {
3437  int motion_x = 0, motion_y = 0;
3438  s->mv_type=MV_TYPE_16X16;
3439  // only one MB-Type possible
3440 
3441  switch(mb_type){
3443  s->mv_dir = 0;
3444  s->mb_intra= 1;
3445  motion_x= s->mv[0][0][0] = 0;
3446  motion_y= s->mv[0][0][1] = 0;
3447  break;
3449  s->mv_dir = MV_DIR_FORWARD;
3450  s->mb_intra= 0;
3451  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3452  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3453  break;
3455  s->mv_dir = MV_DIR_FORWARD;
3456  s->mv_type = MV_TYPE_FIELD;
3457  s->mb_intra= 0;
3458  for(i=0; i<2; i++){
3459  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3460  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3461  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3462  }
3463  break;
3465  s->mv_dir = MV_DIR_FORWARD;
3466  s->mv_type = MV_TYPE_8X8;
3467  s->mb_intra= 0;
3468  for(i=0; i<4; i++){
3469  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3470  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3471  }
3472  break;
3474  if (CONFIG_MPEG4_ENCODER) {
3476  s->mb_intra= 0;
3477  motion_x=s->b_direct_mv_table[xy][0];
3478  motion_y=s->b_direct_mv_table[xy][1];
3479  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3480  }
3481  break;
3483  if (CONFIG_MPEG4_ENCODER) {
3485  s->mb_intra= 0;
3486  ff_mpeg4_set_direct_mv(s, 0, 0);
3487  }
3488  break;
3490  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3491  s->mb_intra= 0;
3492  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3493  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3494  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3495  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3496  break;
3498  s->mv_dir = MV_DIR_BACKWARD;
3499  s->mb_intra= 0;
3500  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3501  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3502  break;
3504  s->mv_dir = MV_DIR_FORWARD;
3505  s->mb_intra= 0;
3506  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3507  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3508  break;
3510  s->mv_dir = MV_DIR_FORWARD;
3511  s->mv_type = MV_TYPE_FIELD;
3512  s->mb_intra= 0;
3513  for(i=0; i<2; i++){
3514  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3515  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3516  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3517  }
3518  break;
3520  s->mv_dir = MV_DIR_BACKWARD;
3521  s->mv_type = MV_TYPE_FIELD;
3522  s->mb_intra= 0;
3523  for(i=0; i<2; i++){
3524  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3525  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3526  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3527  }
3528  break;
3530  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3531  s->mv_type = MV_TYPE_FIELD;
3532  s->mb_intra= 0;
3533  for(dir=0; dir<2; dir++){
3534  for(i=0; i<2; i++){
3535  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3536  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3537  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3538  }
3539  }
3540  break;
3541  default:
3542  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3543  }
3544 
3545  encode_mb(s, motion_x, motion_y);
3546 
3547  // RAL: Update last macroblock type
3548  s->last_mv_dir = s->mv_dir;
3549 
3550  if (CONFIG_H263_ENCODER &&
3551  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3553 
3554  ff_mpv_reconstruct_mb(s, s->block);
3555  }
3556 
3557  /* clean the MV table in IPS frames for direct mode in B-frames */
3558  if(s->mb_intra /* && I,P,S_TYPE */){
3559  s->p_mv_table[xy][0]=0;
3560  s->p_mv_table[xy][1]=0;
3561  }
3562 
3563  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3564  int w= 16;
3565  int h= 16;
3566 
3567  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3568  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3569 
3570  s->current_picture.encoding_error[0] += sse(
3571  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3572  s->dest[0], w, h, s->linesize);
3573  s->current_picture.encoding_error[1] += sse(
3574  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3575  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3576  s->current_picture.encoding_error[2] += sse(
3577  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3578  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3579  }
3580  if(s->loop_filter){
3581  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3583  }
3584  ff_dlog(s->avctx, "MB %d %d bits\n",
3585  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3586  }
3587  }
3588 
3589  //not beautiful here but we must write it before flushing so it has to be here
3590  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3592 
3593  write_slice_end(s);
3594 
3595 #if FF_API_RTP_CALLBACK
3597  /* Send the last GOB if RTP */
3598  if (s->avctx->rtp_callback) {
3599  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3600  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3601  /* Call the RTP callback to send the last GOB */
3602  emms_c();
3603  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3604  }
3606 #endif
3607 
3608  return 0;
3609 }
3610 
3611 #define MERGE(field) dst->field += src->field; src->field=0
3613  MERGE(me.scene_change_score);
3614  MERGE(me.mc_mb_var_sum_temp);
3615  MERGE(me.mb_var_sum_temp);
3616 }
3617 
3619  int i;
3620 
3621  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3622  MERGE(dct_count[1]);
3623  MERGE(mv_bits);
3624  MERGE(i_tex_bits);
3625  MERGE(p_tex_bits);
3626  MERGE(i_count);
3627  MERGE(f_count);
3628  MERGE(b_count);
3629  MERGE(skip_count);
3630  MERGE(misc_bits);
3631  MERGE(er.error_count);
3636 
3637  if (dst->noise_reduction){
3638  for(i=0; i<64; i++){
3639  MERGE(dct_error_sum[0][i]);
3640  MERGE(dct_error_sum[1][i]);
3641  }
3642  }
3643 
3644  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3645  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3646  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3647  flush_put_bits(&dst->pb);
3648 }
3649 
3650 static int estimate_qp(MpegEncContext *s, int dry_run){
3651  if (s->next_lambda){
3652  s->current_picture_ptr->f->quality =
3653  s->current_picture.f->quality = s->next_lambda;
3654  if(!dry_run) s->next_lambda= 0;
3655  } else if (!s->fixed_qscale) {
3656  int quality = ff_rate_estimate_qscale(s, dry_run);
3657  s->current_picture_ptr->f->quality =
3658  s->current_picture.f->quality = quality;
3659  if (s->current_picture.f->quality < 0)
3660  return -1;
3661  }
3662 
3663  if(s->adaptive_quant){
3664  switch(s->codec_id){
3665  case AV_CODEC_ID_MPEG4:
3666  if (CONFIG_MPEG4_ENCODER)
3668  break;
3669  case AV_CODEC_ID_H263:
3670  case AV_CODEC_ID_H263P:
3671  case AV_CODEC_ID_FLV1:
3672  if (CONFIG_H263_ENCODER)
3674  break;
3675  default:
3677  }
3678 
3679  s->lambda= s->lambda_table[0];
3680  //FIXME broken
3681  }else
3682  s->lambda = s->current_picture.f->quality;
3683  update_qscale(s);
3684  return 0;
3685 }
3686 
3687 /* must be called before writing the header */
3689  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3690  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3691 
3692  if(s->pict_type==AV_PICTURE_TYPE_B){
3693  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3694  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3695  }else{
3696  s->pp_time= s->time - s->last_non_b_time;
3697  s->last_non_b_time= s->time;
3698  av_assert1(s->picture_number==0 || s->pp_time > 0);
3699  }
3700 }
3701 
3703 {
3704  int i, ret;
3705  int bits;
3706  int context_count = s->slice_context_count;
3707 
3708  s->picture_number = picture_number;
3709 
3710  /* Reset the average MB variance */
3711  s->me.mb_var_sum_temp =
3712  s->me.mc_mb_var_sum_temp = 0;
3713 
3714  /* we need to initialize some time vars before we can encode B-frames */
3715  // RAL: Condition added for MPEG1VIDEO
3716  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3718  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3720 
3721  s->me.scene_change_score=0;
3722 
3723 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3724 
3725  if(s->pict_type==AV_PICTURE_TYPE_I){
3726  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3727  else s->no_rounding=0;
3728  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3729  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3730  s->no_rounding ^= 1;
3731  }
3732 
3733  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3734  if (estimate_qp(s,1) < 0)
3735  return -1;
3737  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3738  if(s->pict_type==AV_PICTURE_TYPE_B)
3739  s->lambda= s->last_lambda_for[s->pict_type];
3740  else
3741  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3742  update_qscale(s);
3743  }
3744 
3745  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3746  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3747  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3748  s->q_chroma_intra_matrix = s->q_intra_matrix;
3749  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3750  }
3751 
3752  s->mb_intra=0; //for the rate distortion & bit compare functions
3753  for(i=1; i<context_count; i++){
3754  ret = ff_update_duplicate_context(s->thread_context[i], s);
3755  if (ret < 0)
3756  return ret;
3757  }
3758 
3759  if(ff_init_me(s)<0)
3760  return -1;
3761 
3762  /* Estimate motion for every MB */
3763  if(s->pict_type != AV_PICTURE_TYPE_I){
3764  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3765  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3766  if (s->pict_type != AV_PICTURE_TYPE_B) {
3767  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3768  s->me_pre == 2) {
3769  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3770  }
3771  }
3772 
3773  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3774  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3775  /* I-Frame */
3776  for(i=0; i<s->mb_stride*s->mb_height; i++)
3777  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3778 
3779  if(!s->fixed_qscale){
3780  /* finding spatial complexity for I-frame rate control */
3781  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3782  }
3783  }
3784  for(i=1; i<context_count; i++){
3785  merge_context_after_me(s, s->thread_context[i]);
3786  }
3787  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3788  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3789  emms_c();
3790 
3791  if (s->me.scene_change_score > s->scenechange_threshold &&
3792  s->pict_type == AV_PICTURE_TYPE_P) {
3793  s->pict_type= AV_PICTURE_TYPE_I;
3794  for(i=0; i<s->mb_stride*s->mb_height; i++)
3795  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3796  if(s->msmpeg4_version >= 3)
3797  s->no_rounding=1;
3798  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3799  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3800  }
3801 
3802  if(!s->umvplus){
3803  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3804  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3805 
3806  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3807  int a,b;
3808  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3809  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3810  s->f_code= FFMAX3(s->f_code, a, b);
3811  }
3812 
3814  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3815  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3816  int j;
3817  for(i=0; i<2; i++){
3818  for(j=0; j<2; j++)
3819  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3820  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3821  }
3822  }
3823  }
3824 
3825  if(s->pict_type==AV_PICTURE_TYPE_B){
3826  int a, b;
3827 
3828  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3829  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3830  s->f_code = FFMAX(a, b);
3831 
3832  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3833  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3834  s->b_code = FFMAX(a, b);
3835 
3836  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3837  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3838  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3839  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3840  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3841  int dir, j;
3842  for(dir=0; dir<2; dir++){
3843  for(i=0; i<2; i++){
3844  for(j=0; j<2; j++){
3847  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3848  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3849  }
3850  }
3851  }
3852  }
3853  }
3854  }
3855 
3856  if (estimate_qp(s, 0) < 0)
3857  return -1;
3858 
3859  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3860  s->pict_type == AV_PICTURE_TYPE_I &&
3861  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3862  s->qscale= 3; //reduce clipping problems
3863 
3864  if (s->out_format == FMT_MJPEG) {
3865  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3866  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3867 
3868  if (s->avctx->intra_matrix) {
3869  chroma_matrix =
3870  luma_matrix = s->avctx->intra_matrix;
3871  }
3872  if (s->avctx->chroma_intra_matrix)
3873  chroma_matrix = s->avctx->chroma_intra_matrix;
3874 
3875  /* for mjpeg, we do include qscale in the matrix */
3876  for(i=1;i<64;i++){
3877  int j = s->idsp.idct_permutation[i];
3878 
3879  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3880  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3881  }
3882  s->y_dc_scale_table=
3883  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3884  s->chroma_intra_matrix[0] =
3885  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3886  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3887  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3888  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3889  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3890  s->qscale= 8;
3891  }
3892  if(s->codec_id == AV_CODEC_ID_AMV){
3893  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3894  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3895  for(i=1;i<64;i++){
3896  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3897 
3898  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3899  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3900  }
3901  s->y_dc_scale_table= y;
3902  s->c_dc_scale_table= c;
3903  s->intra_matrix[0] = 13;
3904  s->chroma_intra_matrix[0] = 14;
3905  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3906  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3907  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3908  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3909  s->qscale= 8;
3910  }
3911 
3912  if (s->out_format == FMT_SPEEDHQ) {
3913  s->y_dc_scale_table=
3914  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3915  }
3916 
3917  //FIXME var duplication
3918  s->current_picture_ptr->f->key_frame =
3919  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3920  s->current_picture_ptr->f->pict_type =
3921  s->current_picture.f->pict_type = s->pict_type;
3922 
3923  if (s->current_picture.f->key_frame)
3924  s->picture_in_gop_number=0;
3925 
3926  s->mb_x = s->mb_y = 0;
3927  s->last_bits= put_bits_count(&s->pb);
3928  switch(s->out_format) {
3929  case FMT_MJPEG:
3930  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3931  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3932  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3933  break;
3934  case FMT_SPEEDHQ:
3935  if (CONFIG_SPEEDHQ_ENCODER)
3937  break;
3938  case FMT_H261:
3939  if (CONFIG_H261_ENCODER)
3941  break;
3942  case FMT_H263:
3943  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3945  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3947  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3949  if (ret < 0)
3950  return ret;
3951  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3953  if (ret < 0)
3954  return ret;
3955  }
3956  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3958  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3960  else if (CONFIG_H263_ENCODER)
3962  break;
3963  case FMT_MPEG1:
3964  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3966  break;
3967  default:
3968  av_assert0(0);
3969  }
3970  bits= put_bits_count(&s->pb);
3971  s->header_bits= bits - s->last_bits;
3972 
3973  for(i=1; i<context_count; i++){
3974  update_duplicate_context_after_me(s->thread_context[i], s);
3975  }
3976  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3977  for(i=1; i<context_count; i++){
3978  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3979  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3980  merge_context_after_encode(s, s->thread_context[i]);
3981  }
3982  emms_c();
3983  return 0;
3984 }
3985 
3986 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3987  const int intra= s->mb_intra;
3988  int i;
3989 
3990  s->dct_count[intra]++;
3991 
3992  for(i=0; i<64; i++){
3993  int level= block[i];
3994 
3995  if(level){
3996  if(level>0){
3997  s->dct_error_sum[intra][i] += level;
3998  level -= s->dct_offset[intra][i];
3999  if(level<0) level=0;
4000  }else{
4001  s->dct_error_sum[intra][i] -= level;
4002  level += s->dct_offset[intra][i];
4003  if(level>0) level=0;
4004  }
4005  block[i]= level;
4006  }
4007  }
4008 }
4009 
4011  int16_t *block, int n,
4012  int qscale, int *overflow){
4013  const int *qmat;
4014  const uint16_t *matrix;
4015  const uint8_t *scantable;
4016  const uint8_t *perm_scantable;
4017  int max=0;
4018  unsigned int threshold1, threshold2;
4019  int bias=0;
4020  int run_tab[65];
4021  int level_tab[65];
4022  int score_tab[65];
4023  int survivor[65];
4024  int survivor_count;
4025  int last_run=0;
4026  int last_level=0;
4027  int last_score= 0;
4028  int last_i;
4029  int coeff[2][64];
4030  int coeff_count[64];
4031  int qmul, qadd, start_i, last_non_zero, i, dc;
4032  const int esc_length= s->ac_esc_length;
4033  uint8_t * length;
4034  uint8_t * last_length;
4035  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4036  int mpeg2_qscale;
4037 
4038  s->fdsp.fdct(block);
4039 
4040  if(s->dct_error_sum)
4041  s->denoise_dct(s, block);
4042  qmul= qscale*16;
4043  qadd= ((qscale-1)|1)*8;
4044 
4045  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4046  else mpeg2_qscale = qscale << 1;
4047 
4048  if (s->mb_intra) {
4049  int q;
4050  scantable= s->intra_scantable.scantable;
4051  perm_scantable= s->intra_scantable.permutated;
4052  if (!s->h263_aic) {
4053  if (n < 4)
4054  q = s->y_dc_scale;
4055  else
4056  q = s->c_dc_scale;
4057  q = q << 3;
4058  } else{
4059  /* For AIC we skip quant/dequant of INTRADC */
4060  q = 1 << 3;
4061  qadd=0;
4062  }
4063 
4064  /* note: block[0] is assumed to be positive */
4065  block[0] = (block[0] + (q >> 1)) / q;
4066  start_i = 1;
4067  last_non_zero = 0;
4068  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4069  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4070  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4071  bias= 1<<(QMAT_SHIFT-1);
4072 
4073  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4074  length = s->intra_chroma_ac_vlc_length;
4075  last_length= s->intra_chroma_ac_vlc_last_length;
4076  } else {
4077  length = s->intra_ac_vlc_length;
4078  last_length= s->intra_ac_vlc_last_length;
4079  }
4080  } else {
4081  scantable= s->inter_scantable.scantable;
4082  perm_scantable= s->inter_scantable.permutated;
4083  start_i = 0;
4084  last_non_zero = -1;
4085  qmat = s->q_inter_matrix[qscale];
4086  matrix = s->inter_matrix;
4087  length = s->inter_ac_vlc_length;
4088  last_length= s->inter_ac_vlc_last_length;
4089  }
4090  last_i= start_i;
4091 
4092  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4093  threshold2= (threshold1<<1);
4094 
4095  for(i=63; i>=start_i; i--) {
4096  const int j = scantable[i];
4097  int level = block[j] * qmat[j];
4098 
4099  if(((unsigned)(level+threshold1))>threshold2){
4100  last_non_zero = i;
4101  break;
4102  }
4103  }
4104 
4105  for(i=start_i; i<=last_non_zero; i++) {
4106  const int j = scantable[i];
4107  int level = block[j] * qmat[j];
4108 
4109 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4110 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4111  if(((unsigned)(level+threshold1))>threshold2){
4112  if(level>0){
4113  level= (bias + level)>>QMAT_SHIFT;
4114  coeff[0][i]= level;
4115  coeff[1][i]= level-1;
4116 // coeff[2][k]= level-2;
4117  }else{
4118  level= (bias - level)>>QMAT_SHIFT;
4119  coeff[0][i]= -level;
4120  coeff[1][i]= -level+1;
4121 // coeff[2][k]= -level+2;
4122  }
4123  coeff_count[i]= FFMIN(level, 2);
4124  av_assert2(coeff_count[i]);
4125  max |=level;
4126  }else{
4127  coeff[0][i]= (level>>31)|1;
4128  coeff_count[i]= 1;
4129  }
4130  }
4131 
4132  *overflow= s->max_qcoeff < max; //overflow might have happened
4133 
4134  if(last_non_zero < start_i){
4135  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4136  return last_non_zero;
4137  }
4138 
4139  score_tab[start_i]= 0;
4140  survivor[0]= start_i;
4141  survivor_count= 1;
4142 
4143  for(i=start_i; i<=last_non_zero; i++){
4144  int level_index, j, zero_distortion;
4145  int dct_coeff= FFABS(block[ scantable[i] ]);
4146  int best_score=256*256*256*120;
4147 
4148  if (s->fdsp.fdct == ff_fdct_ifast)
4149  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4150  zero_distortion= dct_coeff*dct_coeff;
4151 
4152  for(level_index=0; level_index < coeff_count[i]; level_index++){
4153  int distortion;
4154  int level= coeff[level_index][i];
4155  const int alevel= FFABS(level);
4156  int unquant_coeff;
4157 
4158  av_assert2(level);
4159 
4160  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4161  unquant_coeff= alevel*qmul + qadd;
4162  } else if(s->out_format == FMT_MJPEG) {
4163  j = s->idsp.idct_permutation[scantable[i]];
4164  unquant_coeff = alevel * matrix[j] * 8;
4165  }else{ // MPEG-1
4166  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4167  if(s->mb_intra){
4168  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4169  unquant_coeff = (unquant_coeff - 1) | 1;
4170  }else{
4171  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4172  unquant_coeff = (unquant_coeff - 1) | 1;
4173  }
4174  unquant_coeff<<= 3;
4175  }
4176 
4177  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4178  level+=64;
4179  if((level&(~127)) == 0){
4180  for(j=survivor_count-1; j>=0; j--){
4181  int run= i - survivor[j];
4182  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4183  score += score_tab[i-run];
4184 
4185  if(score < best_score){
4186  best_score= score;
4187  run_tab[i+1]= run;
4188  level_tab[i+1]= level-64;
4189  }
4190  }
4191 
4192  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4193  for(j=survivor_count-1; j>=0; j--){
4194  int run= i - survivor[j];
4195  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4196  score += score_tab[i-run];
4197  if(score < last_score){
4198  last_score= score;
4199  last_run= run;
4200  last_level= level-64;
4201  last_i= i+1;
4202  }
4203  }
4204  }
4205  }else{
4206  distortion += esc_length*lambda;
4207  for(j=survivor_count-1; j>=0; j--){
4208  int run= i - survivor[j];
4209  int score= distortion + score_tab[i-run];
4210 
4211  if(score < best_score){
4212  best_score= score;
4213  run_tab[i+1]= run;
4214  level_tab[i+1]= level-64;
4215  }
4216  }
4217 
4218  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4219  for(j=survivor_count-1; j>=0; j--){
4220  int run= i - survivor[j];
4221  int score= distortion + score_tab[i-run];
4222  if(score < last_score){
4223  last_score= score;
4224  last_run= run;
4225  last_level= level-64;
4226  last_i= i+1;
4227  }
4228  }
4229  }
4230  }
4231  }
4232 
4233  score_tab[i+1]= best_score;
4234 
4235  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4236  if(last_non_zero <= 27){
4237  for(; survivor_count; survivor_count--){
4238  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4239  break;
4240  }
4241  }else{
4242  for(; survivor_count; survivor_count--){
4243  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4244  break;
4245  }
4246  }
4247 
4248  survivor[ survivor_count++ ]= i+1;
4249  }
4250 
4251  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4252  last_score= 256*256*256*120;
4253  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4254  int score= score_tab[i];
4255  if (i)
4256  score += lambda * 2; // FIXME more exact?
4257 
4258  if(score < last_score){
4259  last_score= score;
4260  last_i= i;
4261  last_level= level_tab[i];
4262  last_run= run_tab[i];
4263  }
4264  }
4265  }
4266 
4267  s->coded_score[n] = last_score;
4268 
4269  dc= FFABS(block[0]);
4270  last_non_zero= last_i - 1;
4271  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4272 
4273  if(last_non_zero < start_i)
4274  return last_non_zero;
4275 
4276  if(last_non_zero == 0 && start_i == 0){
4277  int best_level= 0;
4278  int best_score= dc * dc;
4279 
4280  for(i=0; i<coeff_count[0]; i++){
4281  int level= coeff[i][0];
4282  int alevel= FFABS(level);
4283  int unquant_coeff, score, distortion;
4284 
4285  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4286  unquant_coeff= (alevel*qmul + qadd)>>3;
4287  } else{ // MPEG-1
4288  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4289  unquant_coeff = (unquant_coeff - 1) | 1;
4290  }
4291  unquant_coeff = (unquant_coeff + 4) >> 3;
4292  unquant_coeff<<= 3 + 3;
4293 
4294  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4295  level+=64;
4296  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4297  else score= distortion + esc_length*lambda;
4298 
4299  if(score < best_score){
4300  best_score= score;
4301  best_level= level - 64;
4302  }
4303  }
4304  block[0]= best_level;
4305  s->coded_score[n] = best_score - dc*dc;
4306  if(best_level == 0) return -1;
4307  else return last_non_zero;
4308  }
4309 
4310  i= last_i;
4311  av_assert2(last_level);
4312 
4313  block[ perm_scantable[last_non_zero] ]= last_level;
4314  i -= last_run + 1;
4315 
4316  for(; i>start_i; i -= run_tab[i] + 1){
4317  block[ perm_scantable[i-1] ]= level_tab[i];
4318  }
4319 
4320  return last_non_zero;
4321 }
4322 
4323 static int16_t basis[64][64];
4324 
4325 static void build_basis(uint8_t *perm){
4326  int i, j, x, y;
4327  emms_c();
4328  for(i=0; i<8; i++){
4329  for(j=0; j<8; j++){
4330  for(y=0; y<8; y++){
4331  for(x=0; x<8; x++){
4332  double s= 0.25*(1<<BASIS_SHIFT);
4333  int index= 8*i + j;
4334  int perm_index= perm[index];
4335  if(i==0) s*= sqrt(0.5);
4336  if(j==0) s*= sqrt(0.5);
4337  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4338  }
4339  }
4340  }
4341  }
4342 }
4343 
/**
 * Rate-distortion refinement of an already-quantized 8x8 block.
 *
 * Repeatedly tries changing single quantized coefficients by +/-1 and keeps
 * the change with the best combined score of reconstruction error (measured
 * with try_8x8basis() against the running residual rem[]) and VLC bit-cost
 * difference (from the length/last_length tables), until no single +/-1
 * change improves the score.
 *
 * @param s      encoder context (quantizer state, DSP function pointers)
 * @param block  quantized coefficients, updated in place
 * @param weight per-coefficient perceptual weights; rewritten in place to a
 *               normalized 16..63 range before use
 * @param orig   original (unquantized) pixel/residual values used as the
 *               distortion reference
 * @param n      block index (selects luma/chroma DC scale and VLC tables)
 * @param qscale quantizer scale
 * @return index of the new last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* Lazily build the DCT basis table on first use. */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* Chroma blocks may use a dedicated AC VLC length table if present. */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* Seed the residual with (rounded DC) - original, in RECON_SHIFT
     * fixed-point; dequantized coefficients are accumulated into it below. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Normalize the perceptual weights into the 16..63 range and compute
     * their energy, which scales the lambda used for the bit-cost term. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Initial pass: record the zero-run before each nonzero coefficient in
     * run_tab[] and add each dequantized coefficient's contribution to rem. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Main refinement loop: find the single +/-1 change with the best score,
     * apply it, and repeat until no change beats doing nothing. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* Gradient of the weighted error, used below to reject changes
             * whose sign agrees with the residual's DCT-domain gradient. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra: also consider nudging the DC coefficient by +/-1. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                /* keep the dequantized DC inside the representable range */
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* Try +/-1 on every AC coefficient position. */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing the magnitude of an existing coefficient:
                         * bit-cost delta of re-coding the same (run, level). */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                        - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Turning a zero into +/-1: splits the current run. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                /* previous coefficient loses its "last" coding */
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a +/-1 coefficient: merges two runs. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            /* previous coefficient becomes the new "last" */
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* Add the distortion delta of applying this change. */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change, update last_non_zero, rebuild the
             * run table and fold the change into the residual. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            /* No single change improves the score: converged. */
            break;
        }
    }

    return last_non_zero;
}
4659 
4660 /**
4661  * Permute an 8x8 block according to permutation.
4662  * @param block the block which will be permuted according to
4663  * the given permutation vector
4664  * @param permutation the permutation vector
4665  * @param last the last non zero coefficient in scantable order, used to
4666  * speed the permutation up
4667  * @param scantable the used scantable, this is only used to speed the
4668  * permutation up, the block is not (inverse) permutated
4669  * to scantable order!
4670  */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t scratch[64];
    int i;

    /* Nothing to do for an empty/DC-only block. */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* Gather the coefficients covered by the scan into scratch storage,
     * clearing them in the block so the scatter below cannot collide. */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];

        scratch[pos] = block[pos];
        block[pos]   = 0;
    }

    /* Scatter each coefficient back to its permuted position. */
    for (i = 0; i <= last; i++) {
        const int pos = scantable[i];

        block[permutation[pos]] = scratch[pos];
    }
}
4695 
/* NOTE(review): the opening signature line of this function (original line
 * 4696) was a hyperlink in the generated listing and is missing from this
 * extraction; only the remaining parameters are visible below.  Presumably
 * this is the generic dct_quantize implementation taking (MpegEncContext *s,
 * ...) — confirm against the upstream file before editing. */
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    /* Forward DCT of the block, with optional denoising. */
    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* Scan backwards from the highest frequency to find the last coefficient
     * that does not quantize to zero; everything after it is cleared. */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* Quantize the remaining coefficients in scan order.  The unsigned
     * range check selects values outside [-threshold1, threshold1+?], i.e.
     * those that survive quantization; the rest become zero. */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
4778 
4779 #define OFFSET(x) offsetof(MpegEncContext, x)
4780 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private AVOptions for the baseline H.263 encoder.
 * NOTE(review): one table line (original line 4784, continuation of the
 * mb_info entry or an additional option) is missing from this extraction —
 * verify against the upstream file. */
static const AVOption h263_options[] = {
    { "obmc",    "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { NULL },
};
4787 
/* AVClass exposing h263_options on the H.263 encoder's private context. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4794 
/* Registration entry for the H.263 encoder.
 * NOTE(review): the opening line (presumably "AVCodec ff_h263_encoder = {")
 * and the linked initializers (e.g. .init, .pix_fmts — original lines 4795,
 * 4801, 4805) are missing from this extraction; the struct is incomplete
 * here.  Confirm against the upstream file. */
    .name           = "h263",
    .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &h263_class,
};
4808 
/* Private AVOptions for the H.263+ (H.263v2) encoder.
 * NOTE(review): one table line (original line 4814) is missing from this
 * extraction — verify against the upstream file. */
static const AVOption h263p_options[] = {
    { "umv",    "Use unlimited motion vectors.",    OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv",    "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc",   "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
    { NULL },
};
/* AVClass exposing h263p_options on the H.263+ encoder's private context. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4823 
/* Registration entry for the H.263+ encoder.
 * NOTE(review): the opening line (presumably "AVCodec ff_h263p_encoder = {")
 * and linked initializers (original lines 4824, 4830, 4835) are missing from
 * this extraction; the struct is incomplete here. */
    .name           = "h263p",
    .long_name      = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263P,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &h263p_class,
};
4838 
/* AVClass for the MSMPEG-4 v2 encoder; uses the shared generic options. */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4845 
/* Registration entry for the MSMPEG-4 v2 encoder.
 * NOTE(review): the opening line (presumably "AVCodec ff_msmpeg4v2_encoder
 * = {") and linked initializers (original lines 4846, 4852, 4856) are
 * missing from this extraction; the struct is incomplete here. */
    .name           = "msmpeg4v2",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V2,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &msmpeg4v2_class,
};
4859 
/* AVClass for the MSMPEG-4 v3 encoder; uses the shared generic options. */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4866 
/* Registration entry for the MSMPEG-4 v3 encoder (codec name "msmpeg4").
 * NOTE(review): the opening line (presumably "AVCodec ff_msmpeg4v3_encoder
 * = {") and linked initializers (original lines 4867, 4873, 4877) are
 * missing from this extraction; the struct is incomplete here. */
    .name           = "msmpeg4",
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSMPEG4V3,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &msmpeg4v3_class,
};
4880 
/* AVClass for the WMV7 (wmv1) encoder; uses the shared generic options. */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4887 
/* Registration entry for the Windows Media Video 7 encoder.
 * NOTE(review): the opening line ("AVCodec ff_wmv1_encoder = {", per the
 * listing's cross-reference index at mpegvideo_enc.c:4888) and linked
 * initializers (original lines 4894, 4898) are missing from this
 * extraction; the struct is incomplete here. */
    .name           = "wmv1",
    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WMV1,
    .priv_data_size = sizeof(MpegEncContext),
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &wmv1_class,
};
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:348
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:913
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:75
ff_speedhq_end_slice
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:150
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
AVCodec
AVCodec.
Definition: codec.h:197
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
stride
int stride
Definition: mace.c:144
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
h263data.h
ff_speedhq_encode_init
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:102
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:206
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:406
av_clip
#define av_clip
Definition: common.h:122
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3688
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:395
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:206
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:43
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:124
r
const char * r
Definition: vf_curves.c:116
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_wmv1_encoder
AVCodec ff_wmv1_encoder
Definition: mpegvideo_enc.c:4888
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1361
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
AVCodecContext::mpeg_quant
attribute_deprecated int mpeg_quant
Definition: avcodec.h:831
mem_internal.h
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1335
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:676
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1606
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1423
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
ff_speedhq_encode_picture_header
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:140
thread.h
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:820
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2619
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2765
MAX_RUN
#define MAX_RUN
Definition: rl.h:35
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:57
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4671
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4323
ff_mjpeg_encode_picture_header
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
Definition: mjpegenc_common.c:220
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:1036
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:162
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2807
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:855
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1766
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:407
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
pixdesc.h
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:369
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
w
uint8_t w
Definition: llviddspenc.c:39
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
BUF_BITS
static const int BUF_BITS
Definition: put_bits.h:42
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:369
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:1171
MpegEncContext::f_count
int f_count
Definition: mpegvideo.h:349
AVOption
AVOption.
Definition: opt.h:248
ff_mpv_generic_options
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:87
b
#define b
Definition: input.c:41
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
data
const char data[16]
Definition: mxf.c:142
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:213
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
AVCodecContext::p_tex_bits
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1537
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:227
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:258
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:121
AVCodecContext::skip_count
attribute_deprecated int skip_count
Definition: avcodec.h:1543
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1904
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
ff_h261_encode_init
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:373
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2267
max
#define max(a, b)
Definition: cuda_runtime.h:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
mathematics.h
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
Picture
Picture.
Definition: mpegpicture.h:45
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2788
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:151
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:1024
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1058
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1387
AVCodecContext::frame_skip_threshold
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1467
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:509
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:475
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:692
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
thread.h
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:217
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1605
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:593
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:52
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:48
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:950
AVCodecContext::frame_bits
attribute_deprecated int frame_bits
Definition: avcodec.h:1549
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1859
sp5x.h
AVCodecContext::pre_me
attribute_deprecated int pre_me
Definition: avcodec.h:976
OFFSET
#define OFFSET(x)
Definition: mpegvideo_enc.c:4779
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:69
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3650
AVCodecContext::prediction_method
attribute_deprecated int prediction_method
Definition: avcodec.h:895
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
FDCTDSPContext
Definition: fdctdsp.h:26
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
av_packet_add_side_data
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:309
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:897
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:545
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3612
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:319
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:118
fail
#define fail()
Definition: checkasm.h:133
h261.h
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1773
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:413
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
get_intra_count
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1148
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1015
avcodec_find_encoder
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:941
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
perm
perm
Definition: f_perms.c:74
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:594
MpegEncContext::umvplus
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:377
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
pts
static int64_t pts
Definition: transcode_aac.c:652
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:64
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:54
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
ff_sqrt
#define ff_sqrt
Definition: mathops.h:206
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
mpv_encode_init_static
static void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:252
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
h263_options
static const AVOption h263_options[]
Definition: mpegvideo_enc.c:4781
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:470
flv.h
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:277
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:476
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2917
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:472
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg4videoenc.c:1059
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:346
AVCodecContext::p_count
attribute_deprecated int p_count
Definition: avcodec.h:1541
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1111
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1787
RateControlContext
rate control context.
Definition: ratecontrol.h:63
mpeg12.h
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:220
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2894
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
ff_h261_get_picture_format
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:41
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:85
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4325
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:173
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:757
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:47
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:104
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
s
#define s(width, name)
Definition: cbs_vp9.c:257
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:88
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:489
encode_frame
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1371
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
ff_mpeg2_dc_scale_table
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:261
g
const char * g
Definition: vf_curves.c:117
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:289
msmpeg4v3_class
static const AVClass msmpeg4v3_class
Definition: mpegvideo_enc.c:4860
sse
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2744
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
HUFFMAN_TABLE_OPTIMAL
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
AVCodecContext::mv_bits
attribute_deprecated int mv_bits
Definition: avcodec.h:1531
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:67
bits
uint8_t bits
Definition: vp3data.h:141
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
FMT_H261
@ FMT_H261
Definition: mpegutils.h:125
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
AVCodecContext::brd_scale
attribute_deprecated int brd_scale
Definition: avcodec.h:1109
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1747
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:204
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:63
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
MpegEncContext::b_count
int b_count
Definition: mpegvideo.h:350
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1416
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1699
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:453
PutBitContext
Definition: put_bits.h:44
Picture::encoding_error
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:91
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:546
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:876
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2832
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:88
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1401
AVCPBProperties::avg_bitrate
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:477
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:440
h263_class
static const AVClass h263_class
Definition: mpegvideo_enc.c:4788
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:411
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:173
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:47
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2874
run
uint8_t run
Definition: svq3.c:205
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:288
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:238
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
AVCodecContext::i_tex_bits
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1535
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
AVCodecContext::misc_bits
attribute_deprecated int misc_bits
Definition: avcodec.h:1545
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:571
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:586
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:56
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
src
#define src
Definition: vp8dsp.c:255
ff_msmpeg4v3_encoder
AVCodec ff_msmpeg4v3_encoder
Definition: mpegvideo_enc.c:4867
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2331
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:274
mathops.h
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:344
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3611
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:940
AVCodecContext::b_frame_strategy
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:810
AVCodecContext::noise_reduction
attribute_deprecated int noise_reduction
Definition: avcodec.h:1054
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1096
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1025
qpeldsp.h
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1487
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:66
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:232
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1044
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:127
wmv2.h
AVOnce
#define AVOnce
Definition: thread.h:172
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:266
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1561
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:862
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:262
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3986
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:376
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
MAX_LEVEL
#define MAX_LEVEL
Definition: rl.h:36
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:659
get_sae
static int get_sae(uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1134
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1557
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVCodecContext::vbv_delay
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2029
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3618
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:290
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:73
VE
#define VE
Definition: mpegvideo_enc.c:4780
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:370
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:355
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:731
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:204
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1179
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2121
CONFIG_MSMPEG4_ENCODER
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:114
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:207
FFMAX
#define FFMAX(a, b)
Definition: common.h:103
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:335
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:333
AVCodecContext::frame_skip_exp
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1475
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegutils.h:128
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:53
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
size
int size
Definition: twinvq_data.h:10344
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVCodecContext::rtp_payload_size
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1520
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:165
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1785
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:496
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
Definition: mpegvideo_enc.c:2235
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:105
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:346
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:265
AVCPBProperties::max_bitrate
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:459
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:112
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:277
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
MpegEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:128
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:166
rv10.h
AVCodecContext::i_count
attribute_deprecated int i_count
Definition: avcodec.h:1539
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1604
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:214
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:47
AVCodec::id
enum AVCodecID id
Definition: codec.h:211
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:176
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:490
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:750
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:495
ff_msmpeg4v2_encoder
AVCodec ff_msmpeg4v2_encoder
Definition: mpegvideo_enc.c:4846
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2693
src1
#define src1
Definition: h264pred.c:140
ff_msmpeg4_encode_init
void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:116
ff_speedhq_encode_mb
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:245
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:214
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:494
FMT_H263
@ FMT_H263
Definition: mpegutils.h:126
i
int i
Definition: input.c:407
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:54
AVCodecContext::b_sensitivity
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1142
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:488
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:76
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4010
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2953
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:49
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:34
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1392
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1062
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:442
MpegEncContext::obmc
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:410
AVCodecContext::frame_skip_cmp
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1479
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:288
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:55
av_always_inline
#define av_always_inline
Definition: attributes.h:49
AVCodecContext::header_bits
attribute_deprecated int header_bits
Definition: avcodec.h:1533
get_visual_weight
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2211
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:951
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
h263p_class
static const AVClass h263p_class
Definition: mpegvideo_enc.c:4817
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1322
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:204
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:571
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
AVCPBProperties::min_bitrate
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:468
AVCodecContext::height
int height
Definition: avcodec.h:709
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:364
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:127
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:343
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
MpegEncContext::h263_slice_structured
int h263_slice_structured
Definition: mpegvideo.h:379
avcodec.h
msmpeg4.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:765
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:584
AVCPBProperties::buffer_size
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:486
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1601
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_mjpeg_encode_close
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:313
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:147
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:596
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:238
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:233
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
AVCodecContext::scenechange_threshold
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1050
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:513
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:883
AVCodecContext
main external API structure.
Definition: avcodec.h:536
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1792
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:84
Picture::shared
int shared
Definition: mpegpicture.h:89
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:68
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:342
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:351
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1380
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:46
MpegEncContext::gop_picture_number
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:456
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1539
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:225
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:869
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1716
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2248
temp
else temp
Definition: vf_mcdeint.c:259
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:864
av_clip_uint8
#define av_clip_uint8
Definition: common.h:128
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:76
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1027
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
shift
static int shift(int a, int b)
Definition: sonic.c:82
desc
const char * desc
Definition: libsvtav1.c:79
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
ff_speedhq_mb_y_order_to_mb
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:273
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:796
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:587
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:92
packet_internal.h
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
skip_check
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
Definition: mpegvideo_enc.c:1330
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:296
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:581
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1430
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:110
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4344
ff_h263p_encoder
AVCodec ff_h263p_encoder
Definition: mpegvideo_enc.c:4824
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:594
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
FF_CMP_DCTMAX
#define FF_CMP_DCTMAX
Definition: avcodec.h:954
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1187
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1026
AVPacket
This structure stores compressed data.
Definition: packet.h:346
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:563
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2177
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:1045
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
encode_picture
static int encode_picture(MpegEncContext *s, int picture_number)
Definition: mpegvideo_enc.c:3702
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:709
AVCodecContext::me_penalty_compensation
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1097
bytestream.h
wmv1_class
static const AVClass wmv1_class
Definition: mpegvideo_enc.c:4881
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:595
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:414
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2654
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:65
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:154
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2626
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:70
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:109
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4696
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
int
int
Definition: ffmpeg_filter.c:170
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:436
msmpeg4v2_class
static const AVClass msmpeg4v2_class
Definition: mpegvideo_enc.c:4839
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:629
AVCodecContext::frame_skip_factor
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1471
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:81
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
mb_info
Definition: cinepakenc.c:87
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:184
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
MpegEncContext::alt_inter_vlc
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:380
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:347
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
pixblockdsp.h
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:54
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
h263p_options
static const AVOption h263p_options[]
Definition: mpegvideo_enc.c:4809
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2855
ff_h263_encoder
AVCodec ff_h263_encoder
Definition: mpegvideo_enc.c:4795
intmath.h