FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "dct.h"
43 #include "idctdsp.h"
44 #include "mpeg12.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "h261.h"
48 #include "h263.h"
49 #include "h263data.h"
50 #include "mjpegenc_common.h"
51 #include "mathops.h"
52 #include "mpegutils.h"
53 #include "mjpegenc.h"
54 #include "msmpeg4.h"
55 #include "pixblockdsp.h"
56 #include "qpeldsp.h"
57 #include "faandct.h"
58 #include "thread.h"
59 #include "aandcttab.h"
60 #include "flv.h"
61 #include "mpeg4video.h"
62 #include "internal.h"
63 #include "bytestream.h"
64 #include "wmv2.h"
65 #include "rv10.h"
66 #include "packet_internal.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
/*
 * NOTE(review): this listing is a Doxygen extraction; a few source lines
 * (e.g. the tail of the first fdct-type condition and its opening brace)
 * were elided and are missing below.
 */
/*
 * Build the per-qscale quantizer multiplier tables for every qscale in
 * [qmin, qmax]: qmat[qscale][i] holds a fixed-point reciprocal of
 * (qscale2 * quant_matrix[i]) so quantization becomes multiply + shift.
 * The scaling differs per forward-DCT implementation: ff_fdct_ifast keeps
 * the AAN scale factors in its output, so they are folded into the
 * reciprocal (ff_aanscales, extra +14 shift). qmat16 additionally stores
 * a rounding bias derived from 'bias' for the 16-bit quantizer path.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
/* MPEG-2 non-linear qscale mapping when q_scale_type is set, else 2*qscale. */
103  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
104  else qscale2 = qscale << 1;
105 
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
111  for (i = 0; i < 64; i++) {
/* j: coefficient index after the IDCT permutation used by this build. */
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
/* ifast output carries the AAN scale; fold it into the denominator. */
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
/* Clamp degenerate multipliers (0 or 128*256) so the 16-bit quantizer
 * never divides by zero / overflows its fixed-point range. */
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
/* Second plane stores the rounding bias matched to the multiplier. */
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
/* Overflow audit: count how many extra shift bits would be needed for
 * max_coeff * qmat to fit in an int. Starting at i = intra skips the DC
 * coefficient for intra tables. */
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
/* Non-zero shift means QMAT_SHIFT is too large for this matrix/qscale
 * combination; warn rather than fail. */
167  if (shift) {
168  av_log(s->avctx, AV_LOG_INFO,
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
/*
 * Derive s->qscale (and s->lambda2) from the current rate-control lambda.
 * NOTE(review): the extraction elided the final shift amount of the
 * lambda2 expression (the line after 198).
 */
174 static inline void update_qscale(MpegEncContext *s)
175 {
/* First branch is intentionally disabled ("&& 0"): it would pick the
 * MPEG-2 non-linear qscale whose scaled value is closest to lambda*139,
 * honoring qmin/qmax (qmax ignored while vbv_ignore_qmax is set). */
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
184  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
/* Linear mapping lambda -> qscale with rounding, then clip to the user
 * range; while VBV forces qmax to be ignored, clip to 31 instead. */
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
/* Keep lambda2 consistent with lambda (rounded square). */
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
/* NOTE(review): the function signature line was elided by the extraction;
 * judging by the body it takes an MpegEncContext *s — confirm upstream. */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
/* Per macroblock: convert the stored lambda to a qp with the same rounded
 * lambda->qscale mapping used elsewhere in this file, then clip to the
 * configured [qmin, qmax] range. */
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
/* NOTE(review): the signature line was elided by the extraction; from the
 * fields copied this duplicates per-picture coding state between two
 * MpegEncContext instances named dst and src — confirm against upstream. */
233 {
/* Temporary field-copy macro, undefined again below. */
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
/* NOTE(review): the signature line was elided by the extraction. */
255 {
256  int i;
258 
/* Default fcode table: entries for the +/-16 range around MAX_MV are 1. */
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
/* Point the motion-estimation penalty and fcode lookups at the shared
 * default tables. */
262  s->me.mv_penalty = default_mv_penalty;
263  s->fcode_tab = default_fcode_tab;
264 
/* Restart picture numbering for a fresh encode. */
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
/* NOTE(review): the signature line and the x86-specific init call under
 * the ARCH_X86 check were elided by the extraction. */
270 {
271  if (ARCH_X86)
273 
274  if (CONFIG_H263_ENCODER)
275  ff_h263dsp_init(&s->h263dsp);
/* Install the portable C quantizer hooks unless arch-specific init above
 * already provided them; keep the non-trellis variant reachable through
 * fast_dct_quantize. */
276  if (!s->dct_quantize)
277  s->dct_quantize = ff_dct_quantize_c;
278  if (!s->denoise_dct)
279  s->denoise_dct = denoise_dct_c;
280  s->fast_dct_quantize = s->dct_quantize;
/* Trellis quantization replaces the default when the user requested it. */
281  if (s->avctx->trellis)
282  s->dct_quantize = dct_quantize_trellis_c;
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
301  "only YUV420 and YUV422 are supported\n");
302  return AVERROR(EINVAL);
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
316  format_supported = 1;
317  /* MPEG color space */
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return AVERROR(EINVAL);
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return AVERROR(EINVAL);
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
339  s->chroma_format = CHROMA_444;
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
343  s->chroma_format = CHROMA_422;
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
348  s->chroma_format = CHROMA_420;
349  break;
350  }
351 
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
357  s->rtp_payload_size = avctx->rtp_payload_size;
359  s->me_penalty_compensation = avctx->me_penalty_compensation;
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
384  s->strict_std_compliance = avctx->strict_std_compliance;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
387  s->intra_dc_precision = avctx->intra_dc_precision;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
409  s->user_specified_pts = AV_NOPTS_VALUE;
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418  /* Fixed QSCALE */
419  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
420 
421  s->adaptive_quant = (s->avctx->lumi_masking ||
422  s->avctx->dark_masking ||
423  s->avctx->temporal_cplx_masking ||
424  s->avctx->spatial_cplx_masking ||
425  s->avctx->p_masking ||
426  s->border_masking ||
427  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
428  !s->fixed_qscale;
429 
430  s->loop_filter = !!(s->avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
431 
433  switch(avctx->codec_id) {
436  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437  break;
438  case AV_CODEC_ID_MPEG4:
442  if (avctx->rc_max_rate >= 15000000) {
443  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444  } else if(avctx->rc_max_rate >= 2000000) {
445  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446  } else if(avctx->rc_max_rate >= 384000) {
447  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448  } else
449  avctx->rc_buffer_size = 40;
450  avctx->rc_buffer_size *= 16384;
451  break;
452  }
453  if (avctx->rc_buffer_size) {
454  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455  }
456  }
457 
458  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460  return AVERROR(EINVAL);
461  }
462 
465  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
466  }
467 
469  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470  return AVERROR(EINVAL);
471  }
472 
474  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475  return AVERROR(EINVAL);
476  }
477 
478  if (avctx->rc_max_rate &&
482  "impossible bitrate constraints, this will fail\n");
483  }
484 
485  if (avctx->rc_buffer_size &&
486  avctx->bit_rate * (int64_t)avctx->time_base.num >
487  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (!s->fixed_qscale &&
495  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
497  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
498  if (nbt <= INT_MAX) {
499  avctx->bit_rate_tolerance = nbt;
500  } else
501  avctx->bit_rate_tolerance = INT_MAX;
502  }
503 
504  if (s->avctx->rc_max_rate &&
505  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
506  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
507  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
508  90000LL * (avctx->rc_buffer_size - 1) >
509  s->avctx->rc_max_rate * 0xFFFFLL) {
511  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
512  "specified vbv buffer is too large for the given bitrate!\n");
513  }
514 
515  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
516  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
517  s->codec_id != AV_CODEC_ID_FLV1) {
518  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
519  return AVERROR(EINVAL);
520  }
521 
522  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
524  "OBMC is only supported with simple mb decision\n");
525  return AVERROR(EINVAL);
526  }
527 
528  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
529  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
530  return AVERROR(EINVAL);
531  }
532 
533  if (s->max_b_frames &&
534  s->codec_id != AV_CODEC_ID_MPEG4 &&
535  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
536  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
537  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
538  return AVERROR(EINVAL);
539  }
540  if (s->max_b_frames < 0) {
542  "max b frames must be 0 or positive for mpegvideo based encoders\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
547  s->codec_id == AV_CODEC_ID_H263 ||
548  s->codec_id == AV_CODEC_ID_H263P) &&
549  (avctx->sample_aspect_ratio.num > 255 ||
550  avctx->sample_aspect_ratio.den > 255)) {
552  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
556  }
557 
558  if ((s->codec_id == AV_CODEC_ID_H263 ||
559  s->codec_id == AV_CODEC_ID_H263P) &&
560  (avctx->width > 2048 ||
561  avctx->height > 1152 )) {
562  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
563  return AVERROR(EINVAL);
564  }
565  if ((s->codec_id == AV_CODEC_ID_H263 ||
566  s->codec_id == AV_CODEC_ID_H263P) &&
567  ((avctx->width &3) ||
568  (avctx->height&3) )) {
569  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
570  return AVERROR(EINVAL);
571  }
572 
573  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
574  (avctx->width > 4095 ||
575  avctx->height > 4095 )) {
576  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
577  return AVERROR(EINVAL);
578  }
579 
580  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
581  (avctx->width > 16383 ||
582  avctx->height > 16383 )) {
583  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
584  return AVERROR(EINVAL);
585  }
586 
587  if (s->codec_id == AV_CODEC_ID_RV10 &&
588  (avctx->width &15 ||
589  avctx->height&15 )) {
590  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
591  return AVERROR(EINVAL);
592  }
593 
594  if (s->codec_id == AV_CODEC_ID_RV20 &&
595  (avctx->width &3 ||
596  avctx->height&3 )) {
597  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
598  return AVERROR(EINVAL);
599  }
600 
601  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
602  s->codec_id == AV_CODEC_ID_WMV2) &&
603  avctx->width & 1) {
604  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
605  return AVERROR(EINVAL);
606  }
607 
608  if ((s->avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME)) &&
609  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
610  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
611  return AVERROR(EINVAL);
612  }
613 
614 #if FF_API_PRIVATE_OPT
616  if (avctx->mpeg_quant)
617  s->mpeg_quant = avctx->mpeg_quant;
619 #endif
620 
621  // FIXME mpeg2 uses that too
622  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
623  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
625  "mpeg2 style quantization not supported by codec\n");
626  return AVERROR(EINVAL);
627  }
628 
629  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
630  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
631  return AVERROR(EINVAL);
632  }
633 
634  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
635  s->avctx->mb_decision != FF_MB_DECISION_RD) {
636  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
637  return AVERROR(EINVAL);
638  }
639 
640  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
641  (s->codec_id == AV_CODEC_ID_AMV ||
642  s->codec_id == AV_CODEC_ID_MJPEG)) {
643  // Used to produce garbage with MJPEG.
645  "QP RD is no longer compatible with MJPEG or AMV\n");
646  return AVERROR(EINVAL);
647  }
648 
649 #if FF_API_PRIVATE_OPT
652  s->scenechange_threshold = avctx->scenechange_threshold;
654 #endif
655 
656  if (s->scenechange_threshold < 1000000000 &&
657  (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
659  "closed gop with scene change detection are not supported yet, "
660  "set threshold to 1000000000\n");
661  return AVERROR_PATCHWELCOME;
662  }
663 
664  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
665  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
666  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
668  "low delay forcing is only available for mpeg2, "
669  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
670  return AVERROR(EINVAL);
671  }
672  if (s->max_b_frames != 0) {
674  "B-frames cannot be used with low delay\n");
675  return AVERROR(EINVAL);
676  }
677  }
678 
679  if (s->q_scale_type == 1) {
680  if (avctx->qmax > 28) {
682  "non linear quant only supports qmax <= 28 currently\n");
683  return AVERROR_PATCHWELCOME;
684  }
685  }
686 
687  if (avctx->slices > 1 &&
689  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
690  return AVERROR(EINVAL);
691  }
692 
693  if (s->avctx->thread_count > 1 &&
694  s->codec_id != AV_CODEC_ID_MPEG4 &&
695  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
696  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
697  s->codec_id != AV_CODEC_ID_MJPEG &&
698  (s->codec_id != AV_CODEC_ID_H263P)) {
700  "multi threaded encoding not supported by codec\n");
701  return AVERROR_PATCHWELCOME;
702  }
703 
704  if (s->avctx->thread_count < 1) {
706  "automatic thread number detection not supported by codec, "
707  "patch welcome\n");
708  return AVERROR_PATCHWELCOME;
709  }
710 
711  if (!avctx->time_base.den || !avctx->time_base.num) {
712  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
713  return AVERROR(EINVAL);
714  }
715 
716 #if FF_API_PRIVATE_OPT
718  if (avctx->b_frame_strategy)
719  s->b_frame_strategy = avctx->b_frame_strategy;
720  if (avctx->b_sensitivity != 40)
721  s->b_sensitivity = avctx->b_sensitivity;
723 #endif
724 
725  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
727  "notice: b_frame_strategy only affects the first pass\n");
728  s->b_frame_strategy = 0;
729  }
730 
732  if (i > 1) {
733  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
734  avctx->time_base.den /= i;
735  avctx->time_base.num /= i;
736  //return -1;
737  }
738 
739  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id==AV_CODEC_ID_AMV) {
740  // (a + x * 3 / 8) / x
741  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
742  s->inter_quant_bias = 0;
743  } else {
744  s->intra_quant_bias = 0;
745  // (a - x / 4) / x
746  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
747  }
748 
749  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
750  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
751  return AVERROR(EINVAL);
752  }
753 
754  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
755 
756  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
757  s->avctx->time_base.den > (1 << 16) - 1) {
759  "timebase %d/%d not supported by MPEG 4 standard, "
760  "the maximum admitted value for the timebase denominator "
761  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
762  (1 << 16) - 1);
763  return AVERROR(EINVAL);
764  }
765  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
766 
767  switch (avctx->codec->id) {
769  s->out_format = FMT_MPEG1;
770  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
771  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
772  break;
774  s->out_format = FMT_MPEG1;
775  s->low_delay = !!(s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
776  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
777  s->rtp_mode = 1;
778  break;
779  case AV_CODEC_ID_MJPEG:
780  case AV_CODEC_ID_AMV:
781  s->out_format = FMT_MJPEG;
782  s->intra_only = 1; /* force intra only for jpeg */
783  if (!CONFIG_MJPEG_ENCODER)
785  if ((ret = ff_mjpeg_encode_init(s)) < 0)
786  return ret;
787  avctx->delay = 0;
788  s->low_delay = 1;
789  break;
790  case AV_CODEC_ID_H261:
791  if (!CONFIG_H261_ENCODER)
793  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
795  "The specified picture size of %dx%d is not valid for the "
796  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
797  s->width, s->height);
798  return AVERROR(EINVAL);
799  }
800  s->out_format = FMT_H261;
801  avctx->delay = 0;
802  s->low_delay = 1;
803  s->rtp_mode = 0; /* Sliced encoding not supported */
804  break;
805  case AV_CODEC_ID_H263:
806  if (!CONFIG_H263_ENCODER)
809  s->width, s->height) == 8) {
811  "The specified picture size of %dx%d is not valid for "
812  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
813  "352x288, 704x576, and 1408x1152. "
814  "Try H.263+.\n", s->width, s->height);
815  return AVERROR(EINVAL);
816  }
817  s->out_format = FMT_H263;
818  avctx->delay = 0;
819  s->low_delay = 1;
820  break;
821  case AV_CODEC_ID_H263P:
822  s->out_format = FMT_H263;
823  s->h263_plus = 1;
824  /* Fx */
825  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
826  s->modified_quant = s->h263_aic;
827  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
828  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
829 
830  /* /Fx */
831  /* These are just to be sure */
832  avctx->delay = 0;
833  s->low_delay = 1;
834  break;
835  case AV_CODEC_ID_FLV1:
836  s->out_format = FMT_H263;
837  s->h263_flv = 2; /* format = 1; 11-bit codes */
838  s->unrestricted_mv = 1;
839  s->rtp_mode = 0; /* don't allow GOB */
840  avctx->delay = 0;
841  s->low_delay = 1;
842  break;
843  case AV_CODEC_ID_RV10:
844  s->out_format = FMT_H263;
845  avctx->delay = 0;
846  s->low_delay = 1;
847  break;
848  case AV_CODEC_ID_RV20:
849  s->out_format = FMT_H263;
850  avctx->delay = 0;
851  s->low_delay = 1;
852  s->modified_quant = 1;
853  s->h263_aic = 1;
854  s->h263_plus = 1;
855  s->loop_filter = 1;
856  s->unrestricted_mv = 0;
857  break;
858  case AV_CODEC_ID_MPEG4:
859  s->out_format = FMT_H263;
860  s->h263_pred = 1;
861  s->unrestricted_mv = 1;
862  s->low_delay = s->max_b_frames ? 0 : 1;
863  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
864  break;
866  s->out_format = FMT_H263;
867  s->h263_pred = 1;
868  s->unrestricted_mv = 1;
869  s->msmpeg4_version = 2;
870  avctx->delay = 0;
871  s->low_delay = 1;
872  break;
874  s->out_format = FMT_H263;
875  s->h263_pred = 1;
876  s->unrestricted_mv = 1;
877  s->msmpeg4_version = 3;
878  s->flipflop_rounding = 1;
879  avctx->delay = 0;
880  s->low_delay = 1;
881  break;
882  case AV_CODEC_ID_WMV1:
883  s->out_format = FMT_H263;
884  s->h263_pred = 1;
885  s->unrestricted_mv = 1;
886  s->msmpeg4_version = 4;
887  s->flipflop_rounding = 1;
888  avctx->delay = 0;
889  s->low_delay = 1;
890  break;
891  case AV_CODEC_ID_WMV2:
892  s->out_format = FMT_H263;
893  s->h263_pred = 1;
894  s->unrestricted_mv = 1;
895  s->msmpeg4_version = 5;
896  s->flipflop_rounding = 1;
897  avctx->delay = 0;
898  s->low_delay = 1;
899  break;
900  default:
901  return AVERROR(EINVAL);
902  }
903 
904 #if FF_API_PRIVATE_OPT
906  if (avctx->noise_reduction)
907  s->noise_reduction = avctx->noise_reduction;
909 #endif
910 
911  avctx->has_b_frames = !s->low_delay;
912 
913  s->encoding = 1;
914 
915  s->progressive_frame =
916  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
918  s->alternate_scan);
919 
920  /* init */
922  if ((ret = ff_mpv_common_init(s)) < 0)
923  return ret;
924 
925  ff_fdctdsp_init(&s->fdsp, avctx);
926  ff_me_cmp_init(&s->mecc, avctx);
927  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
928  ff_pixblockdsp_init(&s->pdsp, avctx);
929  ff_qpeldsp_init(&s->qdsp);
930 
931  if (s->msmpeg4_version) {
932  FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats,
933  2 * 2 * (MAX_LEVEL + 1) *
934  (MAX_RUN + 1) * 2 * sizeof(int), fail);
935  }
936  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
937 
938  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
939  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
940  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
941  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
944  FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture,
945  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
946  FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture,
947  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
948 
949 
950  if (s->noise_reduction) {
951  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset,
952  2 * 64 * sizeof(uint16_t), fail);
953  }
954 
956 
957  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
958  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
959 
960  if (s->slice_context_count > 1) {
961  s->rtp_mode = 1;
962 
964  s->h263_slice_structured = 1;
965  }
966 
967  s->quant_precision = 5;
968 
969 #if FF_API_PRIVATE_OPT
972  s->frame_skip_threshold = avctx->frame_skip_threshold;
974  s->frame_skip_factor = avctx->frame_skip_factor;
975  if (avctx->frame_skip_exp)
976  s->frame_skip_exp = avctx->frame_skip_exp;
978  s->frame_skip_cmp = avctx->frame_skip_cmp;
980 #endif
981 
982  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, s->avctx->ildct_cmp);
983  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
984 
985  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
987  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
989  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
990  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
991  return ret;
992  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
993  && s->out_format == FMT_MPEG1)
995 
996  /* init q matrix */
997  for (i = 0; i < 64; i++) {
998  int j = s->idsp.idct_permutation[i];
999  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1000  s->mpeg_quant) {
1001  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1002  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1003  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1004  s->intra_matrix[j] =
1005  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1006  } else {
1007  /* MPEG-1/2 */
1008  s->chroma_intra_matrix[j] =
1009  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1010  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1011  }
1012  if (s->avctx->intra_matrix)
1013  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1014  if (s->avctx->inter_matrix)
1015  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1016  }
1017 
1018  /* precompute matrix */
1019  /* for mjpeg, we do include qscale in the matrix */
1020  if (s->out_format != FMT_MJPEG) {
1021  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1022  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1023  31, 1);
1024  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1025  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1026  31, 0);
1027  }
1028 
1029  if ((ret = ff_rate_control_init(s)) < 0)
1030  return ret;
1031 
1032 #if FF_API_PRIVATE_OPT
1034  if (avctx->brd_scale)
1035  s->brd_scale = avctx->brd_scale;
1036 
1037  if (avctx->prediction_method)
1038  s->pred = avctx->prediction_method + 1;
1040 #endif
1041 
1042  if (s->b_frame_strategy == 2) {
1043  for (i = 0; i < s->max_b_frames + 2; i++) {
1044  s->tmp_frames[i] = av_frame_alloc();
1045  if (!s->tmp_frames[i])
1046  return AVERROR(ENOMEM);
1047 
1048  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1049  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1050  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1051 
1052  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1053  if (ret < 0)
1054  return ret;
1055  }
1056  }
1057 
1058  cpb_props = ff_add_cpb_side_data(avctx);
1059  if (!cpb_props)
1060  return AVERROR(ENOMEM);
1061  cpb_props->max_bitrate = avctx->rc_max_rate;
1062  cpb_props->min_bitrate = avctx->rc_min_rate;
1063  cpb_props->avg_bitrate = avctx->bit_rate;
1064  cpb_props->buffer_size = avctx->rc_buffer_size;
1065 
1066  return 0;
1067 fail:
1069  return AVERROR_UNKNOWN;
1070 }
1071 
/* NOTE(review): the signature line and several statements (including the
 * bodies of the MJPEG/rate-control teardown calls) were elided by the
 * extraction. */
1073 {
1075  int i;
1076 
1078 
1080  if (CONFIG_MJPEG_ENCODER &&
1081  s->out_format == FMT_MJPEG)
1083 
1085 
/* Release the temporary frames used by b_frame_strategy == 2. */
1086  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1087  av_frame_free(&s->tmp_frames[i]);
1088 
1089  ff_free_picture_tables(&s->new_picture);
1090  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1091 
1092  av_freep(&s->avctx->stats_out);
1093  av_freep(&s->ac_stats);
1094 
/* The chroma matrices may alias the luma ones; free them only when they
 * are distinct allocations, then clear the pointers so the unconditional
 * frees below cannot double-free. */
1095  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1096  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1097  s->q_chroma_intra_matrix= NULL;
1098  s->q_chroma_intra_matrix16= NULL;
1099  av_freep(&s->q_intra_matrix);
1100  av_freep(&s->q_inter_matrix);
1101  av_freep(&s->q_intra_matrix16);
1102  av_freep(&s->q_inter_matrix16);
1103  av_freep(&s->input_picture);
1104  av_freep(&s->reordered_input_picture);
1105  av_freep(&s->dct_offset);
1106 
1107  return 0;
1108 }
1109 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    value subtracted from every pixel (typically the block mean)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return accumulated absolute difference over all 256 pixels
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++) {
            int diff = line[col] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
1123 
                           uint8_t *ref, int stride)
{
    int x, y, w, h;
    int acc = 0;

    /* Walk only the area covered by complete 16x16 blocks. */
    w = s->width & ~15;
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            /* Temporal error: plain (unmotion-compensated) SAD vs. ref. */
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
                                     stride, 16);
            /* Block mean: pix_sum over 256 pixels, rounded. */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            /* Spatial error: deviation of the block from its own mean. */
            int sae = get_sae(src + offset, mean, stride);

            /* NOTE(review): counts blocks whose spatial error (plus a fixed
             * bias of 500) is below the temporal error — i.e. blocks that
             * presumably code cheaper as intra; confirm against callers. */
            acc += sae + 500 < sad;
        }
    }
    return acc;
}
1146 
1147 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1148 {
1149  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1150  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1151  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1152  &s->linesize, &s->uvlinesize);
1153 }
1154 
/**
 * Take one input frame (or a flush request) and queue it for encoding.
 *
 * The frame is either referenced directly ("direct" path, when layout and
 * alignment match the encoder's buffers) or copied into a freshly allocated
 * internal Picture, padding the borders as needed.  A NULL pic_arg signals
 * flushing: the queue is compacted so s->input_picture[0] holds the next
 * picture to encode.
 *
 * @param s       encoder context (owns s->picture[] and s->input_picture[])
 * @param pic_arg frame to queue, or NULL to flush; never modified
 * @return 0 on success, a negative AVERROR on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* number of frames the output lags behind the input (B-frame reorder) */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly increasing */
                if (pts <= last) {
                    av_log(s->avctx, AV_LOG_ERROR,
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts supplied: extrapolate from the previous one, or fall
             * back to the display picture number */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* the zero-copy path requires matching strides, 16-aligned
         * dimensions and sufficiently aligned data pointers */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            /* zero-copy: just take a new reference on the caller's buffers */
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                /* copy each plane into the internal buffer */
                int h_chroma_shift, v_chroma_shift;
                av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                                 &h_chroma_shift,
                                                 &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* interlaced MPEG-2 may need a taller bottom pad */
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    /* replicate the last pixels into the padding area */
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1305 
{
    /* Compare the candidate picture p against the reference ref, block by
     * block, and decide whether p is similar enough to be skipped.
     * Returns 1 when the frame may be dropped, 0 otherwise.
     * (Signature not visible in this extraction — parameters inferred from
     * the body; p is the input picture, ref the last coded one.) */
    int x, y, plane;
    int score = 0;
    int64_t score64 = 0;

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        /* luma is scanned at twice the block density of chroma */
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                /* non-shared pictures carry an INPLACE offset of 16 */
                int off = p->shared ? 0 : 16;
                uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                /* accumulate per-block errors; the exponent selects the
                 * norm (max, L1, L2, cubic, quartic) */
                switch (FFABS(s->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    if (score)
        score64 = score;
    /* negative exponent: normalize back to a per-macroblock scale */
    if (s->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
                      -1.0/s->frame_skip_exp);

    if (score64 < s->frame_skip_threshold)
        return 1;
    if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1346 
1348 {
1349  AVPacket pkt = { 0 };
1350  int ret;
1351  int size = 0;
1352 
1353  av_init_packet(&pkt);
1354 
1356  if (ret < 0)
1357  return ret;
1358 
1359  do {
1361  if (ret >= 0) {
1362  size += pkt.size;
1363  av_packet_unref(&pkt);
1364  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1365  return ret;
1366  } while (ret >= 0);
1367 
1368  return size;
1369 }
1370 
1372 {
1373  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1374  const int scale = s->brd_scale;
1375  int width = s->width >> scale;
1376  int height = s->height >> scale;
1377  int i, j, out_size, p_lambda, b_lambda, lambda2;
1378  int64_t best_rd = INT64_MAX;
1379  int best_b_count = -1;
1380  int ret = 0;
1381 
1382  av_assert0(scale >= 0 && scale <= 3);
1383 
1384  //emms_c();
1385  //s->next_picture_ptr->quality;
1386  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1387  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1388  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1389  if (!b_lambda) // FIXME we should do this somewhere else
1390  b_lambda = p_lambda;
1391  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1393 
1394  for (i = 0; i < s->max_b_frames + 2; i++) {
1395  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1396  s->next_picture_ptr;
1397  uint8_t *data[4];
1398 
1399  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1400  pre_input = *pre_input_ptr;
1401  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1402 
1403  if (!pre_input.shared && i) {
1404  data[0] += INPLACE_OFFSET;
1405  data[1] += INPLACE_OFFSET;
1406  data[2] += INPLACE_OFFSET;
1407  }
1408 
1409  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1410  s->tmp_frames[i]->linesize[0],
1411  data[0],
1412  pre_input.f->linesize[0],
1413  width, height);
1414  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1415  s->tmp_frames[i]->linesize[1],
1416  data[1],
1417  pre_input.f->linesize[1],
1418  width >> 1, height >> 1);
1419  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1420  s->tmp_frames[i]->linesize[2],
1421  data[2],
1422  pre_input.f->linesize[2],
1423  width >> 1, height >> 1);
1424  }
1425  }
1426 
1427  for (j = 0; j < s->max_b_frames + 1; j++) {
1428  AVCodecContext *c;
1429  int64_t rd = 0;
1430 
1431  if (!s->input_picture[j])
1432  break;
1433 
1435  if (!c)
1436  return AVERROR(ENOMEM);
1437 
1438  c->width = width;
1439  c->height = height;
1441  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1442  c->mb_decision = s->avctx->mb_decision;
1443  c->me_cmp = s->avctx->me_cmp;
1444  c->mb_cmp = s->avctx->mb_cmp;
1445  c->me_sub_cmp = s->avctx->me_sub_cmp;
1446  c->pix_fmt = AV_PIX_FMT_YUV420P;
1447  c->time_base = s->avctx->time_base;
1448  c->max_b_frames = s->max_b_frames;
1449 
1450  ret = avcodec_open2(c, codec, NULL);
1451  if (ret < 0)
1452  goto fail;
1453 
1454  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1455  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1456 
1457  out_size = encode_frame(c, s->tmp_frames[0]);
1458  if (out_size < 0) {
1459  ret = out_size;
1460  goto fail;
1461  }
1462 
1463  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1464 
1465  for (i = 0; i < s->max_b_frames + 1; i++) {
1466  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1467 
1468  s->tmp_frames[i + 1]->pict_type = is_p ?
1470  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1471 
1472  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1473  if (out_size < 0) {
1474  ret = out_size;
1475  goto fail;
1476  }
1477 
1478  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1479  }
1480 
1481  /* get the delayed frames */
1483  if (out_size < 0) {
1484  ret = out_size;
1485  goto fail;
1486  }
1487  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1488 
1489  rd += c->error[0] + c->error[1] + c->error[2];
1490 
1491  if (rd < best_rd) {
1492  best_rd = rd;
1493  best_b_count = j;
1494  }
1495 
1496 fail:
1498  if (ret < 0)
1499  return ret;
1500  }
1501 
1502  return best_b_count;
1503 }
1504 
1506 {
1507  int i, ret;
1508 
1509  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1510  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1511  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1512 
1513  /* set next picture type & ordering */
1514  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1515  if (s->frame_skip_threshold || s->frame_skip_factor) {
1516  if (s->picture_in_gop_number < s->gop_size &&
1517  s->next_picture_ptr &&
1518  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1519  // FIXME check that the gop check above is +-1 correct
1520  av_frame_unref(s->input_picture[0]->f);
1521 
1522  ff_vbv_update(s, 0);
1523 
1524  goto no_output_pic;
1525  }
1526  }
1527 
1528  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1529  !s->next_picture_ptr || s->intra_only) {
1530  s->reordered_input_picture[0] = s->input_picture[0];
1531  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1532  s->reordered_input_picture[0]->f->coded_picture_number =
1533  s->coded_picture_number++;
1534  } else {
1535  int b_frames = 0;
1536 
1537  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1538  for (i = 0; i < s->max_b_frames + 1; i++) {
1539  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1540 
1541  if (pict_num >= s->rc_context.num_entries)
1542  break;
1543  if (!s->input_picture[i]) {
1544  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1545  break;
1546  }
1547 
1548  s->input_picture[i]->f->pict_type =
1549  s->rc_context.entry[pict_num].new_pict_type;
1550  }
1551  }
1552 
1553  if (s->b_frame_strategy == 0) {
1554  b_frames = s->max_b_frames;
1555  while (b_frames && !s->input_picture[b_frames])
1556  b_frames--;
1557  } else if (s->b_frame_strategy == 1) {
1558  for (i = 1; i < s->max_b_frames + 1; i++) {
1559  if (s->input_picture[i] &&
1560  s->input_picture[i]->b_frame_score == 0) {
1561  s->input_picture[i]->b_frame_score =
1563  s->input_picture[i ]->f->data[0],
1564  s->input_picture[i - 1]->f->data[0],
1565  s->linesize) + 1;
1566  }
1567  }
1568  for (i = 0; i < s->max_b_frames + 1; i++) {
1569  if (!s->input_picture[i] ||
1570  s->input_picture[i]->b_frame_score - 1 >
1571  s->mb_num / s->b_sensitivity)
1572  break;
1573  }
1574 
1575  b_frames = FFMAX(0, i - 1);
1576 
1577  /* reset scores */
1578  for (i = 0; i < b_frames + 1; i++) {
1579  s->input_picture[i]->b_frame_score = 0;
1580  }
1581  } else if (s->b_frame_strategy == 2) {
1582  b_frames = estimate_best_b_count(s);
1583  if (b_frames < 0)
1584  return b_frames;
1585  }
1586 
1587  emms_c();
1588 
1589  for (i = b_frames - 1; i >= 0; i--) {
1590  int type = s->input_picture[i]->f->pict_type;
1591  if (type && type != AV_PICTURE_TYPE_B)
1592  b_frames = i;
1593  }
1594  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1595  b_frames == s->max_b_frames) {
1596  av_log(s->avctx, AV_LOG_ERROR,
1597  "warning, too many B-frames in a row\n");
1598  }
1599 
1600  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1601  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1602  s->gop_size > s->picture_in_gop_number) {
1603  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1604  } else {
1605  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1606  b_frames = 0;
1607  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1608  }
1609  }
1610 
1611  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1612  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1613  b_frames--;
1614 
1615  s->reordered_input_picture[0] = s->input_picture[b_frames];
1616  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1617  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1618  s->reordered_input_picture[0]->f->coded_picture_number =
1619  s->coded_picture_number++;
1620  for (i = 0; i < b_frames; i++) {
1621  s->reordered_input_picture[i + 1] = s->input_picture[i];
1622  s->reordered_input_picture[i + 1]->f->pict_type =
1624  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1625  s->coded_picture_number++;
1626  }
1627  }
1628  }
1629 no_output_pic:
1630  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1631 
1632  if (s->reordered_input_picture[0]) {
1633  s->reordered_input_picture[0]->reference =
1634  s->reordered_input_picture[0]->f->pict_type !=
1635  AV_PICTURE_TYPE_B ? 3 : 0;
1636 
1637  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1638  return ret;
1639 
1640  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1641  // input is a shared pix, so we can't modify it -> allocate a new
1642  // one & ensure that the shared one is reuseable
1643 
1644  Picture *pic;
1645  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1646  if (i < 0)
1647  return i;
1648  pic = &s->picture[i];
1649 
1650  pic->reference = s->reordered_input_picture[0]->reference;
1651  if (alloc_picture(s, pic, 0) < 0) {
1652  return -1;
1653  }
1654 
1655  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1656  if (ret < 0)
1657  return ret;
1658 
1659  /* mark us unused / free shared pic */
1660  av_frame_unref(s->reordered_input_picture[0]->f);
1661  s->reordered_input_picture[0]->shared = 0;
1662 
1663  s->current_picture_ptr = pic;
1664  } else {
1665  // input is not a shared pix -> reuse buffer for current_pix
1666  s->current_picture_ptr = s->reordered_input_picture[0];
1667  for (i = 0; i < 4; i++) {
1668  s->new_picture.f->data[i] += INPLACE_OFFSET;
1669  }
1670  }
1671  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1672  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1673  s->current_picture_ptr)) < 0)
1674  return ret;
1675 
1676  s->picture_number = s->new_picture.f->display_picture_number;
1677  }
1678  return 0;
1679 }
1680 
1682 {
1683  if (s->unrestricted_mv &&
1684  s->current_picture.reference &&
1685  !s->intra_only) {
1686  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1687  int hshift = desc->log2_chroma_w;
1688  int vshift = desc->log2_chroma_h;
1689  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1690  s->current_picture.f->linesize[0],
1691  s->h_edge_pos, s->v_edge_pos,
1693  EDGE_TOP | EDGE_BOTTOM);
1694  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1695  s->current_picture.f->linesize[1],
1696  s->h_edge_pos >> hshift,
1697  s->v_edge_pos >> vshift,
1698  EDGE_WIDTH >> hshift,
1699  EDGE_WIDTH >> vshift,
1700  EDGE_TOP | EDGE_BOTTOM);
1701  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1702  s->current_picture.f->linesize[2],
1703  s->h_edge_pos >> hshift,
1704  s->v_edge_pos >> vshift,
1705  EDGE_WIDTH >> hshift,
1706  EDGE_WIDTH >> vshift,
1707  EDGE_TOP | EDGE_BOTTOM);
1708  }
1709 
1710  emms_c();
1711 
1712  s->last_pict_type = s->pict_type;
1713  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1714  if (s->pict_type!= AV_PICTURE_TYPE_B)
1715  s->last_non_b_pict_type = s->pict_type;
1716 
1717 #if FF_API_CODED_FRAME
1719  av_frame_unref(s->avctx->coded_frame);
1720  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1722 #endif
1723 #if FF_API_ERROR_FRAME
1725  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1726  sizeof(s->current_picture.encoding_error));
1728 #endif
1729 }
1730 
{
    /* Refresh the per-coefficient noise-reduction offsets from the
     * accumulated DCT error statistics, kept separately for intra and
     * inter blocks. */
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        /* age old statistics: halve error sums and count once the count
         * grows past 2^16 so recent frames dominate */
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            /* offset ~ noise_reduction * count / error_sum, with rounding;
             * the +1 in the divisor avoids division by zero */
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1751 
{
    /* Per-frame encoder setup: rotate last/next/current picture references,
     * adjust plane pointers for field pictures and select the unquantizers.
     * Returns 0 on success or a negative error code. */
    int ret;

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    s->current_picture_ptr->f->pict_type = s->pict_type;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* only non-B frames become references for what follows */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }

    if (s->last_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->last_picture);
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        ff_mpeg_unref_picture(s->avctx, &s->next_picture);
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    /* field pictures: skip every other line by doubling the linesizes,
     * starting one line down for the bottom field */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i] *= 2;
            s->next_picture.f->linesize[i] *= 2;
        }
    }

    /* pick the matching unquantizer family for the output format */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->dct_error_sum) {
        /* dct_error_sum is only allocated when noise reduction is active */
        av_assert2(s->noise_reduction && s->encoding);
    }

    return 0;
}
1823 
1825  const AVFrame *pic_arg, int *got_packet)
1826 {
1828  int i, stuffing_count, ret;
1829  int context_count = s->slice_context_count;
1830 
1831  s->vbv_ignore_qmax = 0;
1832 
1833  s->picture_in_gop_number++;
1834 
1835  if (load_input_picture(s, pic_arg) < 0)
1836  return -1;
1837 
1838  if (select_input_picture(s) < 0) {
1839  return -1;
1840  }
1841 
1842  /* output? */
1843  if (s->new_picture.f->data[0]) {
1844  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1845  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1846  :
1847  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1848  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1849  return ret;
1850  if (s->mb_info) {
1851  s->mb_info_ptr = av_packet_new_side_data(pkt,
1853  s->mb_width*s->mb_height*12);
1854  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1855  }
1856 
1857  for (i = 0; i < context_count; i++) {
1858  int start_y = s->thread_context[i]->start_mb_y;
1859  int end_y = s->thread_context[i]-> end_mb_y;
1860  int h = s->mb_height;
1861  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1862  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1863 
1864  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1865  }
1866 
1867  s->pict_type = s->new_picture.f->pict_type;
1868  //emms_c();
1869  ret = frame_start(s);
1870  if (ret < 0)
1871  return ret;
1872 vbv_retry:
1873  ret = encode_picture(s, s->picture_number);
1874  if (growing_buffer) {
1875  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1876  pkt->data = s->pb.buf;
1878  }
1879  if (ret < 0)
1880  return -1;
1881 
1882 #if FF_API_STAT_BITS
1884  avctx->header_bits = s->header_bits;
1885  avctx->mv_bits = s->mv_bits;
1886  avctx->misc_bits = s->misc_bits;
1887  avctx->i_tex_bits = s->i_tex_bits;
1888  avctx->p_tex_bits = s->p_tex_bits;
1889  avctx->i_count = s->i_count;
1890  // FIXME f/b_count in avctx
1891  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1892  avctx->skip_count = s->skip_count;
1894 #endif
1895 
1896  frame_end(s);
1897 
1898  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1899  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1900 
1901  if (avctx->rc_buffer_size) {
1902  RateControlContext *rcc = &s->rc_context;
1903  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1904  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1905  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1906 
1907  if (put_bits_count(&s->pb) > max_size &&
1908  s->lambda < s->lmax) {
1909  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1910  (s->qscale + 1) / s->qscale);
1911  if (s->adaptive_quant) {
1912  int i;
1913  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1914  s->lambda_table[i] =
1915  FFMAX(s->lambda_table[i] + min_step,
1916  s->lambda_table[i] * (s->qscale + 1) /
1917  s->qscale);
1918  }
1919  s->mb_skipped = 0; // done in frame_start()
1920  // done in encode_picture() so we must undo it
1921  if (s->pict_type == AV_PICTURE_TYPE_P) {
1922  if (s->flipflop_rounding ||
1923  s->codec_id == AV_CODEC_ID_H263P ||
1924  s->codec_id == AV_CODEC_ID_MPEG4)
1925  s->no_rounding ^= 1;
1926  }
1927  if (s->pict_type != AV_PICTURE_TYPE_B) {
1928  s->time_base = s->last_time_base;
1929  s->last_non_b_time = s->time - s->pp_time;
1930  }
1931  for (i = 0; i < context_count; i++) {
1932  PutBitContext *pb = &s->thread_context[i]->pb;
1933  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1934  }
1935  s->vbv_ignore_qmax = 1;
1936  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1937  goto vbv_retry;
1938  }
1939 
1940  av_assert0(s->avctx->rc_max_rate);
1941  }
1942 
1943  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1945 
1946  for (i = 0; i < 4; i++) {
1947  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1948  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1949  }
1950  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1951  s->current_picture_ptr->encoding_error,
1952  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1953  s->pict_type);
1954 
1955  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1956  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1957  s->misc_bits + s->i_tex_bits +
1958  s->p_tex_bits);
1959  flush_put_bits(&s->pb);
1960  s->frame_bits = put_bits_count(&s->pb);
1961 
1962  stuffing_count = ff_vbv_update(s, s->frame_bits);
1963  s->stuffing_bits = 8*stuffing_count;
1964  if (stuffing_count) {
1965  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1966  stuffing_count + 50) {
1967  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1968  return -1;
1969  }
1970 
1971  switch (s->codec_id) {
1974  while (stuffing_count--) {
1975  put_bits(&s->pb, 8, 0);
1976  }
1977  break;
1978  case AV_CODEC_ID_MPEG4:
1979  put_bits(&s->pb, 16, 0);
1980  put_bits(&s->pb, 16, 0x1C3);
1981  stuffing_count -= 4;
1982  while (stuffing_count--) {
1983  put_bits(&s->pb, 8, 0xFF);
1984  }
1985  break;
1986  default:
1987  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1988  }
1989  flush_put_bits(&s->pb);
1990  s->frame_bits = put_bits_count(&s->pb);
1991  }
1992 
1993  /* update MPEG-1/2 vbv_delay for CBR */
1994  if (s->avctx->rc_max_rate &&
1995  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1996  s->out_format == FMT_MPEG1 &&
1997  90000LL * (avctx->rc_buffer_size - 1) <=
1998  s->avctx->rc_max_rate * 0xFFFFLL) {
1999  AVCPBProperties *props;
2000  size_t props_size;
2001 
2002  int vbv_delay, min_delay;
2003  double inbits = s->avctx->rc_max_rate *
2004  av_q2d(s->avctx->time_base);
2005  int minbits = s->frame_bits - 8 *
2006  (s->vbv_delay_ptr - s->pb.buf - 1);
2007  double bits = s->rc_context.buffer_index + minbits - inbits;
2008 
2009  if (bits < 0)
2010  av_log(s->avctx, AV_LOG_ERROR,
2011  "Internal error, negative bits\n");
2012 
2013  av_assert1(s->repeat_first_field == 0);
2014 
2015  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2016  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2017  s->avctx->rc_max_rate;
2018 
2019  vbv_delay = FFMAX(vbv_delay, min_delay);
2020 
2021  av_assert0(vbv_delay < 0xFFFF);
2022 
2023  s->vbv_delay_ptr[0] &= 0xF8;
2024  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2025  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2026  s->vbv_delay_ptr[2] &= 0x07;
2027  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2028 
2029  props = av_cpb_properties_alloc(&props_size);
2030  if (!props)
2031  return AVERROR(ENOMEM);
2032  props->vbv_delay = vbv_delay * 300;
2033 
2035  (uint8_t*)props, props_size);
2036  if (ret < 0) {
2037  av_freep(&props);
2038  return ret;
2039  }
2040 
2041 #if FF_API_VBV_DELAY
2043  avctx->vbv_delay = vbv_delay * 300;
2045 #endif
2046  }
2047  s->total_bits += s->frame_bits;
2048 #if FF_API_STAT_BITS
2050  avctx->frame_bits = s->frame_bits;
2052 #endif
2053 
2054 
2055  pkt->pts = s->current_picture.f->pts;
2056  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2057  if (!s->current_picture.f->coded_picture_number)
2058  pkt->dts = pkt->pts - s->dts_delta;
2059  else
2060  pkt->dts = s->reordered_pts;
2061  s->reordered_pts = pkt->pts;
2062  } else
2063  pkt->dts = pkt->pts;
2064  if (s->current_picture.f->key_frame)
2066  if (s->mb_info)
2068  } else {
2069  s->frame_bits = 0;
2070  }
2071 
2072  /* release non-reference frames */
2073  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2074  if (!s->picture[i].reference)
2075  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2076  }
2077 
2078  av_assert1((s->frame_bits & 7) == 0);
2079 
2080  pkt->size = s->frame_bits / 8;
2081  *got_packet = !!pkt->size;
2082  return 0;
2083 }
2084 
                                         int n, int threshold)
{
    /* Zero out block n entirely if it contains only scattered +/-1
     * coefficients whose position-weighted score stays below threshold —
     * coding such "noise" blocks costs more than it is worth.
     * A negative threshold additionally allows eliminating the DC
     * coefficient. */

    /* weight per zero-run length before a +/-1 level: early, dense
     * coefficients count more than isolated high-frequency ones */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;           /* DC may be eliminated too */
        threshold = -threshold;
    } else
        skip_dc = 1;           /* keep DC untouched */

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            /* any coefficient bigger than 1 -> block is worth keeping */
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* eliminate: clear everything (except DC when skip_dc is set) */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    /* last_index reflects whether a (kept) DC coefficient remains */
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2140 
2141 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2142  int last_index)
2143 {
2144  int i;
2145  const int maxlevel = s->max_qcoeff;
2146  const int minlevel = s->min_qcoeff;
2147  int overflow = 0;
2148 
2149  if (s->mb_intra) {
2150  i = 1; // skip clipping of intra dc
2151  } else
2152  i = 0;
2153 
2154  for (; i <= last_index; i++) {
2155  const int j = s->intra_scantable.permutated[i];
2156  int level = block[j];
2157 
2158  if (level > maxlevel) {
2159  level = maxlevel;
2160  overflow++;
2161  } else if (level < minlevel) {
2162  level = minlevel;
2163  overflow++;
2164  }
2165 
2166  block[j] = level;
2167  }
2168 
2169  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2170  av_log(s->avctx, AV_LOG_INFO,
2171  "warning, clipping %d dct coefficients to %d..%d\n",
2172  overflow, minlevel, maxlevel);
2173 }
2174 
/**
 * Compute a perceptual weight for every position of an 8x8 block.
 *
 * For each pixel, the local variance over its 3x3 neighbourhood (clipped to
 * the block edges) is estimated as count*sqr - sum*sum and turned into a
 * weight via an integer square root; flat regions get larger weights.
 *
 * @param weight output array of 64 weights, index x + 8*y
 * @param ptr    top-left pixel of the 8x8 source block
 * @param stride distance in bytes between vertically adjacent pixels
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int row = 0; row < 8; row++) {
        for (int col = 0; col < 8; col++) {
            /* 3x3 neighbourhood clipped to the block bounds */
            const int r0 = row > 0 ? row - 1 : 0;
            const int r1 = row + 2 < 8 ? row + 2 : 8;
            const int c0 = col > 0 ? col - 1 : 0;
            const int c1 = col + 2 < 8 ? col + 2 : 8;
            int sum = 0;
            int sqr = 0;
            int count = 0;

            for (int yy = r0; yy < r1; yy++) {
                for (int xx = c0; xx < c1; xx++) {
                    int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            weight[col + 8 * row] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2198 
2200  int motion_x, int motion_y,
2201  int mb_block_height,
2202  int mb_block_width,
2203  int mb_block_count)
2204 {
2205  int16_t weight[12][64];
2206  int16_t orig[12][64];
2207  const int mb_x = s->mb_x;
2208  const int mb_y = s->mb_y;
2209  int i;
2210  int skip_dct[12];
2211  int dct_offset = s->linesize * 8; // default for progressive frames
2212  int uv_dct_offset = s->uvlinesize * 8;
2213  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2214  ptrdiff_t wrap_y, wrap_c;
2215 
2216  for (i = 0; i < mb_block_count; i++)
2217  skip_dct[i] = s->skipdct;
2218 
2219  if (s->adaptive_quant) {
2220  const int last_qp = s->qscale;
2221  const int mb_xy = mb_x + mb_y * s->mb_stride;
2222 
2223  s->lambda = s->lambda_table[mb_xy];
2224  update_qscale(s);
2225 
2226  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2227  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2228  s->dquant = s->qscale - last_qp;
2229 
2230  if (s->out_format == FMT_H263) {
2231  s->dquant = av_clip(s->dquant, -2, 2);
2232 
2233  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2234  if (!s->mb_intra) {
2235  if (s->pict_type == AV_PICTURE_TYPE_B) {
2236  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2237  s->dquant = 0;
2238  }
2239  if (s->mv_type == MV_TYPE_8X8)
2240  s->dquant = 0;
2241  }
2242  }
2243  }
2244  }
2245  ff_set_qscale(s, last_qp + s->dquant);
2246  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2247  ff_set_qscale(s, s->qscale + s->dquant);
2248 
2249  wrap_y = s->linesize;
2250  wrap_c = s->uvlinesize;
2251  ptr_y = s->new_picture.f->data[0] +
2252  (mb_y * 16 * wrap_y) + mb_x * 16;
2253  ptr_cb = s->new_picture.f->data[1] +
2254  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2255  ptr_cr = s->new_picture.f->data[2] +
2256  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2257 
2258  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2259  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2260  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2261  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2262  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2263  wrap_y, wrap_y,
2264  16, 16, mb_x * 16, mb_y * 16,
2265  s->width, s->height);
2266  ptr_y = ebuf;
2267  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2268  wrap_c, wrap_c,
2269  mb_block_width, mb_block_height,
2270  mb_x * mb_block_width, mb_y * mb_block_height,
2271  cw, ch);
2272  ptr_cb = ebuf + 16 * wrap_y;
2273  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2274  wrap_c, wrap_c,
2275  mb_block_width, mb_block_height,
2276  mb_x * mb_block_width, mb_y * mb_block_height,
2277  cw, ch);
2278  ptr_cr = ebuf + 16 * wrap_y + 16;
2279  }
2280 
2281  if (s->mb_intra) {
2282  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2283  int progressive_score, interlaced_score;
2284 
2285  s->interlaced_dct = 0;
2286  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2287  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2288  NULL, wrap_y, 8) - 400;
2289 
2290  if (progressive_score > 0) {
2291  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2292  NULL, wrap_y * 2, 8) +
2293  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2294  NULL, wrap_y * 2, 8);
2295  if (progressive_score > interlaced_score) {
2296  s->interlaced_dct = 1;
2297 
2298  dct_offset = wrap_y;
2299  uv_dct_offset = wrap_c;
2300  wrap_y <<= 1;
2301  if (s->chroma_format == CHROMA_422 ||
2302  s->chroma_format == CHROMA_444)
2303  wrap_c <<= 1;
2304  }
2305  }
2306  }
2307 
2308  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2309  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2310  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2311  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2312 
2313  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2314  skip_dct[4] = 1;
2315  skip_dct[5] = 1;
2316  } else {
2317  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2318  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2319  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2320  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2321  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2322  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2323  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2324  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2325  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2326  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2327  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2328  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2329  }
2330  }
2331  } else {
2332  op_pixels_func (*op_pix)[4];
2333  qpel_mc_func (*op_qpix)[16];
2334  uint8_t *dest_y, *dest_cb, *dest_cr;
2335 
2336  dest_y = s->dest[0];
2337  dest_cb = s->dest[1];
2338  dest_cr = s->dest[2];
2339 
2340  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2341  op_pix = s->hdsp.put_pixels_tab;
2342  op_qpix = s->qdsp.put_qpel_pixels_tab;
2343  } else {
2344  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2345  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2346  }
2347 
2348  if (s->mv_dir & MV_DIR_FORWARD) {
2349  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2350  s->last_picture.f->data,
2351  op_pix, op_qpix);
2352  op_pix = s->hdsp.avg_pixels_tab;
2353  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2354  }
2355  if (s->mv_dir & MV_DIR_BACKWARD) {
2356  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2357  s->next_picture.f->data,
2358  op_pix, op_qpix);
2359  }
2360 
2361  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2362  int progressive_score, interlaced_score;
2363 
2364  s->interlaced_dct = 0;
2365  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2366  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2367  ptr_y + wrap_y * 8,
2368  wrap_y, 8) - 400;
2369 
2370  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2371  progressive_score -= 400;
2372 
2373  if (progressive_score > 0) {
2374  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2375  wrap_y * 2, 8) +
2376  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2377  ptr_y + wrap_y,
2378  wrap_y * 2, 8);
2379 
2380  if (progressive_score > interlaced_score) {
2381  s->interlaced_dct = 1;
2382 
2383  dct_offset = wrap_y;
2384  uv_dct_offset = wrap_c;
2385  wrap_y <<= 1;
2386  if (s->chroma_format == CHROMA_422)
2387  wrap_c <<= 1;
2388  }
2389  }
2390  }
2391 
2392  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2393  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2394  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2395  dest_y + dct_offset, wrap_y);
2396  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2397  dest_y + dct_offset + 8, wrap_y);
2398 
2399  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2400  skip_dct[4] = 1;
2401  skip_dct[5] = 1;
2402  } else {
2403  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2404  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2405  if (!s->chroma_y_shift) { /* 422 */
2406  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2407  dest_cb + uv_dct_offset, wrap_c);
2408  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2409  dest_cr + uv_dct_offset, wrap_c);
2410  }
2411  }
2412  /* pre quantization */
2413  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2414  2 * s->qscale * s->qscale) {
2415  // FIXME optimize
2416  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2417  skip_dct[0] = 1;
2418  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2419  skip_dct[1] = 1;
2420  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2421  wrap_y, 8) < 20 * s->qscale)
2422  skip_dct[2] = 1;
2423  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2424  wrap_y, 8) < 20 * s->qscale)
2425  skip_dct[3] = 1;
2426  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2427  skip_dct[4] = 1;
2428  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2429  skip_dct[5] = 1;
2430  if (!s->chroma_y_shift) { /* 422 */
2431  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2432  dest_cb + uv_dct_offset,
2433  wrap_c, 8) < 20 * s->qscale)
2434  skip_dct[6] = 1;
2435  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2436  dest_cr + uv_dct_offset,
2437  wrap_c, 8) < 20 * s->qscale)
2438  skip_dct[7] = 1;
2439  }
2440  }
2441  }
2442 
2443  if (s->quantizer_noise_shaping) {
2444  if (!skip_dct[0])
2445  get_visual_weight(weight[0], ptr_y , wrap_y);
2446  if (!skip_dct[1])
2447  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2448  if (!skip_dct[2])
2449  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2450  if (!skip_dct[3])
2451  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2452  if (!skip_dct[4])
2453  get_visual_weight(weight[4], ptr_cb , wrap_c);
2454  if (!skip_dct[5])
2455  get_visual_weight(weight[5], ptr_cr , wrap_c);
2456  if (!s->chroma_y_shift) { /* 422 */
2457  if (!skip_dct[6])
2458  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2459  wrap_c);
2460  if (!skip_dct[7])
2461  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2462  wrap_c);
2463  }
2464  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2465  }
2466 
2467  /* DCT & quantize */
2468  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2469  {
2470  for (i = 0; i < mb_block_count; i++) {
2471  if (!skip_dct[i]) {
2472  int overflow;
2473  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2474  // FIXME we could decide to change to quantizer instead of
2475  // clipping
2476  // JS: I don't think that would be a good idea it could lower
2477  // quality instead of improve it. Just INTRADC clipping
2478  // deserves changes in quantizer
2479  if (overflow)
2480  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2481  } else
2482  s->block_last_index[i] = -1;
2483  }
2484  if (s->quantizer_noise_shaping) {
2485  for (i = 0; i < mb_block_count; i++) {
2486  if (!skip_dct[i]) {
2487  s->block_last_index[i] =
2488  dct_quantize_refine(s, s->block[i], weight[i],
2489  orig[i], i, s->qscale);
2490  }
2491  }
2492  }
2493 
2494  if (s->luma_elim_threshold && !s->mb_intra)
2495  for (i = 0; i < 4; i++)
2496  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2497  if (s->chroma_elim_threshold && !s->mb_intra)
2498  for (i = 4; i < mb_block_count; i++)
2499  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2500 
2501  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2502  for (i = 0; i < mb_block_count; i++) {
2503  if (s->block_last_index[i] == -1)
2504  s->coded_score[i] = INT_MAX / 256;
2505  }
2506  }
2507  }
2508 
2509  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2510  s->block_last_index[4] =
2511  s->block_last_index[5] = 0;
2512  s->block[4][0] =
2513  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2514  if (!s->chroma_y_shift) { /* 422 / 444 */
2515  for (i=6; i<12; i++) {
2516  s->block_last_index[i] = 0;
2517  s->block[i][0] = s->block[4][0];
2518  }
2519  }
2520  }
2521 
2522  // non c quantize code returns incorrect block_last_index FIXME
2523  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2524  for (i = 0; i < mb_block_count; i++) {
2525  int j;
2526  if (s->block_last_index[i] > 0) {
2527  for (j = 63; j > 0; j--) {
2528  if (s->block[i][s->intra_scantable.permutated[j]])
2529  break;
2530  }
2531  s->block_last_index[i] = j;
2532  }
2533  }
2534  }
2535 
2536  /* huffman encode */
2537  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2540  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2541  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2542  break;
2543  case AV_CODEC_ID_MPEG4:
2544  if (CONFIG_MPEG4_ENCODER)
2545  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2546  break;
2547  case AV_CODEC_ID_MSMPEG4V2:
2548  case AV_CODEC_ID_MSMPEG4V3:
2549  case AV_CODEC_ID_WMV1:
2551  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2552  break;
2553  case AV_CODEC_ID_WMV2:
2554  if (CONFIG_WMV2_ENCODER)
2555  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2556  break;
2557  case AV_CODEC_ID_H261:
2558  if (CONFIG_H261_ENCODER)
2559  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2560  break;
2561  case AV_CODEC_ID_H263:
2562  case AV_CODEC_ID_H263P:
2563  case AV_CODEC_ID_FLV1:
2564  case AV_CODEC_ID_RV10:
2565  case AV_CODEC_ID_RV20:
2566  if (CONFIG_H263_ENCODER)
2567  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2568  break;
2569  case AV_CODEC_ID_MJPEG:
2570  case AV_CODEC_ID_AMV:
2571  if (CONFIG_MJPEG_ENCODER)
2572  ff_mjpeg_encode_mb(s, s->block);
2573  break;
2574  default:
2575  av_assert1(0);
2576  }
2577 }
2578 
2579 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2580 {
2581  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2582  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2583  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2584 }
2585 
2587  int i;
2588 
2589  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2590 
2591  /* MPEG-1 */
2592  d->mb_skip_run= s->mb_skip_run;
2593  for(i=0; i<3; i++)
2594  d->last_dc[i] = s->last_dc[i];
2595 
2596  /* statistics */
2597  d->mv_bits= s->mv_bits;
2598  d->i_tex_bits= s->i_tex_bits;
2599  d->p_tex_bits= s->p_tex_bits;
2600  d->i_count= s->i_count;
2601  d->f_count= s->f_count;
2602  d->b_count= s->b_count;
2603  d->skip_count= s->skip_count;
2604  d->misc_bits= s->misc_bits;
2605  d->last_bits= 0;
2606 
2607  d->mb_skipped= 0;
2608  d->qscale= s->qscale;
2609  d->dquant= s->dquant;
2610 
2611  d->esc3_level_length= s->esc3_level_length;
2612 }
2613 
2615  int i;
2616 
2617  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2618  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2619 
2620  /* MPEG-1 */
2621  d->mb_skip_run= s->mb_skip_run;
2622  for(i=0; i<3; i++)
2623  d->last_dc[i] = s->last_dc[i];
2624 
2625  /* statistics */
2626  d->mv_bits= s->mv_bits;
2627  d->i_tex_bits= s->i_tex_bits;
2628  d->p_tex_bits= s->p_tex_bits;
2629  d->i_count= s->i_count;
2630  d->f_count= s->f_count;
2631  d->b_count= s->b_count;
2632  d->skip_count= s->skip_count;
2633  d->misc_bits= s->misc_bits;
2634 
2635  d->mb_intra= s->mb_intra;
2636  d->mb_skipped= s->mb_skipped;
2637  d->mv_type= s->mv_type;
2638  d->mv_dir= s->mv_dir;
2639  d->pb= s->pb;
2640  if(s->data_partitioning){
2641  d->pb2= s->pb2;
2642  d->tex_pb= s->tex_pb;
2643  }
2644  d->block= s->block;
2645  for(i=0; i<8; i++)
2646  d->block_last_index[i]= s->block_last_index[i];
2647  d->interlaced_dct= s->interlaced_dct;
2648  d->qscale= s->qscale;
2649 
2650  d->esc3_level_length= s->esc3_level_length;
2651 }
2652 
2653 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2655  int *dmin, int *next_block, int motion_x, int motion_y)
2656 {
2657  int score;
2658  uint8_t *dest_backup[3];
2659 
2660  copy_context_before_encode(s, backup, type);
2661 
2662  s->block= s->blocks[*next_block];
2663  s->pb= pb[*next_block];
2664  if(s->data_partitioning){
2665  s->pb2 = pb2 [*next_block];
2666  s->tex_pb= tex_pb[*next_block];
2667  }
2668 
2669  if(*next_block){
2670  memcpy(dest_backup, s->dest, sizeof(s->dest));
2671  s->dest[0] = s->sc.rd_scratchpad;
2672  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2673  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2674  av_assert0(s->linesize >= 32); //FIXME
2675  }
2676 
2677  encode_mb(s, motion_x, motion_y);
2678 
2679  score= put_bits_count(&s->pb);
2680  if(s->data_partitioning){
2681  score+= put_bits_count(&s->pb2);
2682  score+= put_bits_count(&s->tex_pb);
2683  }
2684 
2685  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2686  ff_mpv_reconstruct_mb(s, s->block);
2687 
2688  score *= s->lambda2;
2689  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2690  }
2691 
2692  if(*next_block){
2693  memcpy(s->dest, dest_backup, sizeof(s->dest));
2694  }
2695 
2696  if(score<*dmin){
2697  *dmin= score;
2698  *next_block^=1;
2699 
2701  }
2702 }
2703 
2704 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2705  const uint32_t *sq = ff_square_tab + 256;
2706  int acc=0;
2707  int x,y;
2708 
2709  if(w==16 && h==16)
2710  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2711  else if(w==8 && h==8)
2712  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2713 
2714  for(y=0; y<h; y++){
2715  for(x=0; x<w; x++){
2716  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2717  }
2718  }
2719 
2720  av_assert2(acc>=0);
2721 
2722  return acc;
2723 }
2724 
2725 static int sse_mb(MpegEncContext *s){
2726  int w= 16;
2727  int h= 16;
2728 
2729  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2730  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2731 
2732  if(w==16 && h==16)
2733  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2734  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2735  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2736  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2737  }else{
2738  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2739  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2740  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2741  }
2742  else
2743  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2744  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2745  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2746 }
2747 
2749  MpegEncContext *s= *(void**)arg;
2750 
2751 
2752  s->me.pre_pass=1;
2753  s->me.dia_size= s->avctx->pre_dia_size;
2754  s->first_slice_line=1;
2755  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2756  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2757  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2758  }
2759  s->first_slice_line=0;
2760  }
2761 
2762  s->me.pre_pass=0;
2763 
2764  return 0;
2765 }
2766 
2768  MpegEncContext *s= *(void**)arg;
2769 
2771 
2772  s->me.dia_size= s->avctx->dia_size;
2773  s->first_slice_line=1;
2774  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2775  s->mb_x=0; //for block init below
2777  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2778  s->block_index[0]+=2;
2779  s->block_index[1]+=2;
2780  s->block_index[2]+=2;
2781  s->block_index[3]+=2;
2782 
2783  /* compute motion vector & mb_type and store in context */
2784  if(s->pict_type==AV_PICTURE_TYPE_B)
2785  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2786  else
2787  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2788  }
2789  s->first_slice_line=0;
2790  }
2791  return 0;
2792 }
2793 
/**
 * Slice worker: compute, for every luma macroblock in this context's
 * row range [start_mb_y, end_mb_y), a spatial variance estimate and the
 * mean, storing them in current_picture.mb_var / mb_mean and
 * accumulating the variance sum in me.mb_var_sum_temp.
 */
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;

    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            // pix_sum / pix_norm1 presumably return sum(p) and sum(p^2)
            // over the 16x16 luma block — TODO confirm against mpegvideoencdsp
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            // ~ (sum(p^2) - sum(p)^2/256) / 256, i.e. block variance scaled
            // down by the pixel count; +128 rounds, +500 is a small bias
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
2818 
2820  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2821  if(s->partitioned_frame){
2823  }
2824 
2825  ff_mpeg4_stuffing(&s->pb);
2826  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2828  }
2829 
2830  avpriv_align_put_bits(&s->pb);
2831  flush_put_bits(&s->pb);
2832 
2833  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2834  s->misc_bits+= get_bits_diff(s);
2835 }
2836 
2838 {
2839  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2840  int offset = put_bits_count(&s->pb);
2841  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2842  int gobn = s->mb_y / s->gob_index;
2843  int pred_x, pred_y;
2844  if (CONFIG_H263_ENCODER)
2845  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2846  bytestream_put_le32(&ptr, offset);
2847  bytestream_put_byte(&ptr, s->qscale);
2848  bytestream_put_byte(&ptr, gobn);
2849  bytestream_put_le16(&ptr, mba);
2850  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2851  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2852  /* 4MV not implemented */
2853  bytestream_put_byte(&ptr, 0); /* hmv2 */
2854  bytestream_put_byte(&ptr, 0); /* vmv2 */
2855 }
2856 
/**
 * Maintain the per-macroblock info records (12 bytes each, written by
 * write_mb_info()) that are emitted roughly every s->mb_info bytes of
 * coded bitstream. A no-op unless the mb_info feature is enabled.
 *
 * @param startcode nonzero when called right after writing a resync/GOB
 *                  start code rather than before a macroblock
 */
static void update_mb_info(MpegEncContext *s, int startcode)
{
    if (!s->mb_info)
        return;
    /* once s->mb_info bytes have been written since the last recorded
     * position, open a new 12-byte record slot */
    if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
        s->mb_info_size += 12;
        s->prev_mb_info = s->last_mb_info;
    }
    if (startcode) {
        s->prev_mb_info = put_bits_count(&s->pb)/8;
        /* This might have incremented mb_info_size above, and we return without
         * actually writing any info into that slot yet. But in that case,
         * this will be called again at the start of the after writing the
         * start code, actually writing the mb info. */
        return;
    }

    s->last_mb_info = put_bits_count(&s->pb)/8;
    /* make sure at least one record exists before writing into it */
    if (!s->mb_info_size)
        s->mb_info_size += 12;
    write_mb_info(s);
}
2879 
/**
 * Ensure at least threshold free bytes remain in the output bitstream
 * buffer, growing it by size_increase if necessary.
 *
 * Only the internally allocated avctx byte buffer can be grown, and only
 * when a single slice context is in use; after reallocation the
 * PutBitContext and the ptr_lastgob / vbv_delay_ptr pointers are rebased
 * onto the new buffer.
 *
 * @param threshold     minimum number of free bytes required
 * @param size_increase number of bytes to grow the buffer by
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or size
 *         overflow, AVERROR(EINVAL) if the buffer could not be grown
 *         (e.g. user-supplied buffer) and free space is still below
 *         the threshold
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember offsets of pointers into the old buffer so they can
         * be rebased after reallocation */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        /* guard against integer overflow of the new size */
        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2915 
2916 static int encode_thread(AVCodecContext *c, void *arg){
2917  MpegEncContext *s= *(void**)arg;
2918  int mb_x, mb_y;
2919  int chr_h= 16>>s->chroma_y_shift;
2920  int i, j;
2921  MpegEncContext best_s = { 0 }, backup_s;
2922  uint8_t bit_buf[2][MAX_MB_BYTES];
2923  uint8_t bit_buf2[2][MAX_MB_BYTES];
2924  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2925  PutBitContext pb[2], pb2[2], tex_pb[2];
2926 
2928 
2929  for(i=0; i<2; i++){
2930  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2931  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2932  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2933  }
2934 
2935  s->last_bits= put_bits_count(&s->pb);
2936  s->mv_bits=0;
2937  s->misc_bits=0;
2938  s->i_tex_bits=0;
2939  s->p_tex_bits=0;
2940  s->i_count=0;
2941  s->f_count=0;
2942  s->b_count=0;
2943  s->skip_count=0;
2944 
2945  for(i=0; i<3; i++){
2946  /* init last dc values */
2947  /* note: quant matrix value (8) is implied here */
2948  s->last_dc[i] = 128 << s->intra_dc_precision;
2949 
2950  s->current_picture.encoding_error[i] = 0;
2951  }
2952  if(s->codec_id==AV_CODEC_ID_AMV){
2953  s->last_dc[0] = 128*8/13;
2954  s->last_dc[1] = 128*8/14;
2955  s->last_dc[2] = 128*8/14;
2956  }
2957  s->mb_skip_run = 0;
2958  memset(s->last_mv, 0, sizeof(s->last_mv));
2959 
2960  s->last_mv_dir = 0;
2961 
2962  switch(s->codec_id){
2963  case AV_CODEC_ID_H263:
2964  case AV_CODEC_ID_H263P:
2965  case AV_CODEC_ID_FLV1:
2966  if (CONFIG_H263_ENCODER)
2967  s->gob_index = H263_GOB_HEIGHT(s->height);
2968  break;
2969  case AV_CODEC_ID_MPEG4:
2970  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
2972  break;
2973  }
2974 
2975  s->resync_mb_x=0;
2976  s->resync_mb_y=0;
2977  s->first_slice_line = 1;
2978  s->ptr_lastgob = s->pb.buf;
2979  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2980  s->mb_x=0;
2981  s->mb_y= mb_y;
2982 
2983  ff_set_qscale(s, s->qscale);
2985 
2986  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2987  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2988  int mb_type= s->mb_type[xy];
2989 // int d;
2990  int dmin= INT_MAX;
2991  int dir;
2992  int size_increase = s->avctx->internal->byte_buffer_size/4
2993  + s->mb_width*MAX_MB_BYTES;
2994 
2996  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2997  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
2998  return -1;
2999  }
3000  if(s->data_partitioning){
3001  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3002  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3003  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3004  return -1;
3005  }
3006  }
3007 
3008  s->mb_x = mb_x;
3009  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3011 
3012  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3014  xy= s->mb_y*s->mb_stride + s->mb_x;
3015  mb_type= s->mb_type[xy];
3016  }
3017 
3018  /* write gob / video packet header */
3019  if(s->rtp_mode){
3020  int current_packet_size, is_gob_start;
3021 
3022  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3023 
3024  is_gob_start = s->rtp_payload_size &&
3025  current_packet_size >= s->rtp_payload_size &&
3026  mb_y + mb_x > 0;
3027 
3028  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3029 
3030  switch(s->codec_id){
3031  case AV_CODEC_ID_H263:
3032  case AV_CODEC_ID_H263P:
3033  if(!s->h263_slice_structured)
3034  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3035  break;
3037  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3039  if(s->mb_skip_run) is_gob_start=0;
3040  break;
3041  case AV_CODEC_ID_MJPEG:
3042  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3043  break;
3044  }
3045 
3046  if(is_gob_start){
3047  if(s->start_mb_y != mb_y || mb_x!=0){
3048  write_slice_end(s);
3049 
3050  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3052  }
3053  }
3054 
3055  av_assert2((put_bits_count(&s->pb)&7) == 0);
3056  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3057 
3058  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3059  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3060  int d = 100 / s->error_rate;
3061  if(r % d == 0){
3062  current_packet_size=0;
3063  s->pb.buf_ptr= s->ptr_lastgob;
3064  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3065  }
3066  }
3067 
3068 #if FF_API_RTP_CALLBACK
3070  if (s->avctx->rtp_callback){
3071  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3072  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3073  }
3075 #endif
3076  update_mb_info(s, 1);
3077 
3078  switch(s->codec_id){
3079  case AV_CODEC_ID_MPEG4:
3080  if (CONFIG_MPEG4_ENCODER) {
3083  }
3084  break;
3087  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3090  }
3091  break;
3092  case AV_CODEC_ID_H263:
3093  case AV_CODEC_ID_H263P:
3094  if (CONFIG_H263_ENCODER)
3096  break;
3097  }
3098 
3099  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3100  int bits= put_bits_count(&s->pb);
3101  s->misc_bits+= bits - s->last_bits;
3102  s->last_bits= bits;
3103  }
3104 
3105  s->ptr_lastgob += current_packet_size;
3106  s->first_slice_line=1;
3107  s->resync_mb_x=mb_x;
3108  s->resync_mb_y=mb_y;
3109  }
3110  }
3111 
3112  if( (s->resync_mb_x == s->mb_x)
3113  && s->resync_mb_y+1 == s->mb_y){
3114  s->first_slice_line=0;
3115  }
3116 
3117  s->mb_skipped=0;
3118  s->dquant=0; //only for QP_RD
3119 
3120  update_mb_info(s, 0);
3121 
3122  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3123  int next_block=0;
3124  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3125 
3126  copy_context_before_encode(&backup_s, s, -1);
3127  backup_s.pb= s->pb;
3128  best_s.data_partitioning= s->data_partitioning;
3129  best_s.partitioned_frame= s->partitioned_frame;
3130  if(s->data_partitioning){
3131  backup_s.pb2= s->pb2;
3132  backup_s.tex_pb= s->tex_pb;
3133  }
3134 
3136  s->mv_dir = MV_DIR_FORWARD;
3137  s->mv_type = MV_TYPE_16X16;
3138  s->mb_intra= 0;
3139  s->mv[0][0][0] = s->p_mv_table[xy][0];
3140  s->mv[0][0][1] = s->p_mv_table[xy][1];
3141  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3142  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3143  }
3145  s->mv_dir = MV_DIR_FORWARD;
3146  s->mv_type = MV_TYPE_FIELD;
3147  s->mb_intra= 0;
3148  for(i=0; i<2; i++){
3149  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3150  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3151  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3152  }
3153  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3154  &dmin, &next_block, 0, 0);
3155  }
3157  s->mv_dir = MV_DIR_FORWARD;
3158  s->mv_type = MV_TYPE_16X16;
3159  s->mb_intra= 0;
3160  s->mv[0][0][0] = 0;
3161  s->mv[0][0][1] = 0;
3162  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3163  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3164  }
3166  s->mv_dir = MV_DIR_FORWARD;
3167  s->mv_type = MV_TYPE_8X8;
3168  s->mb_intra= 0;
3169  for(i=0; i<4; i++){
3170  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3171  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3172  }
3173  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3174  &dmin, &next_block, 0, 0);
3175  }
3177  s->mv_dir = MV_DIR_FORWARD;
3178  s->mv_type = MV_TYPE_16X16;
3179  s->mb_intra= 0;
3180  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3181  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3182  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3183  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3184  }
3186  s->mv_dir = MV_DIR_BACKWARD;
3187  s->mv_type = MV_TYPE_16X16;
3188  s->mb_intra= 0;
3189  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3190  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3191  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3192  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3193  }
3195  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3196  s->mv_type = MV_TYPE_16X16;
3197  s->mb_intra= 0;
3198  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3199  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3200  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3201  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3202  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3203  &dmin, &next_block, 0, 0);
3204  }
3206  s->mv_dir = MV_DIR_FORWARD;
3207  s->mv_type = MV_TYPE_FIELD;
3208  s->mb_intra= 0;
3209  for(i=0; i<2; i++){
3210  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3211  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3212  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3213  }
3214  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3215  &dmin, &next_block, 0, 0);
3216  }
3218  s->mv_dir = MV_DIR_BACKWARD;
3219  s->mv_type = MV_TYPE_FIELD;
3220  s->mb_intra= 0;
3221  for(i=0; i<2; i++){
3222  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3223  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3224  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3225  }
3226  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3227  &dmin, &next_block, 0, 0);
3228  }
3230  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3231  s->mv_type = MV_TYPE_FIELD;
3232  s->mb_intra= 0;
3233  for(dir=0; dir<2; dir++){
3234  for(i=0; i<2; i++){
3235  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3236  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3237  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3238  }
3239  }
3240  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3241  &dmin, &next_block, 0, 0);
3242  }
3244  s->mv_dir = 0;
3245  s->mv_type = MV_TYPE_16X16;
3246  s->mb_intra= 1;
3247  s->mv[0][0][0] = 0;
3248  s->mv[0][0][1] = 0;
3249  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3250  &dmin, &next_block, 0, 0);
3251  if(s->h263_pred || s->h263_aic){
3252  if(best_s.mb_intra)
3253  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3254  else
3255  ff_clean_intra_table_entries(s); //old mode?
3256  }
3257  }
3258 
3259  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3260  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3261  const int last_qp= backup_s.qscale;
3262  int qpi, qp, dc[6];
3263  int16_t ac[6][16];
3264  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3265  static const int dquant_tab[4]={-1,1,-2,2};
3266  int storecoefs = s->mb_intra && s->dc_val[0];
3267 
3268  av_assert2(backup_s.dquant == 0);
3269 
3270  //FIXME intra
3271  s->mv_dir= best_s.mv_dir;
3272  s->mv_type = MV_TYPE_16X16;
3273  s->mb_intra= best_s.mb_intra;
3274  s->mv[0][0][0] = best_s.mv[0][0][0];
3275  s->mv[0][0][1] = best_s.mv[0][0][1];
3276  s->mv[1][0][0] = best_s.mv[1][0][0];
3277  s->mv[1][0][1] = best_s.mv[1][0][1];
3278 
3279  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3280  for(; qpi<4; qpi++){
3281  int dquant= dquant_tab[qpi];
3282  qp= last_qp + dquant;
3283  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3284  continue;
3285  backup_s.dquant= dquant;
3286  if(storecoefs){
3287  for(i=0; i<6; i++){
3288  dc[i]= s->dc_val[0][ s->block_index[i] ];
3289  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3290  }
3291  }
3292 
3293  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3294  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3295  if(best_s.qscale != qp){
3296  if(storecoefs){
3297  for(i=0; i<6; i++){
3298  s->dc_val[0][ s->block_index[i] ]= dc[i];
3299  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3300  }
3301  }
3302  }
3303  }
3304  }
3305  }
3306  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3307  int mx= s->b_direct_mv_table[xy][0];
3308  int my= s->b_direct_mv_table[xy][1];
3309 
3310  backup_s.dquant = 0;
3311  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3312  s->mb_intra= 0;
3313  ff_mpeg4_set_direct_mv(s, mx, my);
3314  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3315  &dmin, &next_block, mx, my);
3316  }
3317  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3318  backup_s.dquant = 0;
3319  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3320  s->mb_intra= 0;
3321  ff_mpeg4_set_direct_mv(s, 0, 0);
3322  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3323  &dmin, &next_block, 0, 0);
3324  }
3325  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3326  int coded=0;
3327  for(i=0; i<6; i++)
3328  coded |= s->block_last_index[i];
3329  if(coded){
3330  int mx,my;
3331  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3332  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3333  mx=my=0; //FIXME find the one we actually used
3334  ff_mpeg4_set_direct_mv(s, mx, my);
3335  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3336  mx= s->mv[1][0][0];
3337  my= s->mv[1][0][1];
3338  }else{
3339  mx= s->mv[0][0][0];
3340  my= s->mv[0][0][1];
3341  }
3342 
3343  s->mv_dir= best_s.mv_dir;
3344  s->mv_type = best_s.mv_type;
3345  s->mb_intra= 0;
3346 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3347  s->mv[0][0][1] = best_s.mv[0][0][1];
3348  s->mv[1][0][0] = best_s.mv[1][0][0];
3349  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3350  backup_s.dquant= 0;
3351  s->skipdct=1;
3352  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3353  &dmin, &next_block, mx, my);
3354  s->skipdct=0;
3355  }
3356  }
3357 
3358  s->current_picture.qscale_table[xy] = best_s.qscale;
3359 
3360  copy_context_after_encode(s, &best_s, -1);
3361 
3362  pb_bits_count= put_bits_count(&s->pb);
3363  flush_put_bits(&s->pb);
3364  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3365  s->pb= backup_s.pb;
3366 
3367  if(s->data_partitioning){
3368  pb2_bits_count= put_bits_count(&s->pb2);
3369  flush_put_bits(&s->pb2);
3370  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3371  s->pb2= backup_s.pb2;
3372 
3373  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3374  flush_put_bits(&s->tex_pb);
3375  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3376  s->tex_pb= backup_s.tex_pb;
3377  }
3378  s->last_bits= put_bits_count(&s->pb);
3379 
3380  if (CONFIG_H263_ENCODER &&
3381  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3383 
3384  if(next_block==0){ //FIXME 16 vs linesize16
3385  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3386  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3387  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3388  }
3389 
3390  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3391  ff_mpv_reconstruct_mb(s, s->block);
3392  } else {
3393  int motion_x = 0, motion_y = 0;
3394  s->mv_type=MV_TYPE_16X16;
3395  // only one MB-Type possible
3396 
3397  switch(mb_type){
3399  s->mv_dir = 0;
3400  s->mb_intra= 1;
3401  motion_x= s->mv[0][0][0] = 0;
3402  motion_y= s->mv[0][0][1] = 0;
3403  break;
3405  s->mv_dir = MV_DIR_FORWARD;
3406  s->mb_intra= 0;
3407  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3408  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3409  break;
3411  s->mv_dir = MV_DIR_FORWARD;
3412  s->mv_type = MV_TYPE_FIELD;
3413  s->mb_intra= 0;
3414  for(i=0; i<2; i++){
3415  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3416  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3417  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3418  }
3419  break;
3421  s->mv_dir = MV_DIR_FORWARD;
3422  s->mv_type = MV_TYPE_8X8;
3423  s->mb_intra= 0;
3424  for(i=0; i<4; i++){
3425  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3426  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3427  }
3428  break;
3430  if (CONFIG_MPEG4_ENCODER) {
3432  s->mb_intra= 0;
3433  motion_x=s->b_direct_mv_table[xy][0];
3434  motion_y=s->b_direct_mv_table[xy][1];
3435  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3436  }
3437  break;
3439  if (CONFIG_MPEG4_ENCODER) {
3441  s->mb_intra= 0;
3442  ff_mpeg4_set_direct_mv(s, 0, 0);
3443  }
3444  break;
3446  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3447  s->mb_intra= 0;
3448  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3449  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3450  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3451  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3452  break;
3454  s->mv_dir = MV_DIR_BACKWARD;
3455  s->mb_intra= 0;
3456  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3457  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3458  break;
3460  s->mv_dir = MV_DIR_FORWARD;
3461  s->mb_intra= 0;
3462  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3463  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3464  break;
3466  s->mv_dir = MV_DIR_FORWARD;
3467  s->mv_type = MV_TYPE_FIELD;
3468  s->mb_intra= 0;
3469  for(i=0; i<2; i++){
3470  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3471  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3472  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3473  }
3474  break;
3476  s->mv_dir = MV_DIR_BACKWARD;
3477  s->mv_type = MV_TYPE_FIELD;
3478  s->mb_intra= 0;
3479  for(i=0; i<2; i++){
3480  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3481  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3482  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3483  }
3484  break;
3486  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3487  s->mv_type = MV_TYPE_FIELD;
3488  s->mb_intra= 0;
3489  for(dir=0; dir<2; dir++){
3490  for(i=0; i<2; i++){
3491  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3492  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3493  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3494  }
3495  }
3496  break;
3497  default:
3498  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3499  }
3500 
3501  encode_mb(s, motion_x, motion_y);
3502 
3503  // RAL: Update last macroblock type
3504  s->last_mv_dir = s->mv_dir;
3505 
3506  if (CONFIG_H263_ENCODER &&
3507  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3509 
3510  ff_mpv_reconstruct_mb(s, s->block);
3511  }
3512 
3513  /* clean the MV table in IPS frames for direct mode in B-frames */
3514  if(s->mb_intra /* && I,P,S_TYPE */){
3515  s->p_mv_table[xy][0]=0;
3516  s->p_mv_table[xy][1]=0;
3517  }
3518 
3519  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3520  int w= 16;
3521  int h= 16;
3522 
3523  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3524  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3525 
3526  s->current_picture.encoding_error[0] += sse(
3527  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3528  s->dest[0], w, h, s->linesize);
3529  s->current_picture.encoding_error[1] += sse(
3530  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3531  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3532  s->current_picture.encoding_error[2] += sse(
3533  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3534  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3535  }
3536  if(s->loop_filter){
3537  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3539  }
3540  ff_dlog(s->avctx, "MB %d %d bits\n",
3541  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3542  }
3543  }
3544 
3545  //not beautiful here but we must write it before flushing so it has to be here
3546  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3548 
3549  write_slice_end(s);
3550 
3551 #if FF_API_RTP_CALLBACK
3553  /* Send the last GOB if RTP */
3554  if (s->avctx->rtp_callback) {
3555  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3556  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3557  /* Call the RTP callback to send the last GOB */
3558  emms_c();
3559  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3560  }
3562 #endif
3563 
3564  return 0;
3565 }
3566 
3567 #define MERGE(field) dst->field += src->field; src->field=0
3569  MERGE(me.scene_change_score);
3570  MERGE(me.mc_mb_var_sum_temp);
3571  MERGE(me.mb_var_sum_temp);
3572 }
3573 
3575  int i;
3576 
3577  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3578  MERGE(dct_count[1]);
3579  MERGE(mv_bits);
3580  MERGE(i_tex_bits);
3581  MERGE(p_tex_bits);
3582  MERGE(i_count);
3583  MERGE(f_count);
3584  MERGE(b_count);
3585  MERGE(skip_count);
3586  MERGE(misc_bits);
3587  MERGE(er.error_count);
3592 
3593  if (dst->noise_reduction){
3594  for(i=0; i<64; i++){
3595  MERGE(dct_error_sum[0][i]);
3596  MERGE(dct_error_sum[1][i]);
3597  }
3598  }
3599 
3600  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3601  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3602  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3603  flush_put_bits(&dst->pb);
3604 }
3605 
3606 static int estimate_qp(MpegEncContext *s, int dry_run){
3607  if (s->next_lambda){
3608  s->current_picture_ptr->f->quality =
3609  s->current_picture.f->quality = s->next_lambda;
3610  if(!dry_run) s->next_lambda= 0;
3611  } else if (!s->fixed_qscale) {
3612  int quality = ff_rate_estimate_qscale(s, dry_run);
3613  s->current_picture_ptr->f->quality =
3614  s->current_picture.f->quality = quality;
3615  if (s->current_picture.f->quality < 0)
3616  return -1;
3617  }
3618 
3619  if(s->adaptive_quant){
3620  switch(s->codec_id){
3621  case AV_CODEC_ID_MPEG4:
3622  if (CONFIG_MPEG4_ENCODER)
3624  break;
3625  case AV_CODEC_ID_H263:
3626  case AV_CODEC_ID_H263P:
3627  case AV_CODEC_ID_FLV1:
3628  if (CONFIG_H263_ENCODER)
3630  break;
3631  default:
3633  }
3634 
3635  s->lambda= s->lambda_table[0];
3636  //FIXME broken
3637  }else
3638  s->lambda = s->current_picture.f->quality;
3639  update_qscale(s);
3640  return 0;
3641 }
3642 
3643 /* must be called before writing the header */
3645  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3646  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3647 
3648  if(s->pict_type==AV_PICTURE_TYPE_B){
3649  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3650  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3651  }else{
3652  s->pp_time= s->time - s->last_non_b_time;
3653  s->last_non_b_time= s->time;
3654  av_assert1(s->picture_number==0 || s->pp_time > 0);
3655  }
3656 }
3657 
3659 {
3660  int i, ret;
3661  int bits;
3662  int context_count = s->slice_context_count;
3663 
3664  s->picture_number = picture_number;
3665 
3666  /* Reset the average MB variance */
3667  s->me.mb_var_sum_temp =
3668  s->me.mc_mb_var_sum_temp = 0;
3669 
3670  /* we need to initialize some time vars before we can encode B-frames */
3671  // RAL: Condition added for MPEG1VIDEO
3672  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3674  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3676 
3677  s->me.scene_change_score=0;
3678 
3679 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3680 
3681  if(s->pict_type==AV_PICTURE_TYPE_I){
3682  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3683  else s->no_rounding=0;
3684  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3685  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3686  s->no_rounding ^= 1;
3687  }
3688 
3689  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3690  if (estimate_qp(s,1) < 0)
3691  return -1;
3693  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3694  if(s->pict_type==AV_PICTURE_TYPE_B)
3695  s->lambda= s->last_lambda_for[s->pict_type];
3696  else
3697  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3698  update_qscale(s);
3699  }
3700 
3701  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3702  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3703  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3704  s->q_chroma_intra_matrix = s->q_intra_matrix;
3705  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3706  }
3707 
3708  s->mb_intra=0; //for the rate distortion & bit compare functions
3709  for(i=1; i<context_count; i++){
3710  ret = ff_update_duplicate_context(s->thread_context[i], s);
3711  if (ret < 0)
3712  return ret;
3713  }
3714 
3715  if(ff_init_me(s)<0)
3716  return -1;
3717 
3718  /* Estimate motion for every MB */
3719  if(s->pict_type != AV_PICTURE_TYPE_I){
3720  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3721  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3722  if (s->pict_type != AV_PICTURE_TYPE_B) {
3723  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3724  s->me_pre == 2) {
3725  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3726  }
3727  }
3728 
3729  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3730  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3731  /* I-Frame */
3732  for(i=0; i<s->mb_stride*s->mb_height; i++)
3733  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3734 
3735  if(!s->fixed_qscale){
3736  /* finding spatial complexity for I-frame rate control */
3737  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3738  }
3739  }
3740  for(i=1; i<context_count; i++){
3741  merge_context_after_me(s, s->thread_context[i]);
3742  }
3743  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3744  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3745  emms_c();
3746 
3747  if (s->me.scene_change_score > s->scenechange_threshold &&
3748  s->pict_type == AV_PICTURE_TYPE_P) {
3749  s->pict_type= AV_PICTURE_TYPE_I;
3750  for(i=0; i<s->mb_stride*s->mb_height; i++)
3751  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3752  if(s->msmpeg4_version >= 3)
3753  s->no_rounding=1;
3754  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3755  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3756  }
3757 
3758  if(!s->umvplus){
3759  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3760  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3761 
3762  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3763  int a,b;
3764  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3765  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3766  s->f_code= FFMAX3(s->f_code, a, b);
3767  }
3768 
3770  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3771  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3772  int j;
3773  for(i=0; i<2; i++){
3774  for(j=0; j<2; j++)
3775  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3776  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3777  }
3778  }
3779  }
3780 
3781  if(s->pict_type==AV_PICTURE_TYPE_B){
3782  int a, b;
3783 
3784  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3785  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3786  s->f_code = FFMAX(a, b);
3787 
3788  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3789  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3790  s->b_code = FFMAX(a, b);
3791 
3792  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3793  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3794  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3795  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3796  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3797  int dir, j;
3798  for(dir=0; dir<2; dir++){
3799  for(i=0; i<2; i++){
3800  for(j=0; j<2; j++){
3803  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3804  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3805  }
3806  }
3807  }
3808  }
3809  }
3810  }
3811 
3812  if (estimate_qp(s, 0) < 0)
3813  return -1;
3814 
3815  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3816  s->pict_type == AV_PICTURE_TYPE_I &&
3817  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3818  s->qscale= 3; //reduce clipping problems
3819 
3820  if (s->out_format == FMT_MJPEG) {
3821  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3822  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3823 
3824  if (s->avctx->intra_matrix) {
3825  chroma_matrix =
3826  luma_matrix = s->avctx->intra_matrix;
3827  }
3828  if (s->avctx->chroma_intra_matrix)
3829  chroma_matrix = s->avctx->chroma_intra_matrix;
3830 
3831  /* for mjpeg, we do include qscale in the matrix */
3832  for(i=1;i<64;i++){
3833  int j = s->idsp.idct_permutation[i];
3834 
3835  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3836  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3837  }
3838  s->y_dc_scale_table=
3839  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3840  s->chroma_intra_matrix[0] =
3841  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3842  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3843  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3844  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3845  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3846  s->qscale= 8;
3847  }
3848  if(s->codec_id == AV_CODEC_ID_AMV){
3849  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3850  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3851  for(i=1;i<64;i++){
3852  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3853 
3854  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3855  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3856  }
3857  s->y_dc_scale_table= y;
3858  s->c_dc_scale_table= c;
3859  s->intra_matrix[0] = 13;
3860  s->chroma_intra_matrix[0] = 14;
3861  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3862  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3863  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3864  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3865  s->qscale= 8;
3866  }
3867 
3868  //FIXME var duplication
3869  s->current_picture_ptr->f->key_frame =
3870  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3871  s->current_picture_ptr->f->pict_type =
3872  s->current_picture.f->pict_type = s->pict_type;
3873 
3874  if (s->current_picture.f->key_frame)
3875  s->picture_in_gop_number=0;
3876 
3877  s->mb_x = s->mb_y = 0;
3878  s->last_bits= put_bits_count(&s->pb);
3879  switch(s->out_format) {
3880  case FMT_MJPEG:
3881  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3882  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3883  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3884  break;
3885  case FMT_H261:
3886  if (CONFIG_H261_ENCODER)
3888  break;
3889  case FMT_H263:
3890  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3892  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3894  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3896  if (ret < 0)
3897  return ret;
3898  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3900  if (ret < 0)
3901  return ret;
3902  }
3903  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3905  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3907  else if (CONFIG_H263_ENCODER)
3909  break;
3910  case FMT_MPEG1:
3911  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
3913  break;
3914  default:
3915  av_assert0(0);
3916  }
3917  bits= put_bits_count(&s->pb);
3918  s->header_bits= bits - s->last_bits;
3919 
3920  for(i=1; i<context_count; i++){
3921  update_duplicate_context_after_me(s->thread_context[i], s);
3922  }
3923  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3924  for(i=1; i<context_count; i++){
3925  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3926  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3927  merge_context_after_encode(s, s->thread_context[i]);
3928  }
3929  emms_c();
3930  return 0;
3931 }
3932 
3933 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3934  const int intra= s->mb_intra;
3935  int i;
3936 
3937  s->dct_count[intra]++;
3938 
3939  for(i=0; i<64; i++){
3940  int level= block[i];
3941 
3942  if(level){
3943  if(level>0){
3944  s->dct_error_sum[intra][i] += level;
3945  level -= s->dct_offset[intra][i];
3946  if(level<0) level=0;
3947  }else{
3948  s->dct_error_sum[intra][i] -= level;
3949  level += s->dct_offset[intra][i];
3950  if(level>0) level=0;
3951  }
3952  block[i]= level;
3953  }
3954  }
3955 }
3956 
3958  int16_t *block, int n,
3959  int qscale, int *overflow){
3960  const int *qmat;
3961  const uint16_t *matrix;
3962  const uint8_t *scantable;
3963  const uint8_t *perm_scantable;
3964  int max=0;
3965  unsigned int threshold1, threshold2;
3966  int bias=0;
3967  int run_tab[65];
3968  int level_tab[65];
3969  int score_tab[65];
3970  int survivor[65];
3971  int survivor_count;
3972  int last_run=0;
3973  int last_level=0;
3974  int last_score= 0;
3975  int last_i;
3976  int coeff[2][64];
3977  int coeff_count[64];
3978  int qmul, qadd, start_i, last_non_zero, i, dc;
3979  const int esc_length= s->ac_esc_length;
3980  uint8_t * length;
3981  uint8_t * last_length;
3982  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3983  int mpeg2_qscale;
3984 
3985  s->fdsp.fdct(block);
3986 
3987  if(s->dct_error_sum)
3988  s->denoise_dct(s, block);
3989  qmul= qscale*16;
3990  qadd= ((qscale-1)|1)*8;
3991 
3992  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3993  else mpeg2_qscale = qscale << 1;
3994 
3995  if (s->mb_intra) {
3996  int q;
3997  scantable= s->intra_scantable.scantable;
3998  perm_scantable= s->intra_scantable.permutated;
3999  if (!s->h263_aic) {
4000  if (n < 4)
4001  q = s->y_dc_scale;
4002  else
4003  q = s->c_dc_scale;
4004  q = q << 3;
4005  } else{
4006  /* For AIC we skip quant/dequant of INTRADC */
4007  q = 1 << 3;
4008  qadd=0;
4009  }
4010 
4011  /* note: block[0] is assumed to be positive */
4012  block[0] = (block[0] + (q >> 1)) / q;
4013  start_i = 1;
4014  last_non_zero = 0;
4015  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4016  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4017  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4018  bias= 1<<(QMAT_SHIFT-1);
4019 
4020  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4021  length = s->intra_chroma_ac_vlc_length;
4022  last_length= s->intra_chroma_ac_vlc_last_length;
4023  } else {
4024  length = s->intra_ac_vlc_length;
4025  last_length= s->intra_ac_vlc_last_length;
4026  }
4027  } else {
4028  scantable= s->inter_scantable.scantable;
4029  perm_scantable= s->inter_scantable.permutated;
4030  start_i = 0;
4031  last_non_zero = -1;
4032  qmat = s->q_inter_matrix[qscale];
4033  matrix = s->inter_matrix;
4034  length = s->inter_ac_vlc_length;
4035  last_length= s->inter_ac_vlc_last_length;
4036  }
4037  last_i= start_i;
4038 
4039  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4040  threshold2= (threshold1<<1);
4041 
4042  for(i=63; i>=start_i; i--) {
4043  const int j = scantable[i];
4044  int level = block[j] * qmat[j];
4045 
4046  if(((unsigned)(level+threshold1))>threshold2){
4047  last_non_zero = i;
4048  break;
4049  }
4050  }
4051 
4052  for(i=start_i; i<=last_non_zero; i++) {
4053  const int j = scantable[i];
4054  int level = block[j] * qmat[j];
4055 
4056 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4057 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4058  if(((unsigned)(level+threshold1))>threshold2){
4059  if(level>0){
4060  level= (bias + level)>>QMAT_SHIFT;
4061  coeff[0][i]= level;
4062  coeff[1][i]= level-1;
4063 // coeff[2][k]= level-2;
4064  }else{
4065  level= (bias - level)>>QMAT_SHIFT;
4066  coeff[0][i]= -level;
4067  coeff[1][i]= -level+1;
4068 // coeff[2][k]= -level+2;
4069  }
4070  coeff_count[i]= FFMIN(level, 2);
4071  av_assert2(coeff_count[i]);
4072  max |=level;
4073  }else{
4074  coeff[0][i]= (level>>31)|1;
4075  coeff_count[i]= 1;
4076  }
4077  }
4078 
4079  *overflow= s->max_qcoeff < max; //overflow might have happened
4080 
4081  if(last_non_zero < start_i){
4082  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4083  return last_non_zero;
4084  }
4085 
4086  score_tab[start_i]= 0;
4087  survivor[0]= start_i;
4088  survivor_count= 1;
4089 
4090  for(i=start_i; i<=last_non_zero; i++){
4091  int level_index, j, zero_distortion;
4092  int dct_coeff= FFABS(block[ scantable[i] ]);
4093  int best_score=256*256*256*120;
4094 
4095  if (s->fdsp.fdct == ff_fdct_ifast)
4096  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4097  zero_distortion= dct_coeff*dct_coeff;
4098 
4099  for(level_index=0; level_index < coeff_count[i]; level_index++){
4100  int distortion;
4101  int level= coeff[level_index][i];
4102  const int alevel= FFABS(level);
4103  int unquant_coeff;
4104 
4105  av_assert2(level);
4106 
4107  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4108  unquant_coeff= alevel*qmul + qadd;
4109  } else if(s->out_format == FMT_MJPEG) {
4110  j = s->idsp.idct_permutation[scantable[i]];
4111  unquant_coeff = alevel * matrix[j] * 8;
4112  }else{ // MPEG-1
4113  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4114  if(s->mb_intra){
4115  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4116  unquant_coeff = (unquant_coeff - 1) | 1;
4117  }else{
4118  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4119  unquant_coeff = (unquant_coeff - 1) | 1;
4120  }
4121  unquant_coeff<<= 3;
4122  }
4123 
4124  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4125  level+=64;
4126  if((level&(~127)) == 0){
4127  for(j=survivor_count-1; j>=0; j--){
4128  int run= i - survivor[j];
4129  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4130  score += score_tab[i-run];
4131 
4132  if(score < best_score){
4133  best_score= score;
4134  run_tab[i+1]= run;
4135  level_tab[i+1]= level-64;
4136  }
4137  }
4138 
4139  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4140  for(j=survivor_count-1; j>=0; j--){
4141  int run= i - survivor[j];
4142  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4143  score += score_tab[i-run];
4144  if(score < last_score){
4145  last_score= score;
4146  last_run= run;
4147  last_level= level-64;
4148  last_i= i+1;
4149  }
4150  }
4151  }
4152  }else{
4153  distortion += esc_length*lambda;
4154  for(j=survivor_count-1; j>=0; j--){
4155  int run= i - survivor[j];
4156  int score= distortion + score_tab[i-run];
4157 
4158  if(score < best_score){
4159  best_score= score;
4160  run_tab[i+1]= run;
4161  level_tab[i+1]= level-64;
4162  }
4163  }
4164 
4165  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4166  for(j=survivor_count-1; j>=0; j--){
4167  int run= i - survivor[j];
4168  int score= distortion + score_tab[i-run];
4169  if(score < last_score){
4170  last_score= score;
4171  last_run= run;
4172  last_level= level-64;
4173  last_i= i+1;
4174  }
4175  }
4176  }
4177  }
4178  }
4179 
4180  score_tab[i+1]= best_score;
4181 
4182  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4183  if(last_non_zero <= 27){
4184  for(; survivor_count; survivor_count--){
4185  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4186  break;
4187  }
4188  }else{
4189  for(; survivor_count; survivor_count--){
4190  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4191  break;
4192  }
4193  }
4194 
4195  survivor[ survivor_count++ ]= i+1;
4196  }
4197 
4198  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4199  last_score= 256*256*256*120;
4200  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4201  int score= score_tab[i];
4202  if (i)
4203  score += lambda * 2; // FIXME more exact?
4204 
4205  if(score < last_score){
4206  last_score= score;
4207  last_i= i;
4208  last_level= level_tab[i];
4209  last_run= run_tab[i];
4210  }
4211  }
4212  }
4213 
4214  s->coded_score[n] = last_score;
4215 
4216  dc= FFABS(block[0]);
4217  last_non_zero= last_i - 1;
4218  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4219 
4220  if(last_non_zero < start_i)
4221  return last_non_zero;
4222 
4223  if(last_non_zero == 0 && start_i == 0){
4224  int best_level= 0;
4225  int best_score= dc * dc;
4226 
4227  for(i=0; i<coeff_count[0]; i++){
4228  int level= coeff[i][0];
4229  int alevel= FFABS(level);
4230  int unquant_coeff, score, distortion;
4231 
4232  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4233  unquant_coeff= (alevel*qmul + qadd)>>3;
4234  } else{ // MPEG-1
4235  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4236  unquant_coeff = (unquant_coeff - 1) | 1;
4237  }
4238  unquant_coeff = (unquant_coeff + 4) >> 3;
4239  unquant_coeff<<= 3 + 3;
4240 
4241  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4242  level+=64;
4243  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4244  else score= distortion + esc_length*lambda;
4245 
4246  if(score < best_score){
4247  best_score= score;
4248  best_level= level - 64;
4249  }
4250  }
4251  block[0]= best_level;
4252  s->coded_score[n] = best_score - dc*dc;
4253  if(best_level == 0) return -1;
4254  else return last_non_zero;
4255  }
4256 
4257  i= last_i;
4258  av_assert2(last_level);
4259 
4260  block[ perm_scantable[last_non_zero] ]= last_level;
4261  i -= last_run + 1;
4262 
4263  for(; i>start_i; i -= run_tab[i] + 1){
4264  block[ perm_scantable[i-1] ]= level_tab[i];
4265  }
4266 
4267  return last_non_zero;
4268 }
4269 
4270 static int16_t basis[64][64];
4271 
4272 static void build_basis(uint8_t *perm){
4273  int i, j, x, y;
4274  emms_c();
4275  for(i=0; i<8; i++){
4276  for(j=0; j<8; j++){
4277  for(y=0; y<8; y++){
4278  for(x=0; x<8; x++){
4279  double s= 0.25*(1<<BASIS_SHIFT);
4280  int index= 8*i + j;
4281  int perm_index= perm[index];
4282  if(i==0) s*= sqrt(0.5);
4283  if(j==0) s*= sqrt(0.5);
4284  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4285  }
4286  }
4287  }
4288  }
4289 }
4290 
/**
 * Refine an already-quantized block by trial +-1 coefficient changes
 * (quantizer noise shaping).  The spatial-domain reconstruction error is
 * tracked in rem[] via the basis[] table; each candidate change is scored
 * as distortion (try_8x8basis) plus VLC rate difference times lambda, and
 * the best change is applied greedily until no candidate improves the score.
 * @param block  quantized coefficients, updated in place
 * @param weight perceptual weights; remapped in place to the 16..63 range
 * @param orig   original spatial-domain samples of this block
 * @param n      block index (< 4: luma, else chroma)
 * @param qscale quantizer scale
 * @return the new index of the last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
// unsigned int threshold1, threshold2;
// int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* Build the DCT basis table lazily on first use. */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* rem[] = (reconstruction so far) - original, scaled by RECON_SHIFT;
     * initialized with the DC contribution only. */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Remap the weights into the 16..63 range and derive lambda from their
     * energy so that rate and distortion terms are comparable. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Add each coded coefficient's spatial contribution into rem[] and
     * record the zero-run lengths in run_tab[]. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement loop: each pass evaluates changing every
     * coefficient (plus the intra DC) by +-1 and applies the best
     * improvement; stop when no change lowers the score. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted residual; used below to reject candidate
             * new coefficients whose sign matches the gradient. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra block: also try +-1 on the DC coefficient. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing an existing nonzero level: rate delta is
                         * the VLC length difference for the new level. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=  last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a new +-1 coefficient: this splits an
                         * existing zero run into two. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=  length[UNI_AC_ENC_INDEX(run, 65)]
                                        + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                        - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=  length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                        - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a +-1 coefficient: merges two zero runs. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=  length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=  last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                    - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                    - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=  last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                    - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change, update last_non_zero, rebuild the
             * run-length table and fold the change into rem[]. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4606 
4607 /**
4608  * Permute an 8x8 block according to permutation.
4609  * @param block the block which will be permuted according to
4610  * the given permutation vector
4611  * @param permutation the permutation vector
4612  * @param last the last non zero coefficient in scantable order, used to
4613  * speed the permutation up
4614  * @param scantable the used scantable, this is only used to speed the
4615  * permutation up, the block is not (inverse) permutated
4616  * to scantable order!
4617  */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* Nothing to do when at most the DC coefficient is coded. */
    if (last <= 0)
        return;

    /* First save every coded coefficient and clear its slot, so that
     * scattering to permuted positions cannot clobber unsaved values. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Then write each saved coefficient to its permuted destination. */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = saved[pos];
    }
}
4642 
                       int16_t *block, int n,
                       int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    /* Forward DCT; optionally denoise using the accumulated DCT error sums. */
    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* Scan backwards for the last coefficient that survives quantization,
     * zeroing the trailing ones on the way. */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* Quantize the remaining coefficients in scan order; levels at or below
     * the threshold are zeroed. */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
4725 
4726 #define OFFSET(x) offsetof(MpegEncContext, x)
4727 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4728 static const AVOption h263_options[] = {
4729  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4730  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4732  { NULL },
4733 };
4734 
/* AVClass exposing h263_options on the H.263 encoder's private context. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4741 
4743  .name = "h263",
4744  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4745  .type = AVMEDIA_TYPE_VIDEO,
4746  .id = AV_CODEC_ID_H263,
4747  .priv_data_size = sizeof(MpegEncContext),
4749  .encode2 = ff_mpv_encode_picture,
4750  .close = ff_mpv_encode_end,
4752  .priv_class = &h263_class,
4753 };
4754 
4755 static const AVOption h263p_options[] = {
4756  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4757  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4758  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4759  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4761  { NULL },
4762 };
/* AVClass exposing h263p_options on the H.263+ encoder's private context. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4769 
4771  .name = "h263p",
4772  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4773  .type = AVMEDIA_TYPE_VIDEO,
4774  .id = AV_CODEC_ID_H263P,
4775  .priv_data_size = sizeof(MpegEncContext),
4777  .encode2 = ff_mpv_encode_picture,
4778  .close = ff_mpv_encode_end,
4779  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4781  .priv_class = &h263p_class,
4782 };
4783 
/* AVClass attaching the generic mpegvideo options to the MSMPEG4v2 encoder. */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4790 
4792  .name = "msmpeg4v2",
4793  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4794  .type = AVMEDIA_TYPE_VIDEO,
4795  .id = AV_CODEC_ID_MSMPEG4V2,
4796  .priv_data_size = sizeof(MpegEncContext),
4798  .encode2 = ff_mpv_encode_picture,
4799  .close = ff_mpv_encode_end,
4801  .priv_class = &msmpeg4v2_class,
4802 };
4803 
/* AVClass attaching the generic mpegvideo options to the MSMPEG4v3 encoder. */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4810 
4812  .name = "msmpeg4",
4813  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4814  .type = AVMEDIA_TYPE_VIDEO,
4815  .id = AV_CODEC_ID_MSMPEG4V3,
4816  .priv_data_size = sizeof(MpegEncContext),
4818  .encode2 = ff_mpv_encode_picture,
4819  .close = ff_mpv_encode_end,
4821  .priv_class = &msmpeg4v3_class,
4822 };
4823 
/* AVClass attaching the generic mpegvideo options to the WMV1 encoder. */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4830 
4832  .name = "wmv1",
4833  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4834  .type = AVMEDIA_TYPE_VIDEO,
4835  .id = AV_CODEC_ID_WMV1,
4836  .priv_data_size = sizeof(MpegEncContext),
4838  .encode2 = ff_mpv_encode_picture,
4839  .close = ff_mpv_encode_end,
4841  .priv_class = &wmv1_class,
4842 };
MpegEncContext::i_count
int i_count
Definition: mpegvideo.h:348
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:39
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:890
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
MpegEncContext::mb_skipped
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
ff_wmv2_encode_picture_header
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
AVCodec
AVCodec.
Definition: codec.h:190
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
MpegEncContext::mb_type
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
stride
int stride
Definition: mace.c:144
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
direct
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
h263data.h
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:210
MpegEncContext::data_partitioning
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:404
set_frame_distances
static void set_frame_distances(MpegEncContext *s)
Definition: mpegvideo_enc.c:3644
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
MpegEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:206
H263_GOB_HEIGHT
#define H263_GOB_HEIGHT(h)
Definition: h263.h:42
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:124
r
const char * r
Definition: vf_curves.c:114
acc
int acc
Definition: yuv2rgb.c:555
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_wmv1_encoder
AVCodec ff_wmv1_encoder
Definition: mpegvideo_enc.c:4831
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Definition: mpeg4videoenc.c:1364
MpegEncContext::b_code
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
AVCodecContext::mpeg_quant
attribute_deprecated int mpeg_quant
Definition: avcodec.h:821
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1338
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:644
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1594
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1411
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
ff_side_data_set_encoder_stats
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:728
encode_mb
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2579
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
sse_mb
static int sse_mb(MpegEncContext *s)
Definition: mpegvideo_enc.c:2725
MAX_RUN
#define MAX_RUN
Definition: rl.h:35
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
COPY
#define COPY(a)
ff_block_permute
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4618
ff_qpeldsp_init
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4270
ff_mjpeg_encode_picture_header
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
Definition: mjpegenc_common.c:248
encode_frame
static int encode_frame(AVCodecContext *c, AVFrame *frame)
Definition: mpegvideo_enc.c:1347
MpegEncContext::current_picture
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2767
update_noise_reduction
static void update_noise_reduction(MpegEncContext *s)
Definition: mpegvideo_enc.c:1731
out_size
int out_size
Definition: movenc.c:55
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
MpegEncContext::partitioned_frame
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:405
MpegEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
pixdesc.h
ff_msmpeg4_encode_mb
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
MAX_DMV
#define MAX_DMV
Definition: motion_est.h:37
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:355
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:465
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
Definition: mpegvideo_enc.c:1147
MpegEncContext::f_count
int f_count
Definition: mpegvideo.h:349
AVOption
AVOption.
Definition: opt.h:246
ff_mpv_generic_options
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
b
#define b
Definition: input.c:41
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
data
const char data[16]
Definition: mxf.c:91
MpegEncContext::vbv_delay
int vbv_delay
Definition: mpegvideo.h:213
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
AVCodecContext::p_tex_bits
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1525
MpegEncContext::fdsp
FDCTDSPContext fdsp
Definition: mpegvideo.h:227
ff_mjpeg_encode_init
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegutils.h:121
AVCodecContext::skip_count
attribute_deprecated int skip_count
Definition: avcodec.h:1531
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1916
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
ff_h261_encode_init
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2279
max
#define max(a, b)
Definition: cuda_runtime.h:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
mathematics.h
ff_rv20_encode_picture_header
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
ff_rate_control_init
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
Picture
Picture.
Definition: mpegpicture.h:45
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2748
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
MpegEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideo.h:151
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
AVERROR_UNKNOWN
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:2037
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1375
AVCodecContext::frame_skip_threshold
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1455
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
mpegutils.h
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
ff_set_cmp
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:474
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:682
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
thread.h
ff_msmpeg4_encode_picture_header
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1593
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
MpegEncContext::mv
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:588
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:52
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
ff_mpeg1_encode_picture_header
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:426
MAX_FCODE
#define MAX_FCODE
Definition: mpegutils.h:48
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:940
AVCodecContext::frame_bits
attribute_deprecated int frame_bits
Definition: avcodec.h:1537
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:232
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1824
sp5x.h
AVCodecContext::pre_me
attribute_deprecated int pre_me
Definition: avcodec.h:966
OFFSET
#define OFFSET(x)
Definition: mpegvideo_enc.c:4726
estimate_qp
static int estimate_qp(MpegEncContext *s, int dry_run)
Definition: mpegvideo_enc.c:3606
AVCodecContext::prediction_method
attribute_deprecated int prediction_method
Definition: avcodec.h:885
ff_get_best_fcode
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
FDCTDSPContext
Definition: fdctdsp.h:26
ff_set_mpeg4_time
void ff_set_mpeg4_time(MpegEncContext *s)
Definition: mpeg4videoenc.c:874
faandct.h
Floating point AAN DCT.
MpegEncContext::pict_type
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
av_packet_add_side_data
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1841
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:535
merge_context_after_me
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3568
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:108
fail
#define fail()
Definition: checkasm.h:123
h261.h
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc_common.c:539
MpegEncContext::padding_bug_score
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:411
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:11135
get_intra_count
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1124
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:555
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1035
avcodec_find_encoder
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:914
ff_h263dsp_init
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
perm
perm
Definition: f_perms.c:74
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:584
MpegEncContext::umvplus
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
pts
static int64_t pts
Definition: transcode_aac.c:647
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:64
ff_h263_update_motion_val
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
ff_sqrt
#define ff_sqrt
Definition: mathops.h:206
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational::num
int num
Numerator.
Definition: rational.h:59
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
h263_options
static const AVOption h263_options[]
Definition: mpegvideo_enc.c:4728
MpegEncContext::frame_pred_frame_dct
int frame_pred_frame_dct
Definition: mpegvideo.h:465
sp5x_quant_table
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
flv.h
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
ff_dct_encode_init
av_cold int ff_dct_encode_init(MpegEncContext *s)
Definition: mpegvideo_enc.c:269
ff_mpeg4_encode_mb
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: mpeg4videoenc.c:476
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2880
ff_mjpeg_encode_mb
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:283
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
ff_mpeg4_encode_picture_header
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg4videoenc.c:1062
MpegEncContext::i_tex_bits
int i_tex_bits
Definition: mpegvideo.h:346
AVCodecContext::p_count
attribute_deprecated int p_count
Definition: avcodec.h:1529
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1138
frame_start
static int frame_start(MpegEncContext *s)
Definition: mpegvideo_enc.c:1752
RateControlContext
rate control context.
Definition: ratecontrol.h:63
mpeg12.h
ff_init_qscale_tab
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:218
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
update_mb_info
static void update_mb_info(MpegEncContext *s, int startcode)
Definition: mpegvideo_enc.c:2857
av_cold
#define av_cold
Definition: attributes.h:90
dct.h
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
MAX_MV
#define MAX_MV
Definition: motion_est.h:35
ff_h261_get_picture_format
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4272
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
ff_h263_encode_init
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
width
#define width
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:47
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
ff_h263_chroma_qscale_table
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
s
#define s(width, name)
Definition: cbs_vp9.c:257
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:79
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:484
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:299
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
ff_mpeg2_dc_scale_table
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
MpegEncContext::mv_dir
int mv_dir
Definition: mpegvideo.h:261
g
const char * g
Definition: vf_curves.c:115
MpegEncContext::mb_skip_run
int mb_skip_run
Definition: mpegvideo.h:289
msmpeg4v3_class
static const AVClass msmpeg4v3_class
Definition: mpegvideo_enc.c:4804
sse
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2704
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
HUFFMAN_TABLE_OPTIMAL
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
AVCodecContext::mv_bits
attribute_deprecated int mv_bits
Definition: avcodec.h:1519
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:67
bits
uint8_t bits
Definition: vp3data.h:202
FMT_H261
@ FMT_H261
Definition: mpegutils.h:125
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVCodecContext::brd_scale
attribute_deprecated int brd_scale
Definition: avcodec.h:1099
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:202
limits.h
ff_check_alignment
int ff_check_alignment(void)
Definition: me_cmp.c:1014
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:63
ff_h263_encode_picture_header
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
MpegEncContext::b_count
int b_count
Definition: mpegvideo.h:350
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:864
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1404
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:448
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
PutBitContext
Definition: put_bits.h:35
Picture::encoding_error
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2794
arg
const char * arg
Definition: jacosubdec.c:66
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
if
if(ret)
Definition: filter_design.txt:179
Picture::reference
int reference
Definition: mpegpicture.h:87
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1389
AVCPBProperties::avg_bitrate
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:472
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:451
h263_class
static const AVClass h263_class
Definition: mpegvideo_enc.c:4735
MpegEncContext::pb2
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:409
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
ff_write_pass1_stats
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
PutBitContext::buf
uint8_t * buf
Definition: put_bits.h:38
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
write_mb_info
static void write_mb_info(MpegEncContext *s)
Definition: mpegvideo_enc.c:2837
run
uint8_t run
Definition: svq3.c:209
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:288
MpegEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MpegEncContext::f_code
int f_code
forward MV resolution
Definition: mpegvideo.h:238
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
me
#define me
Definition: vf_colormatrix.c:104
AVCodecContext::i_tex_bits
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1523
aandcttab.h
EDGE_WIDTH
#define EDGE_WIDTH
Definition: mpegpicture.h:33
AVCodecContext::misc_bits
attribute_deprecated int misc_bits
Definition: avcodec.h:1533
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:172
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:576
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:56
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:114
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
src
#define src
Definition: vp8dsp.c:254
ff_msmpeg4v3_encoder
AVCodec ff_msmpeg4v3_encoder
Definition: mpegvideo_enc.c:4811
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2343
mathops.h
MpegEncContext::mv_bits
int mv_bits
Definition: mpegvideo.h:344
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3567
AVCodecContext::b_frame_strategy
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:800
AVCodecContext::noise_reduction
attribute_deprecated int noise_reduction
Definition: avcodec.h:1044
ff_vbv_update
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1072
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1015
qpeldsp.h
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:565
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1475
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:66
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:236
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:2023
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegutils.h:127
wmv2.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
ff_clean_h263_qscales
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:266
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
weight
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1560
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
avpriv_copy_bits
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
mpv_encode_defaults
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:254
denoise_dct_c
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
Definition: mpegvideo_enc.c:3933
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
MAX_LEVEL
#define MAX_LEVEL
Definition: rl.h:36
avpriv_align_put_bits
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:649
get_sae
static int get_sae(uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1110
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
desc
const char * desc
Definition: nvenc.c:79
ERContext::error_count
atomic_int error_count
Definition: error_resilience.h:64
AVCodecContext::vbv_delay
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2027
merge_context_after_encode
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:3574
MpegEncContext::mb_intra
int mb_intra
Definition: mpegvideo.h:290
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
VE
#define VE
Definition: mpegvideo_enc.c:4727
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
AVPacket::size
int size
Definition: packet.h:356
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:366
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:188
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:721
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:204
load_input_picture
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1155
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
Definition: mpegvideo_enc.c:2085
CONFIG_MSMPEG4_ENCODER
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:412
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
MpegEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:207
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
MpegEncContext::dct_count
int dct_count[2]
Definition: mpegvideo.h:333
AVCodecContext::frame_skip_exp
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1463
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:53
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
size
int size
Definition: twinvq_data.h:11134
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVCodecContext::rtp_payload_size
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1508
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
ff_mpeg1_encode_mb
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:1028
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
MpegEncContext::interlaced_dct
int interlaced_dct
Definition: mpegvideo.h:491
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
encode_mb_internal
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
Definition: mpegvideo_enc.c:2199
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
ff_msmpeg4_encode_init
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
MpegEncContext::mv_type
int mv_type
Definition: mpegvideo.h:265
AVCPBProperties::max_bitrate
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:454
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
MpegEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:128
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:150
rv10.h
AVCodecContext::i_count
attribute_deprecated int i_count
Definition: avcodec.h:1527
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1592
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:214
M_PI
#define M_PI
Definition: mathematics.h:52
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
PutBitContext::buf_end
uint8_t * buf_end
Definition: put_bits.h:38
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
update_qscale
static void update_qscale(MpegEncContext *s)
Definition: mpegvideo_enc.c:174
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:485
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:740
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:490
ff_msmpeg4v2_encoder
AVCodec ff_msmpeg4v2_encoder
Definition: mpegvideo_enc.c:4791
MpegEncContext::block_last_index
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
encode_mb_hq
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2653
src1
#define src1
Definition: h264pred.c:139
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::last_mv
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
MpegEncContext::progressive_frame
int progressive_frame
Definition: mpegvideo.h:489
FMT_H263
@ FMT_H263
Definition: mpegutils.h:126
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:54
AVCodecContext::b_sensitivity
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1132
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:483
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:67
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3957
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2916
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
internal.h
ff_square_tab
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:33
estimate_best_b_count
static int estimate_best_b_count(MpegEncContext *s)
Definition: mpegvideo_enc.c:1371
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1052
MpegEncContext::esc3_level_length
int esc3_level_length
Definition: mpegvideo.h:440
MpegEncContext::obmc
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
MpegEncContext::tex_pb
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:408
AVCodecContext::frame_skip_cmp
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1467
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:288
qmat16
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:55
av_always_inline
#define av_always_inline
Definition: attributes.h:49
AVCodecContext::header_bits
attribute_deprecated int header_bits
Definition: avcodec.h:1521
get_visual_weight
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2175
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:941
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
ff_flv_encode_picture_header
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
h263p_class
static const AVClass h263p_class
Definition: mpegvideo_enc.c:4763
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MpegEncContext *s)
Definition: mpeg4videoenc.c:1325
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:197
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
MpegEncContext::er
ERContext er
Definition: mpegvideo.h:566
ff_init_me
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
AVCPBProperties::min_bitrate
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:463
AVCodecContext::height
int height
Definition: avcodec.h:699
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
MpegEncContext::picture_number
int picture_number
Definition: mpegvideo.h:127
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
idctdsp.h
MpegEncContext::h263_slice_structured
int h263_slice_structured
Definition: mpegvideo.h:377
avcodec.h
msmpeg4.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
get_bits_diff
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:755
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
ret
ret
Definition: filter_design.txt:187
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
AVCPBProperties::buffer_size
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:481
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_mjpeg_encode_close
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:126
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:591
ff_h261_encode_mb
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:215
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo_enc.c:231
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
AVCodecContext::scenechange_threshold
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1040
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
ff_fix_long_mvs
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:508
MpegEncContext::dquant
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
default_mv_penalty
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
Picture::shared
int shared
Definition: mpegpicture.h:88
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:68
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:466
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
MpegEncContext::skip_count
int skip_count
Definition: mpegvideo.h:351
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg4video.h
MpegEncContext::last_bits
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1368
AVRational::den
int den
Denominator.
Definition: rational.h:60
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:46
MpegEncContext::gop_picture_number
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:451
select_input_picture
static int select_input_picture(MpegEncContext *s)
Definition: mpegvideo_enc.c:1505
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:223
frame_end
static void frame_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:1681
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2260
temp
else temp
Definition: vf_mcdeint.c:256
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1017
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
shift
static int shift(int a, int b)
Definition: sonic.c:82
ff_wmv2_encode_mb
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:786
MpegEncContext::noise_reduction
int noise_reduction
Definition: mpegvideo.h:582
ff_convert_matrix
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
packet_internal.h
ff_mpeg1_encode_init
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1037
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
skip_check
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
Definition: mpegvideo_enc.c:1306
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:288
MpegEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideo.h:576
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1418
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
dct_quantize_refine
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4291
ff_h263p_encoder
AVCodec ff_h263p_encoder
Definition: mpegvideo_enc.c:4770
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:589
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:48
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
FF_CMP_DCTMAX
#define FF_CMP_DCTMAX
Definition: avcodec.h:944
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:131
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1177
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1016
AVPacket
This structure stores compressed data.
Definition: packet.h:332
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:553
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:240
clip_coeffs
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
Definition: mpegvideo_enc.c:2141
libxvid.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
MAX_B_FRAMES
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
encode_picture
static int encode_picture(MpegEncContext *s, int picture_number)
Definition: mpegvideo_enc.c:3658
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:699
AVCodecContext::me_penalty_compensation
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1087
bytestream.h
wmv1_class
static const AVClass wmv1_class
Definition: mpegvideo_enc.c:4824
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:590
MpegEncContext::misc_bits
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:587
FF_ALLOCZ_OR_GOTO
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:149
copy_context_after_encode
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2614
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:65
ff_get_2pass_fcode
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:154
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
copy_context_before_encode
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
Definition: mpegvideo_enc.c:2586
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:70
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
ff_dct_quantize_c
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4643
ff_alloc_packet2
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
int
int
Definition: ffmpeg_filter.c:192
AVFrame::display_picture_number
int display_picture_number
picture number in display order
Definition: frame.h:418
msmpeg4v2_class
static const AVClass msmpeg4v2_class
Definition: mpegvideo_enc.c:4784
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:616
AVCodecContext::frame_skip_factor
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1459
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:81
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
mb_info
Definition: cinepakenc.c:87
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:905
MpegEncContext::alt_inter_vlc
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
MpegEncContext::p_tex_bits
int p_tex_bits
Definition: mpegvideo.h:347
ff_h263_encode_mb
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
pixblockdsp.h
ff_h261_encode_picture_header
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
h263p_options
static const AVOption h263p_options[]
Definition: mpegvideo_enc.c:4755
h263.h
write_slice_end
static void write_slice_end(MpegEncContext *s)
Definition: mpegvideo_enc.c:2819
ff_h263_encoder
AVCodec ff_h263_encoder
Definition: mpegvideo_enc.c:4742
intmath.h