FFmpeg — Doxygen source listing
Navigation: All Data Structures · Namespaces · Files · Functions · Variables · Typedefs · Enumerations · Enumerator · Macros · Groups · Pages
File: libavcodec/mpegvideo_enc.c
Go to the documentation of this file. (Note: this text extraction embeds the original line numbers and has dropped some hyperlinked source lines.)
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
42 #include "avcodec.h"
43 #include "dct.h"
44 #include "idctdsp.h"
45 #include "mpeg12.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
48 #include "h261.h"
49 #include "h263.h"
50 #include "h263data.h"
51 #include "mjpegenc_common.h"
52 #include "mathops.h"
53 #include "mpegutils.h"
54 #include "mjpegenc.h"
55 #include "msmpeg4.h"
56 #include "pixblockdsp.h"
57 #include "qpeldsp.h"
58 #include "faandct.h"
59 #include "thread.h"
60 #include "aandcttab.h"
61 #include "flv.h"
62 #include "mpeg4video.h"
63 #include "internal.h"
64 #include "bytestream.h"
65 #include "wmv2.h"
66 #include "rv10.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
/**
 * ff_convert_matrix(): build per-qscale quantizer tables (qmat / qmat16)
 * from a base quant matrix, compensating for the scaling of the selected
 * FDCT implementation, and warn if the chosen QMAT_SHIFT could overflow.
 *
 * NOTE(review): this is a Doxygen text extraction; hyperlinked source
 * lines were dropped (line 103, the "if" half of the qscale2 selection,
 * and line 168, the av_log() call head). The code below is reproduced
 * verbatim, gaps included.
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
/* NOTE(review): line 103 (the "if (...) qscale2 = ...;" branch of this
 * selection) is missing from this extraction; only the "else" half
 * survives below. */
104  else qscale2 = qscale << 1;
105 
/* Slow/accurate integer DCTs keep the natural coefficient scale, so only
 * qscale2 and the quant matrix enter the divisor. */
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
111  for (i = 0; i < 64; i++) {
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
/* The AAN "ifast" DCT pre-scales coefficients by ff_aanscales[], so the
 * divisor must include that factor (and the shift grows by 14). */
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
/* Clamp the 16-bit table away from 0 and 128*256 so code using 16-bit
 * multiplies never hits a zero or boundary value. */
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
/* Find the smallest extra shift needed so that the worst-case DCT
 * coefficient magnitude times qmat never exceeds INT_MAX. */
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
/* NOTE(review): line 168 (presumably the "av_log(s->avctx, AV_LOG_INFO,"
 * head of this warning) is missing from this extraction — confirm
 * against upstream FFmpeg. */
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
/**
 * update_qscale(): derive the per-frame quantizer s->qscale from the
 * current rate-control lambda, clip it to the configured [qmin, qmax]
 * range, and refresh s->lambda2 from s->lambda.
 *
 * NOTE(review): Doxygen extraction dropped line 184 (the second half of
 * the "continue" condition) and line 199 (the shift amount that closes
 * the lambda2 expression).
 */
174 static inline void update_qscale(MpegEncContext *s)
175 {
/* The "&& 0" makes this non-linear-qscale table search dead code on
 * purpose: the linear formula in the else-branch is always used. */
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
/* Search the MPEG-2 non-linear qscale table for the entry whose scaled
 * value is closest to lambda * 139. */
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
/* NOTE(review): line 184 (presumably the qmax half of this condition)
 * is missing from this extraction — confirm against upstream FFmpeg. */
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
/* Linear mapping: qscale ~= lambda * 139 / 2^(FF_LAMBDA_SHIFT + 7),
 * with rounding; when vbv_ignore_qmax is set, allow up to 31 instead of
 * the user qmax. */
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
/* NOTE(review): line 199 (the closing shift amount, presumably
 * FF_LAMBDA_SHIFT) is missing from this extraction — confirm against
 * upstream FFmpeg. */
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
255 {
256  int i;
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
270 {
271  if (ARCH_X86)
273 
274  if (CONFIG_H263_ENCODER)
276  if (!s->dct_quantize)
278  if (!s->denoise_dct)
281  if (s->avctx->trellis)
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return -1;
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return -1;
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return -1;
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
349  break;
350  }
351 
352  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
358  if (avctx->me_penalty_compensation)
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
370  av_log(avctx, AV_LOG_WARNING,
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
377  if (avctx->max_b_frames > MAX_B_FRAMES) {
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
380  avctx->max_b_frames = MAX_B_FRAMES;
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
396  av_log(avctx, AV_LOG_ERROR,
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
402  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418 #if FF_API_MOTION_EST
420  s->me_method = avctx->me_method;
422 #endif
423 
424  /* Fixed QSCALE */
425  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
426 
427 #if FF_API_MPV_OPT
429  if (avctx->border_masking != 0.0)
430  s->border_masking = avctx->border_masking;
432 #endif
433 
434  s->adaptive_quant = (s->avctx->lumi_masking ||
435  s->avctx->dark_masking ||
438  s->avctx->p_masking ||
439  s->border_masking ||
440  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
441  !s->fixed_qscale;
442 
444 
445  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446  switch(avctx->codec_id) {
449  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
450  break;
451  case AV_CODEC_ID_MPEG4:
455  if (avctx->rc_max_rate >= 15000000) {
456  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457  } else if(avctx->rc_max_rate >= 2000000) {
458  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459  } else if(avctx->rc_max_rate >= 384000) {
460  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
461  } else
462  avctx->rc_buffer_size = 40;
463  avctx->rc_buffer_size *= 16384;
464  break;
465  }
466  if (avctx->rc_buffer_size) {
467  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
468  }
469  }
470 
471  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
473  return -1;
474  }
475 
476  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477  av_log(avctx, AV_LOG_INFO,
478  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
479  }
480 
481  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
483  return -1;
484  }
485 
486  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
488  return -1;
489  }
490 
491  if (avctx->rc_max_rate &&
492  avctx->rc_max_rate == avctx->bit_rate &&
493  avctx->rc_max_rate != avctx->rc_min_rate) {
494  av_log(avctx, AV_LOG_INFO,
495  "impossible bitrate constraints, this will fail\n");
496  }
497 
498  if (avctx->rc_buffer_size &&
499  avctx->bit_rate * (int64_t)avctx->time_base.num >
500  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
502  return -1;
503  }
504 
505  if (!s->fixed_qscale &&
506  avctx->bit_rate * av_q2d(avctx->time_base) >
507  avctx->bit_rate_tolerance) {
508  av_log(avctx, AV_LOG_WARNING,
509  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, (int64_t)avctx->bit_rate);
510  avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
511  }
512 
513  if (s->avctx->rc_max_rate &&
514  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
517  90000LL * (avctx->rc_buffer_size - 1) >
518  s->avctx->rc_max_rate * 0xFFFFLL) {
519  av_log(avctx, AV_LOG_INFO,
520  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
521  "specified vbv buffer is too large for the given bitrate!\n");
522  }
523 
524  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
526  s->codec_id != AV_CODEC_ID_FLV1) {
527  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
528  return -1;
529  }
530 
531  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
532  av_log(avctx, AV_LOG_ERROR,
533  "OBMC is only supported with simple mb decision\n");
534  return -1;
535  }
536 
537  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
538  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
539  return -1;
540  }
541 
542  if (s->max_b_frames &&
543  s->codec_id != AV_CODEC_ID_MPEG4 &&
546  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
547  return -1;
548  }
549  if (s->max_b_frames < 0) {
550  av_log(avctx, AV_LOG_ERROR,
551  "max b frames must be 0 or positive for mpegvideo based encoders\n");
552  return -1;
553  }
554 
555  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
556  s->codec_id == AV_CODEC_ID_H263 ||
557  s->codec_id == AV_CODEC_ID_H263P) &&
558  (avctx->sample_aspect_ratio.num > 255 ||
559  avctx->sample_aspect_ratio.den > 255)) {
560  av_log(avctx, AV_LOG_WARNING,
561  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
564  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
565  }
566 
567  if ((s->codec_id == AV_CODEC_ID_H263 ||
568  s->codec_id == AV_CODEC_ID_H263P) &&
569  (avctx->width > 2048 ||
570  avctx->height > 1152 )) {
571  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
572  return -1;
573  }
574  if ((s->codec_id == AV_CODEC_ID_H263 ||
575  s->codec_id == AV_CODEC_ID_H263P) &&
576  ((avctx->width &3) ||
577  (avctx->height&3) )) {
578  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
579  return -1;
580  }
581 
582  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
583  (avctx->width > 4095 ||
584  avctx->height > 4095 )) {
585  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
586  return -1;
587  }
588 
589  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
590  (avctx->width > 16383 ||
591  avctx->height > 16383 )) {
592  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
593  return -1;
594  }
595 
596  if (s->codec_id == AV_CODEC_ID_RV10 &&
597  (avctx->width &15 ||
598  avctx->height&15 )) {
599  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
600  return AVERROR(EINVAL);
601  }
602 
603  if (s->codec_id == AV_CODEC_ID_RV20 &&
604  (avctx->width &3 ||
605  avctx->height&3 )) {
606  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
607  return AVERROR(EINVAL);
608  }
609 
610  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
611  s->codec_id == AV_CODEC_ID_WMV2) &&
612  avctx->width & 1) {
613  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
614  return -1;
615  }
616 
619  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
620  return -1;
621  }
622 
623 #if FF_API_PRIVATE_OPT
625  if (avctx->mpeg_quant)
626  s->mpeg_quant = avctx->mpeg_quant;
628 #endif
629 
630  // FIXME mpeg2 uses that too
631  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
632  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
633  av_log(avctx, AV_LOG_ERROR,
634  "mpeg2 style quantization not supported by codec\n");
635  return -1;
636  }
637 
638  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
639  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
640  return -1;
641  }
642 
643  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
645  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
646  return -1;
647  }
648 
649  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
650  (s->codec_id == AV_CODEC_ID_AMV ||
651  s->codec_id == AV_CODEC_ID_MJPEG)) {
652  // Used to produce garbage with MJPEG.
653  av_log(avctx, AV_LOG_ERROR,
654  "QP RD is no longer compatible with MJPEG or AMV\n");
655  return -1;
656  }
657 
658 #if FF_API_PRIVATE_OPT
660  if (avctx->scenechange_threshold)
663 #endif
664 
665  if (s->scenechange_threshold < 1000000000 &&
667  av_log(avctx, AV_LOG_ERROR,
668  "closed gop with scene change detection are not supported yet, "
669  "set threshold to 1000000000\n");
670  return -1;
671  }
672 
673  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
674  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
675  av_log(avctx, AV_LOG_ERROR,
676  "low delay forcing is only available for mpeg2\n");
677  return -1;
678  }
679  if (s->max_b_frames != 0) {
680  av_log(avctx, AV_LOG_ERROR,
681  "B-frames cannot be used with low delay\n");
682  return -1;
683  }
684  }
685 
686  if (s->q_scale_type == 1) {
687  if (avctx->qmax > 28) {
688  av_log(avctx, AV_LOG_ERROR,
689  "non linear quant only supports qmax <= 28 currently\n");
690  return -1;
691  }
692  }
693 
694  if (avctx->slices > 1 &&
695  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
696  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
697  return AVERROR(EINVAL);
698  }
699 
700  if (s->avctx->thread_count > 1 &&
701  s->codec_id != AV_CODEC_ID_MPEG4 &&
704  s->codec_id != AV_CODEC_ID_MJPEG &&
705  (s->codec_id != AV_CODEC_ID_H263P)) {
706  av_log(avctx, AV_LOG_ERROR,
707  "multi threaded encoding not supported by codec\n");
708  return -1;
709  }
710 
711  if (s->avctx->thread_count < 1) {
712  av_log(avctx, AV_LOG_ERROR,
713  "automatic thread number detection not supported by codec, "
714  "patch welcome\n");
715  return -1;
716  }
717 
718  if (!avctx->time_base.den || !avctx->time_base.num) {
719  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
720  return -1;
721  }
722 
723 #if FF_API_PRIVATE_OPT
725  if (avctx->b_frame_strategy)
727  if (avctx->b_sensitivity != 40)
728  s->b_sensitivity = avctx->b_sensitivity;
730 #endif
731 
732  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
733  av_log(avctx, AV_LOG_INFO,
734  "notice: b_frame_strategy only affects the first pass\n");
735  s->b_frame_strategy = 0;
736  }
737 
738  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
739  if (i > 1) {
740  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
741  avctx->time_base.den /= i;
742  avctx->time_base.num /= i;
743  //return -1;
744  }
745 
747  // (a + x * 3 / 8) / x
748  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
749  s->inter_quant_bias = 0;
750  } else {
751  s->intra_quant_bias = 0;
752  // (a - x / 4) / x
753  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
754  }
755 
756  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
757  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
758  return AVERROR(EINVAL);
759  }
760 
761 #if FF_API_QUANT_BIAS
768 #endif
769 
770  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
771 
772  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
773  s->avctx->time_base.den > (1 << 16) - 1) {
774  av_log(avctx, AV_LOG_ERROR,
775  "timebase %d/%d not supported by MPEG 4 standard, "
776  "the maximum admitted value for the timebase denominator "
777  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
778  (1 << 16) - 1);
779  return -1;
780  }
781  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
782 
783  switch (avctx->codec->id) {
785  s->out_format = FMT_MPEG1;
787  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
788  break;
790  s->out_format = FMT_MPEG1;
792  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
793  s->rtp_mode = 1;
794  break;
795  case AV_CODEC_ID_MJPEG:
796  case AV_CODEC_ID_AMV:
797  s->out_format = FMT_MJPEG;
798  s->intra_only = 1; /* force intra only for jpeg */
799  if (!CONFIG_MJPEG_ENCODER ||
800  ff_mjpeg_encode_init(s) < 0)
801  return -1;
802  avctx->delay = 0;
803  s->low_delay = 1;
804  break;
805  case AV_CODEC_ID_H261:
806  if (!CONFIG_H261_ENCODER)
807  return -1;
808  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
809  av_log(avctx, AV_LOG_ERROR,
810  "The specified picture size of %dx%d is not valid for the "
811  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
812  s->width, s->height);
813  return -1;
814  }
815  s->out_format = FMT_H261;
816  avctx->delay = 0;
817  s->low_delay = 1;
818  s->rtp_mode = 0; /* Sliced encoding not supported */
819  break;
820  case AV_CODEC_ID_H263:
821  if (!CONFIG_H263_ENCODER)
822  return -1;
824  s->width, s->height) == 8) {
825  av_log(avctx, AV_LOG_ERROR,
826  "The specified picture size of %dx%d is not valid for "
827  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
828  "352x288, 704x576, and 1408x1152. "
829  "Try H.263+.\n", s->width, s->height);
830  return -1;
831  }
832  s->out_format = FMT_H263;
833  avctx->delay = 0;
834  s->low_delay = 1;
835  break;
836  case AV_CODEC_ID_H263P:
837  s->out_format = FMT_H263;
838  s->h263_plus = 1;
839  /* Fx */
840  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
841  s->modified_quant = s->h263_aic;
842  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
843  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
844 
845  /* /Fx */
846  /* These are just to be sure */
847  avctx->delay = 0;
848  s->low_delay = 1;
849  break;
850  case AV_CODEC_ID_FLV1:
851  s->out_format = FMT_H263;
852  s->h263_flv = 2; /* format = 1; 11-bit codes */
853  s->unrestricted_mv = 1;
854  s->rtp_mode = 0; /* don't allow GOB */
855  avctx->delay = 0;
856  s->low_delay = 1;
857  break;
858  case AV_CODEC_ID_RV10:
859  s->out_format = FMT_H263;
860  avctx->delay = 0;
861  s->low_delay = 1;
862  break;
863  case AV_CODEC_ID_RV20:
864  s->out_format = FMT_H263;
865  avctx->delay = 0;
866  s->low_delay = 1;
867  s->modified_quant = 1;
868  s->h263_aic = 1;
869  s->h263_plus = 1;
870  s->loop_filter = 1;
871  s->unrestricted_mv = 0;
872  break;
873  case AV_CODEC_ID_MPEG4:
874  s->out_format = FMT_H263;
875  s->h263_pred = 1;
876  s->unrestricted_mv = 1;
877  s->low_delay = s->max_b_frames ? 0 : 1;
878  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
879  break;
881  s->out_format = FMT_H263;
882  s->h263_pred = 1;
883  s->unrestricted_mv = 1;
884  s->msmpeg4_version = 2;
885  avctx->delay = 0;
886  s->low_delay = 1;
887  break;
889  s->out_format = FMT_H263;
890  s->h263_pred = 1;
891  s->unrestricted_mv = 1;
892  s->msmpeg4_version = 3;
893  s->flipflop_rounding = 1;
894  avctx->delay = 0;
895  s->low_delay = 1;
896  break;
897  case AV_CODEC_ID_WMV1:
898  s->out_format = FMT_H263;
899  s->h263_pred = 1;
900  s->unrestricted_mv = 1;
901  s->msmpeg4_version = 4;
902  s->flipflop_rounding = 1;
903  avctx->delay = 0;
904  s->low_delay = 1;
905  break;
906  case AV_CODEC_ID_WMV2:
907  s->out_format = FMT_H263;
908  s->h263_pred = 1;
909  s->unrestricted_mv = 1;
910  s->msmpeg4_version = 5;
911  s->flipflop_rounding = 1;
912  avctx->delay = 0;
913  s->low_delay = 1;
914  break;
915  default:
916  return -1;
917  }
918 
919 #if FF_API_PRIVATE_OPT
921  if (avctx->noise_reduction)
922  s->noise_reduction = avctx->noise_reduction;
924 #endif
925 
926  avctx->has_b_frames = !s->low_delay;
927 
928  s->encoding = 1;
929 
930  s->progressive_frame =
933  s->alternate_scan);
934 
935  /* init */
936  ff_mpv_idct_init(s);
937  if (ff_mpv_common_init(s) < 0)
938  return -1;
939 
940  ff_fdctdsp_init(&s->fdsp, avctx);
941  ff_me_cmp_init(&s->mecc, avctx);
943  ff_pixblockdsp_init(&s->pdsp, avctx);
944  ff_qpeldsp_init(&s->qdsp);
945 
946  if (s->msmpeg4_version) {
948  2 * 2 * (MAX_LEVEL + 1) *
949  (MAX_RUN + 1) * 2 * sizeof(int), fail);
950  }
951  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
952 
953  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
954  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
955  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
956  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
957  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
958  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
960  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
962  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
963 
964 
965  if (s->noise_reduction) {
967  2 * 64 * sizeof(uint16_t), fail);
968  }
969 
971 
972  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
974 
975  if (s->slice_context_count > 1) {
976  s->rtp_mode = 1;
977 
978  if (avctx->codec_id == AV_CODEC_ID_H263P)
979  s->h263_slice_structured = 1;
980  }
981 
982  s->quant_precision = 5;
983 
984 #if FF_API_PRIVATE_OPT
986  if (avctx->frame_skip_threshold)
988  if (avctx->frame_skip_factor)
990  if (avctx->frame_skip_exp)
991  s->frame_skip_exp = avctx->frame_skip_exp;
992  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
993  s->frame_skip_cmp = avctx->frame_skip_cmp;
995 #endif
996 
999 
1000  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1002  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1005  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1006  return ret;
1007  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1008  && s->out_format == FMT_MPEG1)
1010 
1011  /* init q matrix */
1012  for (i = 0; i < 64; i++) {
1013  int j = s->idsp.idct_permutation[i];
1014  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1015  s->mpeg_quant) {
1018  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1019  s->intra_matrix[j] =
1021  } else {
1022  /* MPEG-1/2 */
1023  s->chroma_intra_matrix[j] =
1026  }
1027  if (s->avctx->intra_matrix)
1028  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1029  if (s->avctx->inter_matrix)
1030  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1031  }
1032 
1033  /* precompute matrix */
1034  /* for mjpeg, we do include qscale in the matrix */
1035  if (s->out_format != FMT_MJPEG) {
1037  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1038  31, 1);
1040  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1041  31, 0);
1042  }
1043 
1044 #if FF_API_RC_STRATEGY
1046  if (!s->rc_strategy)
1047  s->rc_strategy = s->avctx->rc_strategy;
1049 #endif
1050 
1051  if (ff_rate_control_init(s) < 0)
1052  return -1;
1053 
1054 #if FF_API_RC_STRATEGY
1056 #endif
1057 
1059 #if CONFIG_LIBXVID
1060  ret = ff_xvid_rate_control_init(s);
1061 #else
1062  ret = AVERROR(ENOSYS);
1064  "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
1065 #endif
1066  if (ret < 0)
1067  return ret;
1068  }
1069 
1070 #if FF_API_ERROR_RATE
1072  if (avctx->error_rate)
1073  s->error_rate = avctx->error_rate;
1075 #endif
1076 
1077 #if FF_API_NORMALIZE_AQP
1079  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1080  s->mpv_flags |= FF_MPV_FLAG_NAQ;
1082 #endif
1083 
1084 #if FF_API_MV0
1086  if (avctx->flags & CODEC_FLAG_MV0)
1087  s->mpv_flags |= FF_MPV_FLAG_MV0;
1089 #endif
1090 
1091 #if FF_API_MPV_OPT
1093  if (avctx->rc_qsquish != 0.0)
1094  s->rc_qsquish = avctx->rc_qsquish;
1095  if (avctx->rc_qmod_amp != 0.0)
1096  s->rc_qmod_amp = avctx->rc_qmod_amp;
1097  if (avctx->rc_qmod_freq)
1098  s->rc_qmod_freq = avctx->rc_qmod_freq;
1099  if (avctx->rc_buffer_aggressivity != 1.0)
1101  if (avctx->rc_initial_cplx != 0.0)
1102  s->rc_initial_cplx = avctx->rc_initial_cplx;
1103  if (avctx->lmin)
1104  s->lmin = avctx->lmin;
1105  if (avctx->lmax)
1106  s->lmax = avctx->lmax;
1107 
1108  if (avctx->rc_eq) {
1109  av_freep(&s->rc_eq);
1110  s->rc_eq = av_strdup(avctx->rc_eq);
1111  if (!s->rc_eq)
1112  return AVERROR(ENOMEM);
1113  }
1115 #endif
1116 
1117 #if FF_API_PRIVATE_OPT
1119  if (avctx->brd_scale)
1120  s->brd_scale = avctx->brd_scale;
1121 
1122  if (avctx->prediction_method)
1123  s->pred = avctx->prediction_method + 1;
1125 #endif
1126 
1127  if (s->b_frame_strategy == 2) {
1128  for (i = 0; i < s->max_b_frames + 2; i++) {
1129  s->tmp_frames[i] = av_frame_alloc();
1130  if (!s->tmp_frames[i])
1131  return AVERROR(ENOMEM);
1132 
1134  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1135  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1136 
1137  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1138  if (ret < 0)
1139  return ret;
1140  }
1141  }
1142 
1143  cpb_props = ff_add_cpb_side_data(avctx);
1144  if (!cpb_props)
1145  return AVERROR(ENOMEM);
1146  cpb_props->max_bitrate = avctx->rc_max_rate;
1147  cpb_props->min_bitrate = avctx->rc_min_rate;
1148  cpb_props->avg_bitrate = avctx->bit_rate;
1149  cpb_props->buffer_size = avctx->rc_buffer_size;
1150 
1151  return 0;
1152 fail:
1153  ff_mpv_encode_end(avctx);
1154  return AVERROR_UNKNOWN;
1155 }
1156 
1158 {
1159  MpegEncContext *s = avctx->priv_data;
1160  int i;
1161 
1163 #if CONFIG_LIBXVID
1166 #endif
1167 
1168  ff_mpv_common_end(s);
1169  if (CONFIG_MJPEG_ENCODER &&
1170  s->out_format == FMT_MJPEG)
1172 
1173  av_freep(&avctx->extradata);
1174 
1175  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1176  av_frame_free(&s->tmp_frames[i]);
1177 
1180 
1181  av_freep(&s->avctx->stats_out);
1182  av_freep(&s->ac_stats);
1183 
1188  av_freep(&s->q_intra_matrix);
1189  av_freep(&s->q_inter_matrix);
1192  av_freep(&s->input_picture);
1194  av_freep(&s->dct_offset);
1195 
1196  return 0;
1197 }
1198 
/**
 * Sum of absolute differences between a 16x16 block and a constant value.
 *
 * @param src    top-left corner of the 16x16 block
 * @param ref    reference value each pixel is compared against
 * @param stride distance in bytes between vertically adjacent pixels
 * @return accumulated absolute error over all 256 pixels
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;
    int row, col;

    for (row = 0; row < 16; row++)
        for (col = 0; col < 16; col++)
            total += FFABS(src[col + row * stride] - ref);

    return total;
}
1212 
1214  uint8_t *ref, int stride)
1215 {
1216  int x, y, w, h;
1217  int acc = 0;
1218 
1219  w = s->width & ~15;
1220  h = s->height & ~15;
1221 
1222  for (y = 0; y < h; y += 16) {
1223  for (x = 0; x < w; x += 16) {
1224  int offset = x + y * stride;
1225  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1226  stride, 16);
1227  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1228  int sae = get_sae(src + offset, mean, stride);
1229 
1230  acc += sae + 500 < sad;
1231  }
1232  }
1233  return acc;
1234 }
1235 
1236 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1237 {
1238  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1240  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1241  &s->linesize, &s->uvlinesize);
1242 }
1243 
1244 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1245 {
1246  Picture *pic = NULL;
1247  int64_t pts;
1248  int i, display_picture_number = 0, ret;
1249  int encoding_delay = s->max_b_frames ? s->max_b_frames
1250  : (s->low_delay ? 0 : 1);
1251  int flush_offset = 1;
1252  int direct = 1;
1253 
1254  if (pic_arg) {
1255  pts = pic_arg->pts;
1256  display_picture_number = s->input_picture_number++;
1257 
1258  if (pts != AV_NOPTS_VALUE) {
1259  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1260  int64_t last = s->user_specified_pts;
1261 
1262  if (pts <= last) {
1264  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1265  pts, last);
1266  return AVERROR(EINVAL);
1267  }
1268 
1269  if (!s->low_delay && display_picture_number == 1)
1270  s->dts_delta = pts - last;
1271  }
1272  s->user_specified_pts = pts;
1273  } else {
1274  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1275  s->user_specified_pts =
1276  pts = s->user_specified_pts + 1;
1277  av_log(s->avctx, AV_LOG_INFO,
1278  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1279  pts);
1280  } else {
1281  pts = display_picture_number;
1282  }
1283  }
1284 
1285  if (!pic_arg->buf[0] ||
1286  pic_arg->linesize[0] != s->linesize ||
1287  pic_arg->linesize[1] != s->uvlinesize ||
1288  pic_arg->linesize[2] != s->uvlinesize)
1289  direct = 0;
1290  if ((s->width & 15) || (s->height & 15))
1291  direct = 0;
1292  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1293  direct = 0;
1294  if (s->linesize & (STRIDE_ALIGN-1))
1295  direct = 0;
1296 
1297  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1298  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1299 
1300  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1301  if (i < 0)
1302  return i;
1303 
1304  pic = &s->picture[i];
1305  pic->reference = 3;
1306 
1307  if (direct) {
1308  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1309  return ret;
1310  }
1311  ret = alloc_picture(s, pic, direct);
1312  if (ret < 0)
1313  return ret;
1314 
1315  if (!direct) {
1316  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1317  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1318  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1319  // empty
1320  } else {
1321  int h_chroma_shift, v_chroma_shift;
1323  &h_chroma_shift,
1324  &v_chroma_shift);
1325 
1326  for (i = 0; i < 3; i++) {
1327  int src_stride = pic_arg->linesize[i];
1328  int dst_stride = i ? s->uvlinesize : s->linesize;
1329  int h_shift = i ? h_chroma_shift : 0;
1330  int v_shift = i ? v_chroma_shift : 0;
1331  int w = s->width >> h_shift;
1332  int h = s->height >> v_shift;
1333  uint8_t *src = pic_arg->data[i];
1334  uint8_t *dst = pic->f->data[i];
1335  int vpad = 16;
1336 
1337  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1338  && !s->progressive_sequence
1339  && FFALIGN(s->height, 32) - s->height > 16)
1340  vpad = 32;
1341 
1342  if (!s->avctx->rc_buffer_size)
1343  dst += INPLACE_OFFSET;
1344 
1345  if (src_stride == dst_stride)
1346  memcpy(dst, src, src_stride * h);
1347  else {
1348  int h2 = h;
1349  uint8_t *dst2 = dst;
1350  while (h2--) {
1351  memcpy(dst2, src, w);
1352  dst2 += dst_stride;
1353  src += src_stride;
1354  }
1355  }
1356  if ((s->width & 15) || (s->height & (vpad-1))) {
1357  s->mpvencdsp.draw_edges(dst, dst_stride,
1358  w, h,
1359  16 >> h_shift,
1360  vpad >> v_shift,
1361  EDGE_BOTTOM);
1362  }
1363  }
1364  emms_c();
1365  }
1366  }
1367  ret = av_frame_copy_props(pic->f, pic_arg);
1368  if (ret < 0)
1369  return ret;
1370 
1371  pic->f->display_picture_number = display_picture_number;
1372  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1373  } else {
1374  /* Flushing: When we have not received enough input frames,
1375  * ensure s->input_picture[0] contains the first picture */
1376  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1377  if (s->input_picture[flush_offset])
1378  break;
1379 
1380  if (flush_offset <= 1)
1381  flush_offset = 1;
1382  else
1383  encoding_delay = encoding_delay - flush_offset + 1;
1384  }
1385 
1386  /* shift buffer entries */
1387  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1388  s->input_picture[i - flush_offset] = s->input_picture[i];
1389 
1390  s->input_picture[encoding_delay] = (Picture*) pic;
1391 
1392  return 0;
1393 }
1394 
1396 {
1397  int x, y, plane;
1398  int score = 0;
1399  int64_t score64 = 0;
1400 
1401  for (plane = 0; plane < 3; plane++) {
1402  const int stride = p->f->linesize[plane];
1403  const int bw = plane ? 1 : 2;
1404  for (y = 0; y < s->mb_height * bw; y++) {
1405  for (x = 0; x < s->mb_width * bw; x++) {
1406  int off = p->shared ? 0 : 16;
1407  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1408  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1409  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1410 
1411  switch (FFABS(s->frame_skip_exp)) {
1412  case 0: score = FFMAX(score, v); break;
1413  case 1: score += FFABS(v); break;
1414  case 2: score64 += v * (int64_t)v; break;
1415  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1416  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1417  }
1418  }
1419  }
1420  }
1421  emms_c();
1422 
1423  if (score)
1424  score64 = score;
1425  if (s->frame_skip_exp < 0)
1426  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1427  -1.0/s->frame_skip_exp);
1428 
1429  if (score64 < s->frame_skip_threshold)
1430  return 1;
1431  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1432  return 1;
1433  return 0;
1434 }
1435 
1437 {
1438  AVPacket pkt = { 0 };
1439  int ret;
1440  int size = 0;
1441 
1442  av_init_packet(&pkt);
1443 
1444  ret = avcodec_send_frame(c, frame);
1445  if (ret < 0)
1446  return ret;
1447 
1448  do {
1449  ret = avcodec_receive_packet(c, &pkt);
1450  if (ret >= 0) {
1451  size += pkt.size;
1452  av_packet_unref(&pkt);
1453  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1454  return ret;
1455  } while (ret >= 0);
1456 
1457  return size;
1458 }
1459 
1461 {
1462  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1463  const int scale = s->brd_scale;
1464  int width = s->width >> scale;
1465  int height = s->height >> scale;
1466  int i, j, out_size, p_lambda, b_lambda, lambda2;
1467  int64_t best_rd = INT64_MAX;
1468  int best_b_count = -1;
1469  int ret = 0;
1470 
1471  av_assert0(scale >= 0 && scale <= 3);
1472 
1473  //emms_c();
1474  //s->next_picture_ptr->quality;
1475  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1476  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1477  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1478  if (!b_lambda) // FIXME we should do this somewhere else
1479  b_lambda = p_lambda;
1480  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1482 
1483  for (i = 0; i < s->max_b_frames + 2; i++) {
1484  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1485  s->next_picture_ptr;
1486  uint8_t *data[4];
1487 
1488  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1489  pre_input = *pre_input_ptr;
1490  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1491 
1492  if (!pre_input.shared && i) {
1493  data[0] += INPLACE_OFFSET;
1494  data[1] += INPLACE_OFFSET;
1495  data[2] += INPLACE_OFFSET;
1496  }
1497 
1498  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1499  s->tmp_frames[i]->linesize[0],
1500  data[0],
1501  pre_input.f->linesize[0],
1502  width, height);
1503  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1504  s->tmp_frames[i]->linesize[1],
1505  data[1],
1506  pre_input.f->linesize[1],
1507  width >> 1, height >> 1);
1508  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1509  s->tmp_frames[i]->linesize[2],
1510  data[2],
1511  pre_input.f->linesize[2],
1512  width >> 1, height >> 1);
1513  }
1514  }
1515 
1516  for (j = 0; j < s->max_b_frames + 1; j++) {
1517  AVCodecContext *c;
1518  int64_t rd = 0;
1519 
1520  if (!s->input_picture[j])
1521  break;
1522 
1524  if (!c)
1525  return AVERROR(ENOMEM);
1526 
1527  c->width = width;
1528  c->height = height;
1530  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1531  c->mb_decision = s->avctx->mb_decision;
1532  c->me_cmp = s->avctx->me_cmp;
1533  c->mb_cmp = s->avctx->mb_cmp;
1534  c->me_sub_cmp = s->avctx->me_sub_cmp;
1536  c->time_base = s->avctx->time_base;
1537  c->max_b_frames = s->max_b_frames;
1538 
1539  ret = avcodec_open2(c, codec, NULL);
1540  if (ret < 0)
1541  goto fail;
1542 
1544  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1545 
1546  out_size = encode_frame(c, s->tmp_frames[0]);
1547  if (out_size < 0) {
1548  ret = out_size;
1549  goto fail;
1550  }
1551 
1552  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1553 
1554  for (i = 0; i < s->max_b_frames + 1; i++) {
1555  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1556 
1557  s->tmp_frames[i + 1]->pict_type = is_p ?
1559  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1560 
1561  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1562  if (out_size < 0) {
1563  ret = out_size;
1564  goto fail;
1565  }
1566 
1567  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1568  }
1569 
1570  /* get the delayed frames */
1571  out_size = encode_frame(c, NULL);
1572  if (out_size < 0) {
1573  ret = out_size;
1574  goto fail;
1575  }
1576  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1577 
1578  rd += c->error[0] + c->error[1] + c->error[2];
1579 
1580  if (rd < best_rd) {
1581  best_rd = rd;
1582  best_b_count = j;
1583  }
1584 
1585 fail:
1587  if (ret < 0)
1588  return ret;
1589  }
1590 
1591  return best_b_count;
1592 }
1593 
1595 {
1596  int i, ret;
1597 
1598  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1600  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1601 
1602  /* set next picture type & ordering */
1603  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1604  if (s->frame_skip_threshold || s->frame_skip_factor) {
1605  if (s->picture_in_gop_number < s->gop_size &&
1606  s->next_picture_ptr &&
1607  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1608  // FIXME check that the gop check above is +-1 correct
1609  av_frame_unref(s->input_picture[0]->f);
1610 
1611  ff_vbv_update(s, 0);
1612 
1613  goto no_output_pic;
1614  }
1615  }
1616 
1617  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1618  !s->next_picture_ptr || s->intra_only) {
1619  s->reordered_input_picture[0] = s->input_picture[0];
1622  s->coded_picture_number++;
1623  } else {
1624  int b_frames = 0;
1625 
1626  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1627  for (i = 0; i < s->max_b_frames + 1; i++) {
1628  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1629 
1630  if (pict_num >= s->rc_context.num_entries)
1631  break;
1632  if (!s->input_picture[i]) {
1633  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1634  break;
1635  }
1636 
1637  s->input_picture[i]->f->pict_type =
1638  s->rc_context.entry[pict_num].new_pict_type;
1639  }
1640  }
1641 
1642  if (s->b_frame_strategy == 0) {
1643  b_frames = s->max_b_frames;
1644  while (b_frames && !s->input_picture[b_frames])
1645  b_frames--;
1646  } else if (s->b_frame_strategy == 1) {
1647  for (i = 1; i < s->max_b_frames + 1; i++) {
1648  if (s->input_picture[i] &&
1649  s->input_picture[i]->b_frame_score == 0) {
1650  s->input_picture[i]->b_frame_score =
1651  get_intra_count(s,
1652  s->input_picture[i ]->f->data[0],
1653  s->input_picture[i - 1]->f->data[0],
1654  s->linesize) + 1;
1655  }
1656  }
1657  for (i = 0; i < s->max_b_frames + 1; i++) {
1658  if (!s->input_picture[i] ||
1659  s->input_picture[i]->b_frame_score - 1 >
1660  s->mb_num / s->b_sensitivity)
1661  break;
1662  }
1663 
1664  b_frames = FFMAX(0, i - 1);
1665 
1666  /* reset scores */
1667  for (i = 0; i < b_frames + 1; i++) {
1668  s->input_picture[i]->b_frame_score = 0;
1669  }
1670  } else if (s->b_frame_strategy == 2) {
1671  b_frames = estimate_best_b_count(s);
1672  if (b_frames < 0)
1673  return b_frames;
1674  }
1675 
1676  emms_c();
1677 
1678  for (i = b_frames - 1; i >= 0; i--) {
1679  int type = s->input_picture[i]->f->pict_type;
1680  if (type && type != AV_PICTURE_TYPE_B)
1681  b_frames = i;
1682  }
1683  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1684  b_frames == s->max_b_frames) {
1686  "warning, too many B-frames in a row\n");
1687  }
1688 
1689  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1690  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1691  s->gop_size > s->picture_in_gop_number) {
1692  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1693  } else {
1695  b_frames = 0;
1696  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1697  }
1698  }
1699 
1700  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1701  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1702  b_frames--;
1703 
1704  s->reordered_input_picture[0] = s->input_picture[b_frames];
1708  s->coded_picture_number++;
1709  for (i = 0; i < b_frames; i++) {
1710  s->reordered_input_picture[i + 1] = s->input_picture[i];
1711  s->reordered_input_picture[i + 1]->f->pict_type =
1714  s->coded_picture_number++;
1715  }
1716  }
1717  }
1718 no_output_pic:
1720 
1721  if (s->reordered_input_picture[0]) {
1724  AV_PICTURE_TYPE_B ? 3 : 0;
1725 
1726  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1727  return ret;
1728 
1729  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1730  // input is a shared pix, so we can't modify it -> allocate a new
1731  // one & ensure that the shared one is reuseable
1732 
1733  Picture *pic;
1734  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1735  if (i < 0)
1736  return i;
1737  pic = &s->picture[i];
1738 
1740  if (alloc_picture(s, pic, 0) < 0) {
1741  return -1;
1742  }
1743 
1744  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1745  if (ret < 0)
1746  return ret;
1747 
1748  /* mark us unused / free shared pic */
1750  s->reordered_input_picture[0]->shared = 0;
1751 
1752  s->current_picture_ptr = pic;
1753  } else {
1754  // input is not a shared pix -> reuse buffer for current_pix
1756  for (i = 0; i < 4; i++) {
1757  s->new_picture.f->data[i] += INPLACE_OFFSET;
1758  }
1759  }
1761  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1762  s->current_picture_ptr)) < 0)
1763  return ret;
1764 
1766  }
1767  return 0;
1768 }
1769 
1770 static void frame_end(MpegEncContext *s)
1771 {
1772  if (s->unrestricted_mv &&
1774  !s->intra_only) {
1776  int hshift = desc->log2_chroma_w;
1777  int vshift = desc->log2_chroma_h;
1779  s->current_picture.f->linesize[0],
1780  s->h_edge_pos, s->v_edge_pos,
1782  EDGE_TOP | EDGE_BOTTOM);
1784  s->current_picture.f->linesize[1],
1785  s->h_edge_pos >> hshift,
1786  s->v_edge_pos >> vshift,
1787  EDGE_WIDTH >> hshift,
1788  EDGE_WIDTH >> vshift,
1789  EDGE_TOP | EDGE_BOTTOM);
1791  s->current_picture.f->linesize[2],
1792  s->h_edge_pos >> hshift,
1793  s->v_edge_pos >> vshift,
1794  EDGE_WIDTH >> hshift,
1795  EDGE_WIDTH >> vshift,
1796  EDGE_TOP | EDGE_BOTTOM);
1797  }
1798 
1799  emms_c();
1800 
1801  s->last_pict_type = s->pict_type;
1803  if (s->pict_type!= AV_PICTURE_TYPE_B)
1805 
1806 #if FF_API_CODED_FRAME
1811 #endif
1812 #if FF_API_ERROR_FRAME
1815  sizeof(s->current_picture.encoding_error));
1817 #endif
1818 }
1819 
1821 {
1822  int intra, i;
1823 
1824  for (intra = 0; intra < 2; intra++) {
1825  if (s->dct_count[intra] > (1 << 16)) {
1826  for (i = 0; i < 64; i++) {
1827  s->dct_error_sum[intra][i] >>= 1;
1828  }
1829  s->dct_count[intra] >>= 1;
1830  }
1831 
1832  for (i = 0; i < 64; i++) {
1833  s->dct_offset[intra][i] = (s->noise_reduction *
1834  s->dct_count[intra] +
1835  s->dct_error_sum[intra][i] / 2) /
1836  (s->dct_error_sum[intra][i] + 1);
1837  }
1838  }
1839 }
1840 
1842 {
1843  int ret;
1844 
1845  /* mark & release old frames */
1846  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1848  s->last_picture_ptr->f->buf[0]) {
1850  }
1851 
1854 
1856  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1857  s->current_picture_ptr)) < 0)
1858  return ret;
1859 
1860  if (s->pict_type != AV_PICTURE_TYPE_B) {
1862  if (!s->droppable)
1864  }
1865 
1866  if (s->last_picture_ptr) {
1868  if (s->last_picture_ptr->f->buf[0] &&
1869  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1870  s->last_picture_ptr)) < 0)
1871  return ret;
1872  }
1873  if (s->next_picture_ptr) {
1875  if (s->next_picture_ptr->f->buf[0] &&
1876  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1877  s->next_picture_ptr)) < 0)
1878  return ret;
1879  }
1880 
1881  if (s->picture_structure!= PICT_FRAME) {
1882  int i;
1883  for (i = 0; i < 4; i++) {
1885  s->current_picture.f->data[i] +=
1886  s->current_picture.f->linesize[i];
1887  }
1888  s->current_picture.f->linesize[i] *= 2;
1889  s->last_picture.f->linesize[i] *= 2;
1890  s->next_picture.f->linesize[i] *= 2;
1891  }
1892  }
1893 
1894  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1897  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1900  } else {
1903  }
1904 
1905  if (s->dct_error_sum) {
1908  }
1909 
1910  return 0;
1911 }
1912 
1914  const AVFrame *pic_arg, int *got_packet)
1915 {
1916  MpegEncContext *s = avctx->priv_data;
1917  int i, stuffing_count, ret;
1918  int context_count = s->slice_context_count;
1919 
1920  s->vbv_ignore_qmax = 0;
1921 
1922  s->picture_in_gop_number++;
1923 
1924  if (load_input_picture(s, pic_arg) < 0)
1925  return -1;
1926 
1927  if (select_input_picture(s) < 0) {
1928  return -1;
1929  }
1930 
1931  /* output? */
1932  if (s->new_picture.f->data[0]) {
1933  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1934  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1935  :
1936  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1937  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1938  return ret;
1939  if (s->mb_info) {
1942  s->mb_width*s->mb_height*12);
1943  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1944  }
1945 
1946  for (i = 0; i < context_count; i++) {
1947  int start_y = s->thread_context[i]->start_mb_y;
1948  int end_y = s->thread_context[i]-> end_mb_y;
1949  int h = s->mb_height;
1950  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1951  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1952 
1953  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1954  }
1955 
1956  s->pict_type = s->new_picture.f->pict_type;
1957  //emms_c();
1958  ret = frame_start(s);
1959  if (ret < 0)
1960  return ret;
1961 vbv_retry:
1962  ret = encode_picture(s, s->picture_number);
1963  if (growing_buffer) {
1964  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1965  pkt->data = s->pb.buf;
1966  pkt->size = avctx->internal->byte_buffer_size;
1967  }
1968  if (ret < 0)
1969  return -1;
1970 
1971 #if FF_API_STAT_BITS
1973  avctx->header_bits = s->header_bits;
1974  avctx->mv_bits = s->mv_bits;
1975  avctx->misc_bits = s->misc_bits;
1976  avctx->i_tex_bits = s->i_tex_bits;
1977  avctx->p_tex_bits = s->p_tex_bits;
1978  avctx->i_count = s->i_count;
1979  // FIXME f/b_count in avctx
1980  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1981  avctx->skip_count = s->skip_count;
1983 #endif
1984 
1985  frame_end(s);
1986 
1987  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1989 
1990  if (avctx->rc_buffer_size) {
1991  RateControlContext *rcc = &s->rc_context;
1992  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1993  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1994  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1995 
1996  if (put_bits_count(&s->pb) > max_size &&
1997  s->lambda < s->lmax) {
1998  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1999  (s->qscale + 1) / s->qscale);
2000  if (s->adaptive_quant) {
2001  int i;
2002  for (i = 0; i < s->mb_height * s->mb_stride; i++)
2003  s->lambda_table[i] =
2004  FFMAX(s->lambda_table[i] + min_step,
2005  s->lambda_table[i] * (s->qscale + 1) /
2006  s->qscale);
2007  }
2008  s->mb_skipped = 0; // done in frame_start()
2009  // done in encode_picture() so we must undo it
2010  if (s->pict_type == AV_PICTURE_TYPE_P) {
2011  if (s->flipflop_rounding ||
2012  s->codec_id == AV_CODEC_ID_H263P ||
2014  s->no_rounding ^= 1;
2015  }
2016  if (s->pict_type != AV_PICTURE_TYPE_B) {
2017  s->time_base = s->last_time_base;
2018  s->last_non_b_time = s->time - s->pp_time;
2019  }
2020  for (i = 0; i < context_count; i++) {
2021  PutBitContext *pb = &s->thread_context[i]->pb;
2022  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2023  }
2024  s->vbv_ignore_qmax = 1;
2025  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2026  goto vbv_retry;
2027  }
2028 
2030  }
2031 
2032  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2034 
2035  for (i = 0; i < 4; i++) {
2037  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2038  }
2041  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2042  s->pict_type);
2043 
2044  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2045  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2046  s->misc_bits + s->i_tex_bits +
2047  s->p_tex_bits);
2048  flush_put_bits(&s->pb);
2049  s->frame_bits = put_bits_count(&s->pb);
2050 
2051  stuffing_count = ff_vbv_update(s, s->frame_bits);
2052  s->stuffing_bits = 8*stuffing_count;
2053  if (stuffing_count) {
2054  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2055  stuffing_count + 50) {
2056  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2057  return -1;
2058  }
2059 
2060  switch (s->codec_id) {
2063  while (stuffing_count--) {
2064  put_bits(&s->pb, 8, 0);
2065  }
2066  break;
2067  case AV_CODEC_ID_MPEG4:
2068  put_bits(&s->pb, 16, 0);
2069  put_bits(&s->pb, 16, 0x1C3);
2070  stuffing_count -= 4;
2071  while (stuffing_count--) {
2072  put_bits(&s->pb, 8, 0xFF);
2073  }
2074  break;
2075  default:
2076  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2077  }
2078  flush_put_bits(&s->pb);
2079  s->frame_bits = put_bits_count(&s->pb);
2080  }
2081 
2082  /* update MPEG-1/2 vbv_delay for CBR */
2083  if (s->avctx->rc_max_rate &&
2084  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2085  s->out_format == FMT_MPEG1 &&
2086  90000LL * (avctx->rc_buffer_size - 1) <=
2087  s->avctx->rc_max_rate * 0xFFFFLL) {
2088  AVCPBProperties *props;
2089  size_t props_size;
2090 
2091  int vbv_delay, min_delay;
2092  double inbits = s->avctx->rc_max_rate *
2093  av_q2d(s->avctx->time_base);
2094  int minbits = s->frame_bits - 8 *
2095  (s->vbv_delay_ptr - s->pb.buf - 1);
2096  double bits = s->rc_context.buffer_index + minbits - inbits;
2097 
2098  if (bits < 0)
2100  "Internal error, negative bits\n");
2101 
2102  assert(s->repeat_first_field == 0);
2103 
2104  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2105  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2106  s->avctx->rc_max_rate;
2107 
2108  vbv_delay = FFMAX(vbv_delay, min_delay);
2109 
2110  av_assert0(vbv_delay < 0xFFFF);
2111 
2112  s->vbv_delay_ptr[0] &= 0xF8;
2113  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2114  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2115  s->vbv_delay_ptr[2] &= 0x07;
2116  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2117 
2118  props = av_cpb_properties_alloc(&props_size);
2119  if (!props)
2120  return AVERROR(ENOMEM);
2121  props->vbv_delay = vbv_delay * 300;
2122 
2124  (uint8_t*)props, props_size);
2125  if (ret < 0) {
2126  av_freep(&props);
2127  return ret;
2128  }
2129 
2130 #if FF_API_VBV_DELAY
2132  avctx->vbv_delay = vbv_delay * 300;
2134 #endif
2135  }
2136  s->total_bits += s->frame_bits;
2137 #if FF_API_STAT_BITS
2139  avctx->frame_bits = s->frame_bits;
2141 #endif
2142 
2143 
2144  pkt->pts = s->current_picture.f->pts;
2145  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2147  pkt->dts = pkt->pts - s->dts_delta;
2148  else
2149  pkt->dts = s->reordered_pts;
2150  s->reordered_pts = pkt->pts;
2151  } else
2152  pkt->dts = pkt->pts;
2153  if (s->current_picture.f->key_frame)
2154  pkt->flags |= AV_PKT_FLAG_KEY;
2155  if (s->mb_info)
2157  } else {
2158  s->frame_bits = 0;
2159  }
2160 
2161  /* release non-reference frames */
2162  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2163  if (!s->picture[i].reference)
2164  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2165  }
2166 
2167  av_assert1((s->frame_bits & 7) == 0);
2168 
2169  pkt->size = s->frame_bits / 8;
2170  *got_packet = !!pkt->size;
2171  return 0;
2172 }
2173 
2175  int n, int threshold)
2176 {
2177  static const char tab[64] = {
2178  3, 2, 2, 1, 1, 1, 1, 1,
2179  1, 1, 1, 1, 1, 1, 1, 1,
2180  1, 1, 1, 1, 1, 1, 1, 1,
2181  0, 0, 0, 0, 0, 0, 0, 0,
2182  0, 0, 0, 0, 0, 0, 0, 0,
2183  0, 0, 0, 0, 0, 0, 0, 0,
2184  0, 0, 0, 0, 0, 0, 0, 0,
2185  0, 0, 0, 0, 0, 0, 0, 0
2186  };
2187  int score = 0;
2188  int run = 0;
2189  int i;
2190  int16_t *block = s->block[n];
2191  const int last_index = s->block_last_index[n];
2192  int skip_dc;
2193 
2194  if (threshold < 0) {
2195  skip_dc = 0;
2196  threshold = -threshold;
2197  } else
2198  skip_dc = 1;
2199 
2200  /* Are all we could set to zero already zero? */
2201  if (last_index <= skip_dc - 1)
2202  return;
2203 
2204  for (i = 0; i <= last_index; i++) {
2205  const int j = s->intra_scantable.permutated[i];
2206  const int level = FFABS(block[j]);
2207  if (level == 1) {
2208  if (skip_dc && i == 0)
2209  continue;
2210  score += tab[run];
2211  run = 0;
2212  } else if (level > 1) {
2213  return;
2214  } else {
2215  run++;
2216  }
2217  }
2218  if (score >= threshold)
2219  return;
2220  for (i = skip_dc; i <= last_index; i++) {
2221  const int j = s->intra_scantable.permutated[i];
2222  block[j] = 0;
2223  }
2224  if (block[0])
2225  s->block_last_index[n] = 0;
2226  else
2227  s->block_last_index[n] = -1;
2228 }
2229 
2230 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2231  int last_index)
2232 {
2233  int i;
2234  const int maxlevel = s->max_qcoeff;
2235  const int minlevel = s->min_qcoeff;
2236  int overflow = 0;
2237 
2238  if (s->mb_intra) {
2239  i = 1; // skip clipping of intra dc
2240  } else
2241  i = 0;
2242 
2243  for (; i <= last_index; i++) {
2244  const int j = s->intra_scantable.permutated[i];
2245  int level = block[j];
2246 
2247  if (level > maxlevel) {
2248  level = maxlevel;
2249  overflow++;
2250  } else if (level < minlevel) {
2251  level = minlevel;
2252  overflow++;
2253  }
2254 
2255  block[j] = level;
2256  }
2257 
2258  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2259  av_log(s->avctx, AV_LOG_INFO,
2260  "warning, clipping %d dct coefficients to %d..%d\n",
2261  overflow, minlevel, maxlevel);
2262 }
2263 
/**
 * Compute a perceptual weight for each pixel of an 8x8 block from the
 * activity (integer deviation measure) of its 3x3 neighbourhood,
 * clipped to the block boundaries.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int x, y;
    // FIXME optimize
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            const int y_lo = FFMAX(y - 1, 0);
            const int y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0);
            const int x_hi = FFMIN(8, x + 2);
            int sum = 0, sqr = 0, count = 0;
            int xx, yy;

            for (yy = y_lo; yy < y_hi; yy++) {
                for (xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum   += v;
                    sqr   += v * v;
                    count++;
                }
            }
            /* count*sqr - sum*sum == count^2 * variance of the window */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2287 
2289  int motion_x, int motion_y,
2290  int mb_block_height,
2291  int mb_block_width,
2292  int mb_block_count)
2293 {
2294  int16_t weight[12][64];
2295  int16_t orig[12][64];
2296  const int mb_x = s->mb_x;
2297  const int mb_y = s->mb_y;
2298  int i;
2299  int skip_dct[12];
2300  int dct_offset = s->linesize * 8; // default for progressive frames
2301  int uv_dct_offset = s->uvlinesize * 8;
2302  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2303  ptrdiff_t wrap_y, wrap_c;
2304 
2305  for (i = 0; i < mb_block_count; i++)
2306  skip_dct[i] = s->skipdct;
2307 
2308  if (s->adaptive_quant) {
2309  const int last_qp = s->qscale;
2310  const int mb_xy = mb_x + mb_y * s->mb_stride;
2311 
2312  s->lambda = s->lambda_table[mb_xy];
2313  update_qscale(s);
2314 
2315  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2316  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2317  s->dquant = s->qscale - last_qp;
2318 
2319  if (s->out_format == FMT_H263) {
2320  s->dquant = av_clip(s->dquant, -2, 2);
2321 
2322  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2323  if (!s->mb_intra) {
2324  if (s->pict_type == AV_PICTURE_TYPE_B) {
2325  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2326  s->dquant = 0;
2327  }
2328  if (s->mv_type == MV_TYPE_8X8)
2329  s->dquant = 0;
2330  }
2331  }
2332  }
2333  }
2334  ff_set_qscale(s, last_qp + s->dquant);
2335  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2336  ff_set_qscale(s, s->qscale + s->dquant);
2337 
2338  wrap_y = s->linesize;
2339  wrap_c = s->uvlinesize;
2340  ptr_y = s->new_picture.f->data[0] +
2341  (mb_y * 16 * wrap_y) + mb_x * 16;
2342  ptr_cb = s->new_picture.f->data[1] +
2343  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2344  ptr_cr = s->new_picture.f->data[2] +
2345  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2346 
2347  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2348  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2349  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2350  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2351  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2352  wrap_y, wrap_y,
2353  16, 16, mb_x * 16, mb_y * 16,
2354  s->width, s->height);
2355  ptr_y = ebuf;
2356  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2357  wrap_c, wrap_c,
2358  mb_block_width, mb_block_height,
2359  mb_x * mb_block_width, mb_y * mb_block_height,
2360  cw, ch);
2361  ptr_cb = ebuf + 16 * wrap_y;
2362  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2363  wrap_c, wrap_c,
2364  mb_block_width, mb_block_height,
2365  mb_x * mb_block_width, mb_y * mb_block_height,
2366  cw, ch);
2367  ptr_cr = ebuf + 16 * wrap_y + 16;
2368  }
2369 
2370  if (s->mb_intra) {
2372  int progressive_score, interlaced_score;
2373 
2374  s->interlaced_dct = 0;
2375  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2376  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2377  NULL, wrap_y, 8) - 400;
2378 
2379  if (progressive_score > 0) {
2380  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2381  NULL, wrap_y * 2, 8) +
2382  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2383  NULL, wrap_y * 2, 8);
2384  if (progressive_score > interlaced_score) {
2385  s->interlaced_dct = 1;
2386 
2387  dct_offset = wrap_y;
2388  uv_dct_offset = wrap_c;
2389  wrap_y <<= 1;
2390  if (s->chroma_format == CHROMA_422 ||
2391  s->chroma_format == CHROMA_444)
2392  wrap_c <<= 1;
2393  }
2394  }
2395  }
2396 
2397  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2398  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2399  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2400  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2401 
2402  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2403  skip_dct[4] = 1;
2404  skip_dct[5] = 1;
2405  } else {
2406  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2407  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2408  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2409  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2410  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2411  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2412  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2413  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2414  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2415  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2416  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2417  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2418  }
2419  }
2420  } else {
2421  op_pixels_func (*op_pix)[4];
2422  qpel_mc_func (*op_qpix)[16];
2423  uint8_t *dest_y, *dest_cb, *dest_cr;
2424 
2425  dest_y = s->dest[0];
2426  dest_cb = s->dest[1];
2427  dest_cr = s->dest[2];
2428 
2429  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2430  op_pix = s->hdsp.put_pixels_tab;
2431  op_qpix = s->qdsp.put_qpel_pixels_tab;
2432  } else {
2433  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2434  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2435  }
2436 
2437  if (s->mv_dir & MV_DIR_FORWARD) {
2438  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2439  s->last_picture.f->data,
2440  op_pix, op_qpix);
2441  op_pix = s->hdsp.avg_pixels_tab;
2442  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2443  }
2444  if (s->mv_dir & MV_DIR_BACKWARD) {
2445  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2446  s->next_picture.f->data,
2447  op_pix, op_qpix);
2448  }
2449 
2451  int progressive_score, interlaced_score;
2452 
2453  s->interlaced_dct = 0;
2454  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2455  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2456  ptr_y + wrap_y * 8,
2457  wrap_y, 8) - 400;
2458 
2459  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2460  progressive_score -= 400;
2461 
2462  if (progressive_score > 0) {
2463  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2464  wrap_y * 2, 8) +
2465  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2466  ptr_y + wrap_y,
2467  wrap_y * 2, 8);
2468 
2469  if (progressive_score > interlaced_score) {
2470  s->interlaced_dct = 1;
2471 
2472  dct_offset = wrap_y;
2473  uv_dct_offset = wrap_c;
2474  wrap_y <<= 1;
2475  if (s->chroma_format == CHROMA_422)
2476  wrap_c <<= 1;
2477  }
2478  }
2479  }
2480 
2481  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2482  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2483  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2484  dest_y + dct_offset, wrap_y);
2485  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2486  dest_y + dct_offset + 8, wrap_y);
2487 
2488  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2489  skip_dct[4] = 1;
2490  skip_dct[5] = 1;
2491  } else {
2492  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2493  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2494  if (!s->chroma_y_shift) { /* 422 */
2495  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2496  dest_cb + uv_dct_offset, wrap_c);
2497  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2498  dest_cr + uv_dct_offset, wrap_c);
2499  }
2500  }
2501  /* pre quantization */
2502  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2503  2 * s->qscale * s->qscale) {
2504  // FIXME optimize
2505  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2506  skip_dct[0] = 1;
2507  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2508  skip_dct[1] = 1;
2509  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2510  wrap_y, 8) < 20 * s->qscale)
2511  skip_dct[2] = 1;
2512  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2513  wrap_y, 8) < 20 * s->qscale)
2514  skip_dct[3] = 1;
2515  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2516  skip_dct[4] = 1;
2517  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2518  skip_dct[5] = 1;
2519  if (!s->chroma_y_shift) { /* 422 */
2520  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2521  dest_cb + uv_dct_offset,
2522  wrap_c, 8) < 20 * s->qscale)
2523  skip_dct[6] = 1;
2524  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2525  dest_cr + uv_dct_offset,
2526  wrap_c, 8) < 20 * s->qscale)
2527  skip_dct[7] = 1;
2528  }
2529  }
2530  }
2531 
2532  if (s->quantizer_noise_shaping) {
2533  if (!skip_dct[0])
2534  get_visual_weight(weight[0], ptr_y , wrap_y);
2535  if (!skip_dct[1])
2536  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2537  if (!skip_dct[2])
2538  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2539  if (!skip_dct[3])
2540  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2541  if (!skip_dct[4])
2542  get_visual_weight(weight[4], ptr_cb , wrap_c);
2543  if (!skip_dct[5])
2544  get_visual_weight(weight[5], ptr_cr , wrap_c);
2545  if (!s->chroma_y_shift) { /* 422 */
2546  if (!skip_dct[6])
2547  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2548  wrap_c);
2549  if (!skip_dct[7])
2550  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2551  wrap_c);
2552  }
2553  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2554  }
2555 
2556  /* DCT & quantize */
2557  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2558  {
2559  for (i = 0; i < mb_block_count; i++) {
2560  if (!skip_dct[i]) {
2561  int overflow;
2562  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2563  // FIXME we could decide to change to quantizer instead of
2564  // clipping
2565  // JS: I don't think that would be a good idea it could lower
2566  // quality instead of improve it. Just INTRADC clipping
2567  // deserves changes in quantizer
2568  if (overflow)
2569  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2570  } else
2571  s->block_last_index[i] = -1;
2572  }
2573  if (s->quantizer_noise_shaping) {
2574  for (i = 0; i < mb_block_count; i++) {
2575  if (!skip_dct[i]) {
2576  s->block_last_index[i] =
2577  dct_quantize_refine(s, s->block[i], weight[i],
2578  orig[i], i, s->qscale);
2579  }
2580  }
2581  }
2582 
2583  if (s->luma_elim_threshold && !s->mb_intra)
2584  for (i = 0; i < 4; i++)
2586  if (s->chroma_elim_threshold && !s->mb_intra)
2587  for (i = 4; i < mb_block_count; i++)
2589 
2590  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2591  for (i = 0; i < mb_block_count; i++) {
2592  if (s->block_last_index[i] == -1)
2593  s->coded_score[i] = INT_MAX / 256;
2594  }
2595  }
2596  }
2597 
2598  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2599  s->block_last_index[4] =
2600  s->block_last_index[5] = 0;
2601  s->block[4][0] =
2602  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2603  if (!s->chroma_y_shift) { /* 422 / 444 */
2604  for (i=6; i<12; i++) {
2605  s->block_last_index[i] = 0;
2606  s->block[i][0] = s->block[4][0];
2607  }
2608  }
2609  }
2610 
2611  // non c quantize code returns incorrect block_last_index FIXME
2612  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2613  for (i = 0; i < mb_block_count; i++) {
2614  int j;
2615  if (s->block_last_index[i] > 0) {
2616  for (j = 63; j > 0; j--) {
2617  if (s->block[i][s->intra_scantable.permutated[j]])
2618  break;
2619  }
2620  s->block_last_index[i] = j;
2621  }
2622  }
2623  }
2624 
2625  /* huffman encode */
2626  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2629  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2630  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2631  break;
2632  case AV_CODEC_ID_MPEG4:
2633  if (CONFIG_MPEG4_ENCODER)
2634  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2635  break;
2636  case AV_CODEC_ID_MSMPEG4V2:
2637  case AV_CODEC_ID_MSMPEG4V3:
2638  case AV_CODEC_ID_WMV1:
2640  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2641  break;
2642  case AV_CODEC_ID_WMV2:
2643  if (CONFIG_WMV2_ENCODER)
2644  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2645  break;
2646  case AV_CODEC_ID_H261:
2647  if (CONFIG_H261_ENCODER)
2648  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2649  break;
2650  case AV_CODEC_ID_H263:
2651  case AV_CODEC_ID_H263P:
2652  case AV_CODEC_ID_FLV1:
2653  case AV_CODEC_ID_RV10:
2654  case AV_CODEC_ID_RV20:
2655  if (CONFIG_H263_ENCODER)
2656  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2657  break;
2658  case AV_CODEC_ID_MJPEG:
2659  case AV_CODEC_ID_AMV:
2660  if (CONFIG_MJPEG_ENCODER)
2661  ff_mjpeg_encode_mb(s, s->block);
2662  break;
2663  default:
2664  av_assert1(0);
2665  }
2666 }
2667 
2668 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2669 {
2670  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2671  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2672  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2673 }
2674 
    /* NOTE(review): interior of copy_context_before_encode(d, s, type); the
     * signature line was lost from this extraction of the file. It snapshots
     * into backup context 'd' the parts of encoder state 's' that a trial
     * macroblock encode may modify, so they can be restored afterwards. */
    int i;

    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];  /* DC predictors, one per component */

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;
    d->last_bits= 0;

    d->mb_skipped= 0;       /* trial encode always starts non-skipped */
    d->qscale= s->qscale;
    d->dquant= s->dquant;

    /* NOTE(review): at least one further copied field before the closing
     * brace was lost from this extraction. */
}
2702 
    /* NOTE(review): interior of copy_context_after_encode(d, s, type); the
     * signature line was lost from this extraction. It copies back, from the
     * trial context 's' into 'd', the state saved before the encode plus the
     * results of the trial (motion vectors, MB mode, bit buffers, blocks). */
    int i;

    memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?

    /* MPEG-1 */
    d->mb_skip_run= s->mb_skip_run;
    for(i=0; i<3; i++)
        d->last_dc[i] = s->last_dc[i];

    /* statistics */
    d->mv_bits= s->mv_bits;
    d->i_tex_bits= s->i_tex_bits;
    d->p_tex_bits= s->p_tex_bits;
    d->i_count= s->i_count;
    d->f_count= s->f_count;
    d->b_count= s->b_count;
    d->skip_count= s->skip_count;
    d->misc_bits= s->misc_bits;

    d->mb_intra= s->mb_intra;
    d->mb_skipped= s->mb_skipped;
    d->mv_type= s->mv_type;
    d->mv_dir= s->mv_dir;
    d->pb= s->pb;           /* adopt the bit buffer the trial encode wrote into */
    if(s->data_partitioning){
        d->pb2= s->pb2;
        d->tex_pb= s->tex_pb;
    }
    d->block= s->block;
    for(i=0; i<8; i++)
        d->block_last_index[i]= s->block_last_index[i];
    /* NOTE(review): a copied field between block_last_index and qscale was
     * lost from this extraction. */
    d->qscale= s->qscale;

}
2741 
/**
 * Trial-encode one macroblock with the given candidate type and keep the
 * result if its score beats the current best (*dmin).
 *
 * NOTE(review): one line of the parameter list was lost from this extraction
 * of the file — presumably the PutBitContext pb[2]/pb2[2]/tex_pb[2] array
 * parameters that are referenced in the body below.
 */
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
                                int *dmin, int *next_block, int motion_x, int motion_y)
{
    int score;
    uint8_t *dest_backup[3];

    copy_context_before_encode(s, backup, type);

    /* Double-buffer the block data and bit buffers so the best candidate so
     * far stays intact while the next one is tried. */
    s->block= s->blocks[*next_block];
    s->pb= pb[*next_block];
    if(s->data_partitioning){
        s->pb2 = pb2 [*next_block];
        s->tex_pb= tex_pb[*next_block];
    }

    if(*next_block){
        /* redirect the reconstruction output into the RD scratchpad so the
         * currently-best reconstruction in s->dest is not clobbered */
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        s->dest[0] = s->sc.rd_scratchpad;
        s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
        s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
        av_assert0(s->linesize >= 32); //FIXME
    }

    encode_mb(s, motion_x, motion_y);

    score= put_bits_count(&s->pb);  /* rate in bits */
    if(s->data_partitioning){
        score+= put_bits_count(&s->pb2);
        score+= put_bits_count(&s->tex_pb);
    }

    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
        /* full rate-distortion score: decode the trial MB and combine the
         * rate (weighted by lambda2) with the reconstruction SSE */
        ff_mpv_decode_mb(s, s->block);

        score *= s->lambda2;
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
    }

    if(*next_block){
        memcpy(s->dest, dest_backup, sizeof(s->dest));
    }

    if(score<*dmin){
        *dmin= score;
        *next_block^=1;   /* flip buffers: the trial just written becomes "best" */

        copy_context_after_encode(best, s, type);
    }
}
2792 
2793 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2794  uint32_t *sq = ff_square_tab + 256;
2795  int acc=0;
2796  int x,y;
2797 
2798  if(w==16 && h==16)
2799  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2800  else if(w==8 && h==8)
2801  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2802 
2803  for(y=0; y<h; y++){
2804  for(x=0; x<w; x++){
2805  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2806  }
2807  }
2808 
2809  av_assert2(acc>=0);
2810 
2811  return acc;
2812 }
2813 
2814 static int sse_mb(MpegEncContext *s){
2815  int w= 16;
2816  int h= 16;
2817 
2818  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2819  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2820 
2821  if(w==16 && h==16)
2822  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2823  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2824  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2825  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2826  }else{
2827  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2828  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2829  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2830  }
2831  else
2832  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2833  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2834  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2835 }
2836 
    /* NOTE(review): interior of the motion-estimation pre-pass worker; its
     * signature line, (AVCodecContext *c, void *arg), was lost from this
     * extraction. 'arg' points at this slice's MpegEncContext. */
    MpegEncContext *s= *(void**)arg;


    s->me.pre_pass=1;
    s->me.dia_size= s->avctx->pre_dia_size;  /* search diamond size for the pre pass */
    s->first_slice_line=1;
    /* the pre pass scans macroblocks in reverse order, bottom-right to top-left */
    for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
        for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
            /* NOTE(review): the per-MB pre-estimation call that formed this
             * loop body was lost from this extraction. */
        }
        s->first_slice_line=0;
    }

    s->me.pre_pass=0;

    return 0;
}
2855 
    /* NOTE(review): interior of the motion-estimation worker thread; its
     * signature line was lost from this extraction, along with several lines
     * of the inner loop body (hence the dangling 'else' below). */
    MpegEncContext *s= *(void**)arg;


    s->me.dia_size= s->avctx->dia_size;  /* search diamond size for the main pass */
    s->first_slice_line=1;
    for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
        s->mb_x=0; //for block init below
        for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
            /* advance the per-block indices one macroblock (2 blocks) to the right */
            s->block_index[0]+=2;
            s->block_index[1]+=2;
            s->block_index[2]+=2;
            s->block_index[3]+=2;

            /* compute motion vector & mb_type and store in context */
            /* NOTE(review): the estimation calls (and the 'if' matching the
             * 'else' below) were lost from this extraction. */
            else
        }
        s->first_slice_line=0;
    }
    return 0;
}
2882 
/**
 * Worker thread: compute the spatial variance and mean of every luma
 * macroblock in this slice's row range, storing them per MB in the current
 * picture and accumulating the variance sum (used elsewhere for rate
 * control / mode decisions).
 */
static int mb_var_thread(AVCodecContext *c, void *arg){
    MpegEncContext *s= *(void**)arg;
    int mb_x, mb_y;

    /* NOTE(review): one line was lost from this extraction here. */

    for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
            int xx = mb_x * 16;
            int yy = mb_y * 16;
            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
            int varc;
            int sum = s->mpvencdsp.pix_sum(pix, s->linesize);

            /* fixed-point variance over the 256 luma pixels; presumably
             * sum-of-squares minus squared mean, with rounding (+128) and a
             * small +500 bias -- confirm against pix_norm1/pix_sum docs */
            varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;

            s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
            s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
            s->me.mb_var_sum_temp += varc;
        }
    }
    return 0;
}
2907 
    /* NOTE(review): interior of write_slice_end(); its signature line and the
     * codec-specific calls inside the empty-looking branches below were lost
     * from this extraction. Finishes the current slice: codec-specific
     * stuffing, then flushing the bit writer. */
    if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
        if(s->partitioned_frame){
            /* NOTE(review): partitioned-frame handling call lost in extraction */
        }

        ff_mpeg4_stuffing(&s->pb);
    }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
        /* NOTE(review): MJPEG stuffing call lost in extraction */
    }

    /* NOTE(review): a call between the branches above and the flush was lost
     * in extraction. */
    flush_put_bits(&s->pb);

    /* account the slice overhead as misc bits for two-pass rate control */
    if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
        s->misc_bits+= get_bits_diff(s);
}
2925 
{
    /* NOTE(review): this is the body of write_mb_info(MpegEncContext *s); the
     * signature line was lost from this extraction. It fills the most
     * recently reserved 12-byte slot at the end of s->mb_info_ptr with
     * positional side data about the current macroblock (slots are reserved
     * by update_mb_info() below). Record layout: le32 bit offset, qscale,
     * GOB number, le16 MB address, hmv1, vmv1, hmv2, vmv2 = 12 bytes. */
    uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
    int offset = put_bits_count(&s->pb);                        /* bit position of this MB */
    int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index); /* MB address within the GOB */
    int gobn = s->mb_y / s->gob_index;                          /* GOB number */
    int pred_x, pred_y;
    if (CONFIG_H263_ENCODER)
        ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x); /* hmv1 */
    bytestream_put_byte(&ptr, pred_y); /* vmv1 */
    /* 4MV not implemented */
    bytestream_put_byte(&ptr, 0); /* hmv2 */
    bytestream_put_byte(&ptr, 0); /* vmv2 */
}
2945 
2946 static void update_mb_info(MpegEncContext *s, int startcode)
2947 {
2948  if (!s->mb_info)
2949  return;
2950  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2951  s->mb_info_size += 12;
2952  s->prev_mb_info = s->last_mb_info;
2953  }
2954  if (startcode) {
2955  s->prev_mb_info = put_bits_count(&s->pb)/8;
2956  /* This might have incremented mb_info_size above, and we return without
2957  * actually writing any info into that slot yet. But in that case,
2958  * this will be called again at the start of the after writing the
2959  * start code, actually writing the mb info. */
2960  return;
2961  }
2962 
2963  s->last_mb_info = put_bits_count(&s->pb)/8;
2964  if (!s->mb_info_size)
2965  s->mb_info_size += 12;
2966  write_mb_info(s);
2967 }
2968 
/**
 * Grow the PutBit output buffer when fewer than 'threshold' free bytes
 * remain, keeping s->ptr_lastgob and s->vbv_delay_ptr pointing at the same
 * logical positions inside the new buffer.
 *
 * @param threshold     minimum number of free bytes that must be available
 * @param size_increase number of bytes to grow the buffer by
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure,
 *         AVERROR(EINVAL) if the space is still insufficient afterwards
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    /* reallocation is only attempted when this is the sole slice context
     * writing into the shared avctx byte buffer */
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember derived pointers as offsets so they survive the move */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        /* NOTE(review): the line freeing the old byte_buffer appears to have
         * been lost from this extraction of the file. */
        s->avctx->internal->byte_buffer = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        s->ptr_lastgob = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
3004 
3005 static int encode_thread(AVCodecContext *c, void *arg){
3006  MpegEncContext *s= *(void**)arg;
3007  int mb_x, mb_y;
3008  int chr_h= 16>>s->chroma_y_shift;
3009  int i, j;
3010  MpegEncContext best_s = { 0 }, backup_s;
3011  uint8_t bit_buf[2][MAX_MB_BYTES];
3012  uint8_t bit_buf2[2][MAX_MB_BYTES];
3013  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
3014  PutBitContext pb[2], pb2[2], tex_pb[2];
3015 
3017 
3018  for(i=0; i<2; i++){
3019  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3020  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3021  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3022  }
3023 
3024  s->last_bits= put_bits_count(&s->pb);
3025  s->mv_bits=0;
3026  s->misc_bits=0;
3027  s->i_tex_bits=0;
3028  s->p_tex_bits=0;
3029  s->i_count=0;
3030  s->f_count=0;
3031  s->b_count=0;
3032  s->skip_count=0;
3033 
3034  for(i=0; i<3; i++){
3035  /* init last dc values */
3036  /* note: quant matrix value (8) is implied here */
3037  s->last_dc[i] = 128 << s->intra_dc_precision;
3038 
3039  s->current_picture.encoding_error[i] = 0;
3040  }
3041  if(s->codec_id==AV_CODEC_ID_AMV){
3042  s->last_dc[0] = 128*8/13;
3043  s->last_dc[1] = 128*8/14;
3044  s->last_dc[2] = 128*8/14;
3045  }
3046  s->mb_skip_run = 0;
3047  memset(s->last_mv, 0, sizeof(s->last_mv));
3048 
3049  s->last_mv_dir = 0;
3050 
3051  switch(s->codec_id){
3052  case AV_CODEC_ID_H263:
3053  case AV_CODEC_ID_H263P:
3054  case AV_CODEC_ID_FLV1:
3055  if (CONFIG_H263_ENCODER)
3056  s->gob_index = H263_GOB_HEIGHT(s->height);
3057  break;
3058  case AV_CODEC_ID_MPEG4:
3059  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3061  break;
3062  }
3063 
3064  s->resync_mb_x=0;
3065  s->resync_mb_y=0;
3066  s->first_slice_line = 1;
3067  s->ptr_lastgob = s->pb.buf;
3068  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3069  s->mb_x=0;
3070  s->mb_y= mb_y;
3071 
3072  ff_set_qscale(s, s->qscale);
3074 
3075  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3076  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3077  int mb_type= s->mb_type[xy];
3078 // int d;
3079  int dmin= INT_MAX;
3080  int dir;
3081  int size_increase = s->avctx->internal->byte_buffer_size/4
3082  + s->mb_width*MAX_MB_BYTES;
3083 
3084  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3085  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3086  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3087  return -1;
3088  }
3089  if(s->data_partitioning){
3090  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3091  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3092  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3093  return -1;
3094  }
3095  }
3096 
3097  s->mb_x = mb_x;
3098  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3100 
3101  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3103  xy= s->mb_y*s->mb_stride + s->mb_x;
3104  mb_type= s->mb_type[xy];
3105  }
3106 
3107  /* write gob / video packet header */
3108  if(s->rtp_mode){
3109  int current_packet_size, is_gob_start;
3110 
3111  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3112 
3113  is_gob_start = s->rtp_payload_size &&
3114  current_packet_size >= s->rtp_payload_size &&
3115  mb_y + mb_x > 0;
3116 
3117  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3118 
3119  switch(s->codec_id){
3120  case AV_CODEC_ID_H263:
3121  case AV_CODEC_ID_H263P:
3122  if(!s->h263_slice_structured)
3123  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3124  break;
3126  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3128  if(s->mb_skip_run) is_gob_start=0;
3129  break;
3130  case AV_CODEC_ID_MJPEG:
3131  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3132  break;
3133  }
3134 
3135  if(is_gob_start){
3136  if(s->start_mb_y != mb_y || mb_x!=0){
3137  write_slice_end(s);
3138 
3139  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3141  }
3142  }
3143 
3144  av_assert2((put_bits_count(&s->pb)&7) == 0);
3145  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3146 
3147  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3148  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3149  int d = 100 / s->error_rate;
3150  if(r % d == 0){
3151  current_packet_size=0;
3152  s->pb.buf_ptr= s->ptr_lastgob;
3153  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3154  }
3155  }
3156 
3157 #if FF_API_RTP_CALLBACK
3159  if (s->avctx->rtp_callback){
3160  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3161  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3162  }
3164 #endif
3165  update_mb_info(s, 1);
3166 
3167  switch(s->codec_id){
3168  case AV_CODEC_ID_MPEG4:
3169  if (CONFIG_MPEG4_ENCODER) {
3172  }
3173  break;
3176  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3179  }
3180  break;
3181  case AV_CODEC_ID_H263:
3182  case AV_CODEC_ID_H263P:
3183  if (CONFIG_H263_ENCODER)
3184  ff_h263_encode_gob_header(s, mb_y);
3185  break;
3186  }
3187 
3188  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3189  int bits= put_bits_count(&s->pb);
3190  s->misc_bits+= bits - s->last_bits;
3191  s->last_bits= bits;
3192  }
3193 
3194  s->ptr_lastgob += current_packet_size;
3195  s->first_slice_line=1;
3196  s->resync_mb_x=mb_x;
3197  s->resync_mb_y=mb_y;
3198  }
3199  }
3200 
3201  if( (s->resync_mb_x == s->mb_x)
3202  && s->resync_mb_y+1 == s->mb_y){
3203  s->first_slice_line=0;
3204  }
3205 
3206  s->mb_skipped=0;
3207  s->dquant=0; //only for QP_RD
3208 
3209  update_mb_info(s, 0);
3210 
3211  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3212  int next_block=0;
3213  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3214 
3215  copy_context_before_encode(&backup_s, s, -1);
3216  backup_s.pb= s->pb;
3219  if(s->data_partitioning){
3220  backup_s.pb2= s->pb2;
3221  backup_s.tex_pb= s->tex_pb;
3222  }
3223 
3224  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3225  s->mv_dir = MV_DIR_FORWARD;
3226  s->mv_type = MV_TYPE_16X16;
3227  s->mb_intra= 0;
3228  s->mv[0][0][0] = s->p_mv_table[xy][0];
3229  s->mv[0][0][1] = s->p_mv_table[xy][1];
3230  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3231  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3232  }
3233  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3234  s->mv_dir = MV_DIR_FORWARD;
3235  s->mv_type = MV_TYPE_FIELD;
3236  s->mb_intra= 0;
3237  for(i=0; i<2; i++){
3238  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3239  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3240  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3241  }
3242  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3243  &dmin, &next_block, 0, 0);
3244  }
3245  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3246  s->mv_dir = MV_DIR_FORWARD;
3247  s->mv_type = MV_TYPE_16X16;
3248  s->mb_intra= 0;
3249  s->mv[0][0][0] = 0;
3250  s->mv[0][0][1] = 0;
3251  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3252  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3253  }
3254  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3255  s->mv_dir = MV_DIR_FORWARD;
3256  s->mv_type = MV_TYPE_8X8;
3257  s->mb_intra= 0;
3258  for(i=0; i<4; i++){
3259  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3260  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3261  }
3262  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3263  &dmin, &next_block, 0, 0);
3264  }
3265  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3266  s->mv_dir = MV_DIR_FORWARD;
3267  s->mv_type = MV_TYPE_16X16;
3268  s->mb_intra= 0;
3269  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3270  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3271  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3272  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3273  }
3274  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3275  s->mv_dir = MV_DIR_BACKWARD;
3276  s->mv_type = MV_TYPE_16X16;
3277  s->mb_intra= 0;
3278  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3279  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3280  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3281  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3282  }
3283  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3285  s->mv_type = MV_TYPE_16X16;
3286  s->mb_intra= 0;
3287  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3288  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3289  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3290  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3291  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3292  &dmin, &next_block, 0, 0);
3293  }
3294  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3295  s->mv_dir = MV_DIR_FORWARD;
3296  s->mv_type = MV_TYPE_FIELD;
3297  s->mb_intra= 0;
3298  for(i=0; i<2; i++){
3299  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3300  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3301  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3302  }
3303  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3304  &dmin, &next_block, 0, 0);
3305  }
3306  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3307  s->mv_dir = MV_DIR_BACKWARD;
3308  s->mv_type = MV_TYPE_FIELD;
3309  s->mb_intra= 0;
3310  for(i=0; i<2; i++){
3311  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3312  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3313  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3314  }
3315  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3316  &dmin, &next_block, 0, 0);
3317  }
3318  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3320  s->mv_type = MV_TYPE_FIELD;
3321  s->mb_intra= 0;
3322  for(dir=0; dir<2; dir++){
3323  for(i=0; i<2; i++){
3324  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3325  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3326  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3327  }
3328  }
3329  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3330  &dmin, &next_block, 0, 0);
3331  }
3332  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3333  s->mv_dir = 0;
3334  s->mv_type = MV_TYPE_16X16;
3335  s->mb_intra= 1;
3336  s->mv[0][0][0] = 0;
3337  s->mv[0][0][1] = 0;
3338  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3339  &dmin, &next_block, 0, 0);
3340  if(s->h263_pred || s->h263_aic){
3341  if(best_s.mb_intra)
3342  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3343  else
3344  ff_clean_intra_table_entries(s); //old mode?
3345  }
3346  }
3347 
3348  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3349  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3350  const int last_qp= backup_s.qscale;
3351  int qpi, qp, dc[6];
3352  int16_t ac[6][16];
3353  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3354  static const int dquant_tab[4]={-1,1,-2,2};
3355  int storecoefs = s->mb_intra && s->dc_val[0];
3356 
3357  av_assert2(backup_s.dquant == 0);
3358 
3359  //FIXME intra
3360  s->mv_dir= best_s.mv_dir;
3361  s->mv_type = MV_TYPE_16X16;
3362  s->mb_intra= best_s.mb_intra;
3363  s->mv[0][0][0] = best_s.mv[0][0][0];
3364  s->mv[0][0][1] = best_s.mv[0][0][1];
3365  s->mv[1][0][0] = best_s.mv[1][0][0];
3366  s->mv[1][0][1] = best_s.mv[1][0][1];
3367 
3368  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3369  for(; qpi<4; qpi++){
3370  int dquant= dquant_tab[qpi];
3371  qp= last_qp + dquant;
3372  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3373  continue;
3374  backup_s.dquant= dquant;
3375  if(storecoefs){
3376  for(i=0; i<6; i++){
3377  dc[i]= s->dc_val[0][ s->block_index[i] ];
3378  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3379  }
3380  }
3381 
3382  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3383  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3384  if(best_s.qscale != qp){
3385  if(storecoefs){
3386  for(i=0; i<6; i++){
3387  s->dc_val[0][ s->block_index[i] ]= dc[i];
3388  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3389  }
3390  }
3391  }
3392  }
3393  }
3394  }
3395  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3396  int mx= s->b_direct_mv_table[xy][0];
3397  int my= s->b_direct_mv_table[xy][1];
3398 
3399  backup_s.dquant = 0;
3401  s->mb_intra= 0;
3402  ff_mpeg4_set_direct_mv(s, mx, my);
3403  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3404  &dmin, &next_block, mx, my);
3405  }
3406  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3407  backup_s.dquant = 0;
3409  s->mb_intra= 0;
3410  ff_mpeg4_set_direct_mv(s, 0, 0);
3411  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3412  &dmin, &next_block, 0, 0);
3413  }
3414  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3415  int coded=0;
3416  for(i=0; i<6; i++)
3417  coded |= s->block_last_index[i];
3418  if(coded){
3419  int mx,my;
3420  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3421  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3422  mx=my=0; //FIXME find the one we actually used
3423  ff_mpeg4_set_direct_mv(s, mx, my);
3424  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3425  mx= s->mv[1][0][0];
3426  my= s->mv[1][0][1];
3427  }else{
3428  mx= s->mv[0][0][0];
3429  my= s->mv[0][0][1];
3430  }
3431 
3432  s->mv_dir= best_s.mv_dir;
3433  s->mv_type = best_s.mv_type;
3434  s->mb_intra= 0;
3435 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3436  s->mv[0][0][1] = best_s.mv[0][0][1];
3437  s->mv[1][0][0] = best_s.mv[1][0][0];
3438  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3439  backup_s.dquant= 0;
3440  s->skipdct=1;
3441  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3442  &dmin, &next_block, mx, my);
3443  s->skipdct=0;
3444  }
3445  }
3446 
3447  s->current_picture.qscale_table[xy] = best_s.qscale;
3448 
3449  copy_context_after_encode(s, &best_s, -1);
3450 
3451  pb_bits_count= put_bits_count(&s->pb);
3452  flush_put_bits(&s->pb);
3453  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3454  s->pb= backup_s.pb;
3455 
3456  if(s->data_partitioning){
3457  pb2_bits_count= put_bits_count(&s->pb2);
3458  flush_put_bits(&s->pb2);
3459  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3460  s->pb2= backup_s.pb2;
3461 
3462  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3463  flush_put_bits(&s->tex_pb);
3464  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3465  s->tex_pb= backup_s.tex_pb;
3466  }
3467  s->last_bits= put_bits_count(&s->pb);
3468 
3469  if (CONFIG_H263_ENCODER &&
3472 
3473  if(next_block==0){ //FIXME 16 vs linesize16
3474  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3475  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3476  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3477  }
3478 
3480  ff_mpv_decode_mb(s, s->block);
3481  } else {
3482  int motion_x = 0, motion_y = 0;
3484  // only one MB-Type possible
3485 
3486  switch(mb_type){
3488  s->mv_dir = 0;
3489  s->mb_intra= 1;
3490  motion_x= s->mv[0][0][0] = 0;
3491  motion_y= s->mv[0][0][1] = 0;
3492  break;
3494  s->mv_dir = MV_DIR_FORWARD;
3495  s->mb_intra= 0;
3496  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3497  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3498  break;
3500  s->mv_dir = MV_DIR_FORWARD;
3501  s->mv_type = MV_TYPE_FIELD;
3502  s->mb_intra= 0;
3503  for(i=0; i<2; i++){
3504  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3505  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3506  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3507  }
3508  break;
3510  s->mv_dir = MV_DIR_FORWARD;
3511  s->mv_type = MV_TYPE_8X8;
3512  s->mb_intra= 0;
3513  for(i=0; i<4; i++){
3514  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3515  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3516  }
3517  break;
3519  if (CONFIG_MPEG4_ENCODER) {
3521  s->mb_intra= 0;
3522  motion_x=s->b_direct_mv_table[xy][0];
3523  motion_y=s->b_direct_mv_table[xy][1];
3524  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3525  }
3526  break;
3528  if (CONFIG_MPEG4_ENCODER) {
3530  s->mb_intra= 0;
3531  ff_mpeg4_set_direct_mv(s, 0, 0);
3532  }
3533  break;
3536  s->mb_intra= 0;
3537  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3538  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3539  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3540  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3541  break;
3543  s->mv_dir = MV_DIR_BACKWARD;
3544  s->mb_intra= 0;
3545  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3546  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3547  break;
3549  s->mv_dir = MV_DIR_FORWARD;
3550  s->mb_intra= 0;
3551  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3552  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3553  break;
3555  s->mv_dir = MV_DIR_FORWARD;
3556  s->mv_type = MV_TYPE_FIELD;
3557  s->mb_intra= 0;
3558  for(i=0; i<2; i++){
3559  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3560  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3561  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3562  }
3563  break;
3565  s->mv_dir = MV_DIR_BACKWARD;
3566  s->mv_type = MV_TYPE_FIELD;
3567  s->mb_intra= 0;
3568  for(i=0; i<2; i++){
3569  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3570  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3571  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3572  }
3573  break;
3576  s->mv_type = MV_TYPE_FIELD;
3577  s->mb_intra= 0;
3578  for(dir=0; dir<2; dir++){
3579  for(i=0; i<2; i++){
3580  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3581  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3582  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3583  }
3584  }
3585  break;
3586  default:
3587  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3588  }
3589 
3590  encode_mb(s, motion_x, motion_y);
3591 
3592  // RAL: Update last macroblock type
3593  s->last_mv_dir = s->mv_dir;
3594 
3595  if (CONFIG_H263_ENCODER &&
3598 
3599  ff_mpv_decode_mb(s, s->block);
3600  }
3601 
3602  /* clean the MV table in IPS frames for direct mode in B-frames */
3603  if(s->mb_intra /* && I,P,S_TYPE */){
3604  s->p_mv_table[xy][0]=0;
3605  s->p_mv_table[xy][1]=0;
3606  }
3607 
3608  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3609  int w= 16;
3610  int h= 16;
3611 
3612  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3613  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3614 
3616  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3617  s->dest[0], w, h, s->linesize);
3619  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3620  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3622  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3623  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3624  }
3625  if(s->loop_filter){
3626  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3628  }
3629  ff_dlog(s->avctx, "MB %d %d bits\n",
3630  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3631  }
3632  }
3633 
3634  //not beautiful here but we must write it before flushing so it has to be here
3637 
3638  write_slice_end(s);
3639 
3640 #if FF_API_RTP_CALLBACK
3642  /* Send the last GOB if RTP */
3643  if (s->avctx->rtp_callback) {
3644  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3645  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3646  /* Call the RTP callback to send the last GOB */
3647  emms_c();
3648  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3649  }
3651 #endif
3652 
3653  return 0;
3654 }
3655 
3656 #define MERGE(field) dst->field += src->field; src->field=0
3658  MERGE(me.scene_change_score);
3659  MERGE(me.mc_mb_var_sum_temp);
3660  MERGE(me.mb_var_sum_temp);
3661 }
3662 
3664  int i;
3665 
3666  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3667  MERGE(dct_count[1]);
3668  MERGE(mv_bits);
3669  MERGE(i_tex_bits);
3670  MERGE(p_tex_bits);
3671  MERGE(i_count);
3672  MERGE(f_count);
3673  MERGE(b_count);
3674  MERGE(skip_count);
3675  MERGE(misc_bits);
3676  MERGE(er.error_count);
3681 
3682  if (dst->noise_reduction){
3683  for(i=0; i<64; i++){
3684  MERGE(dct_error_sum[0][i]);
3685  MERGE(dct_error_sum[1][i]);
3686  }
3687  }
3688 
3689  assert(put_bits_count(&src->pb) % 8 ==0);
3690  assert(put_bits_count(&dst->pb) % 8 ==0);
3691  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3692  flush_put_bits(&dst->pb);
3693 }
3694 
3695 static int estimate_qp(MpegEncContext *s, int dry_run){
3696  if (s->next_lambda){
3699  if(!dry_run) s->next_lambda= 0;
3700  } else if (!s->fixed_qscale) {
3701  int quality;
3702 #if CONFIG_LIBXVID
3704  quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3705  else
3706 #endif
3707  quality = ff_rate_estimate_qscale(s, dry_run);
3709  s->current_picture.f->quality = quality;
3710  if (s->current_picture.f->quality < 0)
3711  return -1;
3712  }
3713 
3714  if(s->adaptive_quant){
3715  switch(s->codec_id){
3716  case AV_CODEC_ID_MPEG4:
3717  if (CONFIG_MPEG4_ENCODER)
3719  break;
3720  case AV_CODEC_ID_H263:
3721  case AV_CODEC_ID_H263P:
3722  case AV_CODEC_ID_FLV1:
3723  if (CONFIG_H263_ENCODER)
3725  break;
3726  default:
3727  ff_init_qscale_tab(s);
3728  }
3729 
3730  s->lambda= s->lambda_table[0];
3731  //FIXME broken
3732  }else
3733  s->lambda = s->current_picture.f->quality;
3734  update_qscale(s);
3735  return 0;
3736 }
3737 
3738 /* must be called before writing the header */
3741  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3742 
3743  if(s->pict_type==AV_PICTURE_TYPE_B){
3744  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3745  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3746  }else{
3747  s->pp_time= s->time - s->last_non_b_time;
3748  s->last_non_b_time= s->time;
3749  assert(s->picture_number==0 || s->pp_time > 0);
3750  }
3751 }
3752 
3754 {
3755  int i, ret;
3756  int bits;
3757  int context_count = s->slice_context_count;
3758 
3760 
3761  /* Reset the average MB variance */
3762  s->me.mb_var_sum_temp =
3763  s->me.mc_mb_var_sum_temp = 0;
3764 
3765  /* we need to initialize some time vars before we can encode B-frames */
3766  // RAL: Condition added for MPEG1VIDEO
3769  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3770  ff_set_mpeg4_time(s);
3771 
3772  s->me.scene_change_score=0;
3773 
3774 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3775 
3776  if(s->pict_type==AV_PICTURE_TYPE_I){
3777  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3778  else s->no_rounding=0;
3779  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3781  s->no_rounding ^= 1;
3782  }
3783 
3784  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3785  if (estimate_qp(s,1) < 0)
3786  return -1;
3787  ff_get_2pass_fcode(s);
3788  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3790  s->lambda= s->last_lambda_for[s->pict_type];
3791  else
3793  update_qscale(s);
3794  }
3795 
3801  }
3802 
3803  s->mb_intra=0; //for the rate distortion & bit compare functions
3804  for(i=1; i<context_count; i++){
3806  if (ret < 0)
3807  return ret;
3808  }
3809 
3810  if(ff_init_me(s)<0)
3811  return -1;
3812 
3813  /* Estimate motion for every MB */
3814  if(s->pict_type != AV_PICTURE_TYPE_I){
3815  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3816  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3817  if (s->pict_type != AV_PICTURE_TYPE_B) {
3818  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3819  s->me_pre == 2) {
3820  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3821  }
3822  }
3823 
3824  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3825  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3826  /* I-Frame */
3827  for(i=0; i<s->mb_stride*s->mb_height; i++)
3829 
3830  if(!s->fixed_qscale){
3831  /* finding spatial complexity for I-frame rate control */
3832  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3833  }
3834  }
3835  for(i=1; i<context_count; i++){
3837  }
3839  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3840  emms_c();
3841 
3843  s->pict_type == AV_PICTURE_TYPE_P) {
3845  for(i=0; i<s->mb_stride*s->mb_height; i++)
3847  if(s->msmpeg4_version >= 3)
3848  s->no_rounding=1;
3849  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3851  }
3852 
3853  if(!s->umvplus){
3856 
3858  int a,b;
3859  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3861  s->f_code= FFMAX3(s->f_code, a, b);
3862  }
3863 
3864  ff_fix_long_p_mvs(s);
3867  int j;
3868  for(i=0; i<2; i++){
3869  for(j=0; j<2; j++)
3872  }
3873  }
3874  }
3875 
3876  if(s->pict_type==AV_PICTURE_TYPE_B){
3877  int a, b;
3878 
3881  s->f_code = FFMAX(a, b);
3882 
3885  s->b_code = FFMAX(a, b);
3886 
3892  int dir, j;
3893  for(dir=0; dir<2; dir++){
3894  for(i=0; i<2; i++){
3895  for(j=0; j<2; j++){
3898  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3899  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3900  }
3901  }
3902  }
3903  }
3904  }
3905  }
3906 
3907  if (estimate_qp(s, 0) < 0)
3908  return -1;
3909 
3910  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3911  s->pict_type == AV_PICTURE_TYPE_I &&
3912  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3913  s->qscale= 3; //reduce clipping problems
3914 
3915  if (s->out_format == FMT_MJPEG) {
3916  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3917  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3918 
3919  if (s->avctx->intra_matrix) {
3920  chroma_matrix =
3921  luma_matrix = s->avctx->intra_matrix;
3922  }
3923  if (s->avctx->chroma_intra_matrix)
3924  chroma_matrix = s->avctx->chroma_intra_matrix;
3925 
3926  /* for mjpeg, we do include qscale in the matrix */
3927  for(i=1;i<64;i++){
3928  int j = s->idsp.idct_permutation[i];
3929 
3930  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3931  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3932  }
3933  s->y_dc_scale_table=
3935  s->chroma_intra_matrix[0] =
3938  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3940  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3941  s->qscale= 8;
3942  }
3943  if(s->codec_id == AV_CODEC_ID_AMV){
3944  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3945  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3946  for(i=1;i<64;i++){
3947  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3948 
3949  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3950  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3951  }
3952  s->y_dc_scale_table= y;
3953  s->c_dc_scale_table= c;
3954  s->intra_matrix[0] = 13;
3955  s->chroma_intra_matrix[0] = 14;
3957  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3959  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3960  s->qscale= 8;
3961  }
3962 
3963  //FIXME var duplication
3965  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3968 
3969  if (s->current_picture.f->key_frame)
3970  s->picture_in_gop_number=0;
3971 
3972  s->mb_x = s->mb_y = 0;
3973  s->last_bits= put_bits_count(&s->pb);
3974  switch(s->out_format) {
3975  case FMT_MJPEG:
3976  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3979  break;
3980  case FMT_H261:
3981  if (CONFIG_H261_ENCODER)
3982  ff_h261_encode_picture_header(s, picture_number);
3983  break;
3984  case FMT_H263:
3985  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3986  ff_wmv2_encode_picture_header(s, picture_number);
3987  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3988  ff_msmpeg4_encode_picture_header(s, picture_number);
3989  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3990  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3991  if (ret < 0)
3992  return ret;
3993  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3994  ret = ff_rv10_encode_picture_header(s, picture_number);
3995  if (ret < 0)
3996  return ret;
3997  }
3998  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3999  ff_rv20_encode_picture_header(s, picture_number);
4000  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
4001  ff_flv_encode_picture_header(s, picture_number);
4002  else if (CONFIG_H263_ENCODER)
4003  ff_h263_encode_picture_header(s, picture_number);
4004  break;
4005  case FMT_MPEG1:
4006  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
4007  ff_mpeg1_encode_picture_header(s, picture_number);
4008  break;
4009  default:
4010  av_assert0(0);
4011  }
4012  bits= put_bits_count(&s->pb);
4013  s->header_bits= bits - s->last_bits;
4014 
4015  for(i=1; i<context_count; i++){
4017  }
4018  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4019  for(i=1; i<context_count; i++){
4020  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4021  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4023  }
4024  emms_c();
4025  return 0;
4026 }
4027 
4028 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4029  const int intra= s->mb_intra;
4030  int i;
4031 
4032  s->dct_count[intra]++;
4033 
4034  for(i=0; i<64; i++){
4035  int level= block[i];
4036 
4037  if(level){
4038  if(level>0){
4039  s->dct_error_sum[intra][i] += level;
4040  level -= s->dct_offset[intra][i];
4041  if(level<0) level=0;
4042  }else{
4043  s->dct_error_sum[intra][i] -= level;
4044  level += s->dct_offset[intra][i];
4045  if(level>0) level=0;
4046  }
4047  block[i]= level;
4048  }
4049  }
4050 }
4051 
4053  int16_t *block, int n,
4054  int qscale, int *overflow){
4055  const int *qmat;
4056  const uint16_t *matrix;
4057  const uint8_t *scantable= s->intra_scantable.scantable;
4058  const uint8_t *perm_scantable= s->intra_scantable.permutated;
4059  int max=0;
4060  unsigned int threshold1, threshold2;
4061  int bias=0;
4062  int run_tab[65];
4063  int level_tab[65];
4064  int score_tab[65];
4065  int survivor[65];
4066  int survivor_count;
4067  int last_run=0;
4068  int last_level=0;
4069  int last_score= 0;
4070  int last_i;
4071  int coeff[2][64];
4072  int coeff_count[64];
4073  int qmul, qadd, start_i, last_non_zero, i, dc;
4074  const int esc_length= s->ac_esc_length;
4075  uint8_t * length;
4076  uint8_t * last_length;
4077  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4078  int mpeg2_qscale;
4079 
4080  s->fdsp.fdct(block);
4081 
4082  if(s->dct_error_sum)
4083  s->denoise_dct(s, block);
4084  qmul= qscale*16;
4085  qadd= ((qscale-1)|1)*8;
4086 
4087  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4088  else mpeg2_qscale = qscale << 1;
4089 
4090  if (s->mb_intra) {
4091  int q;
4092  if (!s->h263_aic) {
4093  if (n < 4)
4094  q = s->y_dc_scale;
4095  else
4096  q = s->c_dc_scale;
4097  q = q << 3;
4098  } else{
4099  /* For AIC we skip quant/dequant of INTRADC */
4100  q = 1 << 3;
4101  qadd=0;
4102  }
4103 
4104  /* note: block[0] is assumed to be positive */
4105  block[0] = (block[0] + (q >> 1)) / q;
4106  start_i = 1;
4107  last_non_zero = 0;
4108  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4109  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4110  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4111  bias= 1<<(QMAT_SHIFT-1);
4112 
4113  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4114  length = s->intra_chroma_ac_vlc_length;
4115  last_length= s->intra_chroma_ac_vlc_last_length;
4116  } else {
4117  length = s->intra_ac_vlc_length;
4118  last_length= s->intra_ac_vlc_last_length;
4119  }
4120  } else {
4121  start_i = 0;
4122  last_non_zero = -1;
4123  qmat = s->q_inter_matrix[qscale];
4124  matrix = s->inter_matrix;
4125  length = s->inter_ac_vlc_length;
4126  last_length= s->inter_ac_vlc_last_length;
4127  }
4128  last_i= start_i;
4129 
4130  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4131  threshold2= (threshold1<<1);
4132 
4133  for(i=63; i>=start_i; i--) {
4134  const int j = scantable[i];
4135  int level = block[j] * qmat[j];
4136 
4137  if(((unsigned)(level+threshold1))>threshold2){
4138  last_non_zero = i;
4139  break;
4140  }
4141  }
4142 
4143  for(i=start_i; i<=last_non_zero; i++) {
4144  const int j = scantable[i];
4145  int level = block[j] * qmat[j];
4146 
4147 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4148 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4149  if(((unsigned)(level+threshold1))>threshold2){
4150  if(level>0){
4151  level= (bias + level)>>QMAT_SHIFT;
4152  coeff[0][i]= level;
4153  coeff[1][i]= level-1;
4154 // coeff[2][k]= level-2;
4155  }else{
4156  level= (bias - level)>>QMAT_SHIFT;
4157  coeff[0][i]= -level;
4158  coeff[1][i]= -level+1;
4159 // coeff[2][k]= -level+2;
4160  }
4161  coeff_count[i]= FFMIN(level, 2);
4162  av_assert2(coeff_count[i]);
4163  max |=level;
4164  }else{
4165  coeff[0][i]= (level>>31)|1;
4166  coeff_count[i]= 1;
4167  }
4168  }
4169 
4170  *overflow= s->max_qcoeff < max; //overflow might have happened
4171 
4172  if(last_non_zero < start_i){
4173  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4174  return last_non_zero;
4175  }
4176 
4177  score_tab[start_i]= 0;
4178  survivor[0]= start_i;
4179  survivor_count= 1;
4180 
4181  for(i=start_i; i<=last_non_zero; i++){
4182  int level_index, j, zero_distortion;
4183  int dct_coeff= FFABS(block[ scantable[i] ]);
4184  int best_score=256*256*256*120;
4185 
4186  if (s->fdsp.fdct == ff_fdct_ifast)
4187  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4188  zero_distortion= dct_coeff*dct_coeff;
4189 
4190  for(level_index=0; level_index < coeff_count[i]; level_index++){
4191  int distortion;
4192  int level= coeff[level_index][i];
4193  const int alevel= FFABS(level);
4194  int unquant_coeff;
4195 
4196  av_assert2(level);
4197 
4198  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4199  unquant_coeff= alevel*qmul + qadd;
4200  } else if(s->out_format == FMT_MJPEG) {
4201  j = s->idsp.idct_permutation[scantable[i]];
4202  unquant_coeff = alevel * matrix[j] * 8;
4203  }else{ // MPEG-1
4204  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4205  if(s->mb_intra){
4206  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4207  unquant_coeff = (unquant_coeff - 1) | 1;
4208  }else{
4209  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4210  unquant_coeff = (unquant_coeff - 1) | 1;
4211  }
4212  unquant_coeff<<= 3;
4213  }
4214 
4215  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4216  level+=64;
4217  if((level&(~127)) == 0){
4218  for(j=survivor_count-1; j>=0; j--){
4219  int run= i - survivor[j];
4220  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4221  score += score_tab[i-run];
4222 
4223  if(score < best_score){
4224  best_score= score;
4225  run_tab[i+1]= run;
4226  level_tab[i+1]= level-64;
4227  }
4228  }
4229 
4230  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4231  for(j=survivor_count-1; j>=0; j--){
4232  int run= i - survivor[j];
4233  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4234  score += score_tab[i-run];
4235  if(score < last_score){
4236  last_score= score;
4237  last_run= run;
4238  last_level= level-64;
4239  last_i= i+1;
4240  }
4241  }
4242  }
4243  }else{
4244  distortion += esc_length*lambda;
4245  for(j=survivor_count-1; j>=0; j--){
4246  int run= i - survivor[j];
4247  int score= distortion + score_tab[i-run];
4248 
4249  if(score < best_score){
4250  best_score= score;
4251  run_tab[i+1]= run;
4252  level_tab[i+1]= level-64;
4253  }
4254  }
4255 
4256  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4257  for(j=survivor_count-1; j>=0; j--){
4258  int run= i - survivor[j];
4259  int score= distortion + score_tab[i-run];
4260  if(score < last_score){
4261  last_score= score;
4262  last_run= run;
4263  last_level= level-64;
4264  last_i= i+1;
4265  }
4266  }
4267  }
4268  }
4269  }
4270 
4271  score_tab[i+1]= best_score;
4272 
4273  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4274  if(last_non_zero <= 27){
4275  for(; survivor_count; survivor_count--){
4276  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4277  break;
4278  }
4279  }else{
4280  for(; survivor_count; survivor_count--){
4281  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4282  break;
4283  }
4284  }
4285 
4286  survivor[ survivor_count++ ]= i+1;
4287  }
4288 
4289  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4290  last_score= 256*256*256*120;
4291  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4292  int score= score_tab[i];
4293  if (i)
4294  score += lambda * 2; // FIXME more exact?
4295 
4296  if(score < last_score){
4297  last_score= score;
4298  last_i= i;
4299  last_level= level_tab[i];
4300  last_run= run_tab[i];
4301  }
4302  }
4303  }
4304 
4305  s->coded_score[n] = last_score;
4306 
4307  dc= FFABS(block[0]);
4308  last_non_zero= last_i - 1;
4309  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4310 
4311  if(last_non_zero < start_i)
4312  return last_non_zero;
4313 
4314  if(last_non_zero == 0 && start_i == 0){
4315  int best_level= 0;
4316  int best_score= dc * dc;
4317 
4318  for(i=0; i<coeff_count[0]; i++){
4319  int level= coeff[i][0];
4320  int alevel= FFABS(level);
4321  int unquant_coeff, score, distortion;
4322 
4323  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4324  unquant_coeff= (alevel*qmul + qadd)>>3;
4325  } else{ // MPEG-1
4326  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4327  unquant_coeff = (unquant_coeff - 1) | 1;
4328  }
4329  unquant_coeff = (unquant_coeff + 4) >> 3;
4330  unquant_coeff<<= 3 + 3;
4331 
4332  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4333  level+=64;
4334  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4335  else score= distortion + esc_length*lambda;
4336 
4337  if(score < best_score){
4338  best_score= score;
4339  best_level= level - 64;
4340  }
4341  }
4342  block[0]= best_level;
4343  s->coded_score[n] = best_score - dc*dc;
4344  if(best_level == 0) return -1;
4345  else return last_non_zero;
4346  }
4347 
4348  i= last_i;
4349  av_assert2(last_level);
4350 
4351  block[ perm_scantable[last_non_zero] ]= last_level;
4352  i -= last_run + 1;
4353 
4354  for(; i>start_i; i -= run_tab[i] + 1){
4355  block[ perm_scantable[i-1] ]= level_tab[i];
4356  }
4357 
4358  return last_non_zero;
4359 }
4360 
4361 //#define REFINE_STATS 1
4362 static int16_t basis[64][64];
4363 
4364 static void build_basis(uint8_t *perm){
4365  int i, j, x, y;
4366  emms_c();
4367  for(i=0; i<8; i++){
4368  for(j=0; j<8; j++){
4369  for(y=0; y<8; y++){
4370  for(x=0; x<8; x++){
4371  double s= 0.25*(1<<BASIS_SHIFT);
4372  int index= 8*i + j;
4373  int perm_index= perm[index];
4374  if(i==0) s*= sqrt(0.5);
4375  if(j==0) s*= sqrt(0.5);
4376  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4377  }
4378  }
4379  }
4380  }
4381 }
4382 
4383 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4384  int16_t *block, int16_t *weight, int16_t *orig,
4385  int n, int qscale){
4386  int16_t rem[64];
4387  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4388  const uint8_t *scantable= s->intra_scantable.scantable;
4389  const uint8_t *perm_scantable= s->intra_scantable.permutated;
4390 // unsigned int threshold1, threshold2;
4391 // int bias=0;
4392  int run_tab[65];
4393  int prev_run=0;
4394  int prev_level=0;
4395  int qmul, qadd, start_i, last_non_zero, i, dc;
4396  uint8_t * length;
4397  uint8_t * last_length;
4398  int lambda;
4399  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4400 #ifdef REFINE_STATS
4401 static int count=0;
4402 static int after_last=0;
4403 static int to_zero=0;
4404 static int from_zero=0;
4405 static int raise=0;
4406 static int lower=0;
4407 static int messed_sign=0;
4408 #endif
4409 
4410  if(basis[0][0] == 0)
4412 
4413  qmul= qscale*2;
4414  qadd= (qscale-1)|1;
4415  if (s->mb_intra) {
4416  if (!s->h263_aic) {
4417  if (n < 4)
4418  q = s->y_dc_scale;
4419  else
4420  q = s->c_dc_scale;
4421  } else{
4422  /* For AIC we skip quant/dequant of INTRADC */
4423  q = 1;
4424  qadd=0;
4425  }
4426  q <<= RECON_SHIFT-3;
4427  /* note: block[0] is assumed to be positive */
4428  dc= block[0]*q;
4429 // block[0] = (block[0] + (q >> 1)) / q;
4430  start_i = 1;
4431 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4432 // bias= 1<<(QMAT_SHIFT-1);
4433  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4434  length = s->intra_chroma_ac_vlc_length;
4435  last_length= s->intra_chroma_ac_vlc_last_length;
4436  } else {
4437  length = s->intra_ac_vlc_length;
4438  last_length= s->intra_ac_vlc_last_length;
4439  }
4440  } else {
4441  dc= 0;
4442  start_i = 0;
4443  length = s->inter_ac_vlc_length;
4444  last_length= s->inter_ac_vlc_last_length;
4445  }
4446  last_non_zero = s->block_last_index[n];
4447 
4448 #ifdef REFINE_STATS
4449 {START_TIMER
4450 #endif
4451  dc += (1<<(RECON_SHIFT-1));
4452  for(i=0; i<64; i++){
4453  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4454  }
4455 #ifdef REFINE_STATS
4456 STOP_TIMER("memset rem[]")}
4457 #endif
4458  sum=0;
4459  for(i=0; i<64; i++){
4460  int one= 36;
4461  int qns=4;
4462  int w;
4463 
4464  w= FFABS(weight[i]) + qns*one;
4465  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4466 
4467  weight[i] = w;
4468 // w=weight[i] = (63*qns + (w/2)) / w;
4469 
4470  av_assert2(w>0);
4471  av_assert2(w<(1<<6));
4472  sum += w*w;
4473  }
4474  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4475 #ifdef REFINE_STATS
4476 {START_TIMER
4477 #endif
4478  run=0;
4479  rle_index=0;
4480  for(i=start_i; i<=last_non_zero; i++){
4481  int j= perm_scantable[i];
4482  const int level= block[j];
4483  int coeff;
4484 
4485  if(level){
4486  if(level<0) coeff= qmul*level - qadd;
4487  else coeff= qmul*level + qadd;
4488  run_tab[rle_index++]=run;
4489  run=0;
4490 
4491  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4492  }else{
4493  run++;
4494  }
4495  }
4496 #ifdef REFINE_STATS
4497 if(last_non_zero>0){
4498 STOP_TIMER("init rem[]")
4499 }
4500 }
4501 
4502 {START_TIMER
4503 #endif
4504  for(;;){
4505  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4506  int best_coeff=0;
4507  int best_change=0;
4508  int run2, best_unquant_change=0, analyze_gradient;
4509 #ifdef REFINE_STATS
4510 {START_TIMER
4511 #endif
4512  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4513 
4514  if(analyze_gradient){
4515 #ifdef REFINE_STATS
4516 {START_TIMER
4517 #endif
4518  for(i=0; i<64; i++){
4519  int w= weight[i];
4520 
4521  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4522  }
4523 #ifdef REFINE_STATS
4524 STOP_TIMER("rem*w*w")}
4525 {START_TIMER
4526 #endif
4527  s->fdsp.fdct(d1);
4528 #ifdef REFINE_STATS
4529 STOP_TIMER("dct")}
4530 #endif
4531  }
4532 
4533  if(start_i){
4534  const int level= block[0];
4535  int change, old_coeff;
4536 
4537  av_assert2(s->mb_intra);
4538 
4539  old_coeff= q*level;
4540 
4541  for(change=-1; change<=1; change+=2){
4542  int new_level= level + change;
4543  int score, new_coeff;
4544 
4545  new_coeff= q*new_level;
4546  if(new_coeff >= 2048 || new_coeff < 0)
4547  continue;
4548 
4549  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4550  new_coeff - old_coeff);
4551  if(score<best_score){
4552  best_score= score;
4553  best_coeff= 0;
4554  best_change= change;
4555  best_unquant_change= new_coeff - old_coeff;
4556  }
4557  }
4558  }
4559 
4560  run=0;
4561  rle_index=0;
4562  run2= run_tab[rle_index++];
4563  prev_level=0;
4564  prev_run=0;
4565 
4566  for(i=start_i; i<64; i++){
4567  int j= perm_scantable[i];
4568  const int level= block[j];
4569  int change, old_coeff;
4570 
4571  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4572  break;
4573 
4574  if(level){
4575  if(level<0) old_coeff= qmul*level - qadd;
4576  else old_coeff= qmul*level + qadd;
4577  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4578  }else{
4579  old_coeff=0;
4580  run2--;
4581  av_assert2(run2>=0 || i >= last_non_zero );
4582  }
4583 
4584  for(change=-1; change<=1; change+=2){
4585  int new_level= level + change;
4586  int score, new_coeff, unquant_change;
4587 
4588  score=0;
4589  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4590  continue;
4591 
4592  if(new_level){
4593  if(new_level<0) new_coeff= qmul*new_level - qadd;
4594  else new_coeff= qmul*new_level + qadd;
4595  if(new_coeff >= 2048 || new_coeff <= -2048)
4596  continue;
4597  //FIXME check for overflow
4598 
4599  if(level){
4600  if(level < 63 && level > -63){
4601  if(i < last_non_zero)
4602  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4603  - length[UNI_AC_ENC_INDEX(run, level+64)];
4604  else
4605  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4606  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4607  }
4608  }else{
4609  av_assert2(FFABS(new_level)==1);
4610 
4611  if(analyze_gradient){
4612  int g= d1[ scantable[i] ];
4613  if(g && (g^new_level) >= 0)
4614  continue;
4615  }
4616 
4617  if(i < last_non_zero){
4618  int next_i= i + run2 + 1;
4619  int next_level= block[ perm_scantable[next_i] ] + 64;
4620 
4621  if(next_level&(~127))
4622  next_level= 0;
4623 
4624  if(next_i < last_non_zero)
4625  score += length[UNI_AC_ENC_INDEX(run, 65)]
4626  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4627  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4628  else
4629  score += length[UNI_AC_ENC_INDEX(run, 65)]
4630  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4631  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4632  }else{
4633  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4634  if(prev_level){
4635  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4636  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4637  }
4638  }
4639  }
4640  }else{
4641  new_coeff=0;
4642  av_assert2(FFABS(level)==1);
4643 
4644  if(i < last_non_zero){
4645  int next_i= i + run2 + 1;
4646  int next_level= block[ perm_scantable[next_i] ] + 64;
4647 
4648  if(next_level&(~127))
4649  next_level= 0;
4650 
4651  if(next_i < last_non_zero)
4652  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4653  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4654  - length[UNI_AC_ENC_INDEX(run, 65)];
4655  else
4656  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4657  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4658  - length[UNI_AC_ENC_INDEX(run, 65)];
4659  }else{
4660  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4661  if(prev_level){
4662  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4663  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4664  }
4665  }
4666  }
4667 
4668  score *= lambda;
4669 
4670  unquant_change= new_coeff - old_coeff;
4671  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4672 
4673  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4674  unquant_change);
4675  if(score<best_score){
4676  best_score= score;
4677  best_coeff= i;
4678  best_change= change;
4679  best_unquant_change= unquant_change;
4680  }
4681  }
4682  if(level){
4683  prev_level= level + 64;
4684  if(prev_level&(~127))
4685  prev_level= 0;
4686  prev_run= run;
4687  run=0;
4688  }else{
4689  run++;
4690  }
4691  }
4692 #ifdef REFINE_STATS
4693 STOP_TIMER("iterative step")}
4694 #endif
4695 
4696  if(best_change){
4697  int j= perm_scantable[ best_coeff ];
4698 
4699  block[j] += best_change;
4700 
4701  if(best_coeff > last_non_zero){
4702  last_non_zero= best_coeff;
4703  av_assert2(block[j]);
4704 #ifdef REFINE_STATS
4705 after_last++;
4706 #endif
4707  }else{
4708 #ifdef REFINE_STATS
4709 if(block[j]){
4710  if(block[j] - best_change){
4711  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4712  raise++;
4713  }else{
4714  lower++;
4715  }
4716  }else{
4717  from_zero++;
4718  }
4719 }else{
4720  to_zero++;
4721 }
4722 #endif
4723  for(; last_non_zero>=start_i; last_non_zero--){
4724  if(block[perm_scantable[last_non_zero]])
4725  break;
4726  }
4727  }
4728 #ifdef REFINE_STATS
4729 count++;
4730 if(256*256*256*64 % count == 0){
4731  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4732 }
4733 #endif
4734  run=0;
4735  rle_index=0;
4736  for(i=start_i; i<=last_non_zero; i++){
4737  int j= perm_scantable[i];
4738  const int level= block[j];
4739 
4740  if(level){
4741  run_tab[rle_index++]=run;
4742  run=0;
4743  }else{
4744  run++;
4745  }
4746  }
4747 
4748  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4749  }else{
4750  break;
4751  }
4752  }
4753 #ifdef REFINE_STATS
4754 if(last_non_zero>0){
4755 STOP_TIMER("iterative search")
4756 }
4757 }
4758 #endif
4759 
4760  return last_non_zero;
4761 }
4762 
/**
 * Permute an 8x8 block according to permutation.
 * @param block the block which will be permuted according to
 *              the given permutation vector
 * @param permutation the permutation vector
 * @param last the last non zero coefficient in scantable order, used to
 *             speed the permutation up
 * @param scantable the used scantable, this is only used to speed the
 *                  permutation up, the block is not (inverse) permutated
 *                  to scantable order!
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    /* nothing beyond the DC coefficient: no work to do */
    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    // return;

    /* gather every coefficient touched by the scan and clear its slot */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* scatter the saved coefficients back through the permutation */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];

        block[permutation[pos]] = saved[pos];
    }
}
4798 
4800  int16_t *block, int n,
4801  int qscale, int *overflow)
4802 {
4803  int i, j, level, last_non_zero, q, start_i;
4804  const int *qmat;
4805  const uint8_t *scantable= s->intra_scantable.scantable;
4806  int bias;
4807  int max=0;
4808  unsigned int threshold1, threshold2;
4809 
4810  s->fdsp.fdct(block);
4811 
4812  if(s->dct_error_sum)
4813  s->denoise_dct(s, block);
4814 
4815  if (s->mb_intra) {
4816  if (!s->h263_aic) {
4817  if (n < 4)
4818  q = s->y_dc_scale;
4819  else
4820  q = s->c_dc_scale;
4821  q = q << 3;
4822  } else
4823  /* For AIC we skip quant/dequant of INTRADC */
4824  q = 1 << 3;
4825 
4826  /* note: block[0] is assumed to be positive */
4827  block[0] = (block[0] + (q >> 1)) / q;
4828  start_i = 1;
4829  last_non_zero = 0;
4830  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4831  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4832  } else {
4833  start_i = 0;
4834  last_non_zero = -1;
4835  qmat = s->q_inter_matrix[qscale];
4836  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4837  }
4838  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4839  threshold2= (threshold1<<1);
4840  for(i=63;i>=start_i;i--) {
4841  j = scantable[i];
4842  level = block[j] * qmat[j];
4843 
4844  if(((unsigned)(level+threshold1))>threshold2){
4845  last_non_zero = i;
4846  break;
4847  }else{
4848  block[j]=0;
4849  }
4850  }
4851  for(i=start_i; i<=last_non_zero; i++) {
4852  j = scantable[i];
4853  level = block[j] * qmat[j];
4854 
4855 // if( bias+level >= (1<<QMAT_SHIFT)
4856 // || bias-level >= (1<<QMAT_SHIFT)){
4857  if(((unsigned)(level+threshold1))>threshold2){
4858  if(level>0){
4859  level= (bias + level)>>QMAT_SHIFT;
4860  block[j]= level;
4861  }else{
4862  level= (bias - level)>>QMAT_SHIFT;
4863  block[j]= -level;
4864  }
4865  max |=level;
4866  }else{
4867  block[j]=0;
4868  }
4869  }
4870  *overflow= s->max_qcoeff < max; //overflow might have happened
4871 
4872  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4873  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4875  scantable, last_non_zero);
4876 
4877  return last_non_zero;
4878 }
4879 
4880 #define OFFSET(x) offsetof(MpegEncContext, x)
4881 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4882 static const AVOption h263_options[] = {
4883  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4884  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4886  { NULL },
4887 };
4888 
4889 static const AVClass h263_class = {
4890  .class_name = "H.263 encoder",
4891  .item_name = av_default_item_name,
4892  .option = h263_options,
4893  .version = LIBAVUTIL_VERSION_INT,
4894 };
4895 
4897  .name = "h263",
4898  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4899  .type = AVMEDIA_TYPE_VIDEO,
4900  .id = AV_CODEC_ID_H263,
4901  .priv_data_size = sizeof(MpegEncContext),
4903  .encode2 = ff_mpv_encode_picture,
4904  .close = ff_mpv_encode_end,
4906  .priv_class = &h263_class,
4907 };
4908 
4909 static const AVOption h263p_options[] = {
4910  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4911  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4912  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4913  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4915  { NULL },
4916 };
4917 static const AVClass h263p_class = {
4918  .class_name = "H.263p encoder",
4919  .item_name = av_default_item_name,
4920  .option = h263p_options,
4921  .version = LIBAVUTIL_VERSION_INT,
4922 };
4923 
4925  .name = "h263p",
4926  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4927  .type = AVMEDIA_TYPE_VIDEO,
4928  .id = AV_CODEC_ID_H263P,
4929  .priv_data_size = sizeof(MpegEncContext),
4931  .encode2 = ff_mpv_encode_picture,
4932  .close = ff_mpv_encode_end,
4933  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4935  .priv_class = &h263p_class,
4936 };
4937 
4938 static const AVClass msmpeg4v2_class = {
4939  .class_name = "msmpeg4v2 encoder",
4940  .item_name = av_default_item_name,
4941  .option = ff_mpv_generic_options,
4942  .version = LIBAVUTIL_VERSION_INT,
4943 };
4944 
4946  .name = "msmpeg4v2",
4947  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4948  .type = AVMEDIA_TYPE_VIDEO,
4949  .id = AV_CODEC_ID_MSMPEG4V2,
4950  .priv_data_size = sizeof(MpegEncContext),
4952  .encode2 = ff_mpv_encode_picture,
4953  .close = ff_mpv_encode_end,
4955  .priv_class = &msmpeg4v2_class,
4956 };
4957 
4958 static const AVClass msmpeg4v3_class = {
4959  .class_name = "msmpeg4v3 encoder",
4960  .item_name = av_default_item_name,
4961  .option = ff_mpv_generic_options,
4962  .version = LIBAVUTIL_VERSION_INT,
4963 };
4964 
4966  .name = "msmpeg4",
4967  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4968  .type = AVMEDIA_TYPE_VIDEO,
4969  .id = AV_CODEC_ID_MSMPEG4V3,
4970  .priv_data_size = sizeof(MpegEncContext),
4972  .encode2 = ff_mpv_encode_picture,
4973  .close = ff_mpv_encode_end,
4975  .priv_class = &msmpeg4v3_class,
4976 };
4977 
4978 static const AVClass wmv1_class = {
4979  .class_name = "wmv1 encoder",
4980  .item_name = av_default_item_name,
4981  .option = ff_mpv_generic_options,
4982  .version = LIBAVUTIL_VERSION_INT,
4983 };
4984 
4986  .name = "wmv1",
4987  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4988  .type = AVMEDIA_TYPE_VIDEO,
4989  .id = AV_CODEC_ID_WMV1,
4990  .priv_data_size = sizeof(MpegEncContext),
4992  .encode2 = ff_mpv_encode_picture,
4993  .close = ff_mpv_encode_end,
4995  .priv_class = &wmv1_class,
4996 };
int last_time_base
Definition: mpegvideo.h:386
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:930
int plane
Definition: avisynth_c.h:422
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2956
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1009
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
int chroma_elim_threshold
Definition: mpegvideo.h:114
#define INPLACE_OFFSET
Definition: mpegutils.h:123
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:227
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:1741
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:572
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2741
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1510
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:124
const char * s
Definition: avisynth_c.h:768
#define RECON_SHIFT
attribute_deprecated int intra_quant_bias
Definition: avcodec.h:2261
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:109
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:520
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1002
int esc3_level_length
Definition: mpegvideo.h:438
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2333
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:385
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:187
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:3149
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:245
#define FF_CMP_DCTMAX
Definition: avcodec.h:2182
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:2297
AVOption.
Definition: opt.h:246
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:668
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:150
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:905
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:185
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3071
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:909
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:892
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:571
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:556
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:206
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2701
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:116
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1797
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
else temp
Definition: vf_mcdeint.c:259
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2858
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:370
const char * g
Definition: vf_curves.c:112
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:60
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:151
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:546
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:1351
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:2018
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:191
MJPEG encoder.
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:129
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: utils.c:3031
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:2823
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:605
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2469
#define me
int frame_skip_cmp
Definition: mpegvideo.h:564
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:436
int b_frame_strategy
Definition: mpegvideo.h:557
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:114
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: avcodec.h:1658
attribute_deprecated int lmax
Definition: avcodec.h:2805
enum AVCodecID codec_id
Definition: mpegvideo.h:109
const char * b
Definition: vf_curves.c:113
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int av_log2(unsigned v)
Definition: intmath.c:26
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2143
int frame_skip_exp
Definition: mpegvideo.h:563
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1960
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:575
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:251
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:120
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:2168
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:526
uint8_t permutated[64]
Definition: idctdsp.h:33
static const AVClass h263_class
uint8_t run
Definition: svq3.c:206
static AVPacket pkt
void ff_xvid_rate_control_uninit(struct MpegEncContext *s)
Definition: libxvid_rc.c:158
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3133
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:409
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:358
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:130
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define src
Definition: vp8dsp.c:254
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:232
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1450
AVCodec.
Definition: avcodec.h:3681
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:387
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:201
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:84
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:247
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:1356
int chroma_x_shift
Definition: mpegvideo.h:476
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:111
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:514
int field_select[2][2]
Definition: mpegvideo.h:277
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:518
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:2819
attribute_deprecated int me_method
This option does nothing.
Definition: avcodec.h:1967
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
int quant_precision
Definition: mpegvideo.h:398
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2433
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:516
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1869
int modified_quant
Definition: mpegvideo.h:379
float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run)
Definition: libxvid_rc.c:101
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:574
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:217
float rc_buffer_aggressivity
Definition: mpegvideo.h:537
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:27
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:1376
static int16_t block[64]
Definition: dct.c:115
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
attribute_deprecated int mv_bits
Definition: avcodec.h:2875
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:107
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:2104
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:125
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
attribute_deprecated int rc_strategy
Definition: avcodec.h:2031
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:407
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:493
int64_t time
time of current frame
Definition: mpegvideo.h:388
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1805
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:199
uint8_t bits
Definition: crc.c:296
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2724
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2746
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:134
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:108
AVOptions.
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:524
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:407
enum OutputFormat out_format
output format
Definition: mpegvideo.h:101
attribute_deprecated int i_count
Definition: avcodec.h:2883
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:117
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:567
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:204
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
uint16_t * chroma_intra_matrix
custom intra quantization matrix
Definition: avcodec.h:3527
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
#define FF_RC_STRATEGY_XVID
Definition: avcodec.h:2032
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:2219
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:561
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:2178
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:913
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:455
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:388
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:271
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1847
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:118
int interlaced_dct
Definition: mpegvideo.h:481
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:2150
void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2722
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:177
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
Definition: pixblockdsp.h:32
#define CHROMA_420
Definition: mpegvideo.h:473
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:441
int intra_dc_precision
Definition: mpegvideo.h:461
int repeat_first_field
Definition: mpegvideo.h:470
static AVFrame * frame
quarterpel DSP functions
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:248
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1657
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:75
#define ff_dlog(a,...)
#define AVERROR_EOF
End of file.
Definition: error.h:55
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:390
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:1115
const uint8_t * scantable
Definition: idctdsp.h:32
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:330
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:126
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:1367
ptrdiff_t size
Definition: opengl_enc.c:101
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:2083
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2901
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:896
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:573
int scenechange_threshold
Definition: mpegvideo.h:566
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:4108
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:993
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:313
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:2811
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:207
int flipflop_rounding
Definition: mpegvideo.h:435
#define CHROMA_444
Definition: mpegvideo.h:475
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:449
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:731
#define ff_sqrt
Definition: mathops.h:206
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2804
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2955
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1689
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:745
attribute_deprecated int skip_count
Definition: avcodec.h:2887
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:576
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:99
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:184
enum AVCodecID id
Definition: avcodec.h:3695
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:106
H263DSPContext h263dsp
Definition: mpegvideo.h:234
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:212
int width
width and height of the video frame
Definition: frame.h:239
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:227
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2054
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:182
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2749
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:872
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:254
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:192
int chroma_y_shift
Definition: mpegvideo.h:477
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:115
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:403
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:2156
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:3387
int qmax
maximum quantizer
Definition: avcodec.h:2682
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2361
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:220
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
ERContext er
Definition: mpegvideo.h:551
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3180
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:216
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:111
int ff_xvid_rate_control_init(struct MpegEncContext *s)
Definition: libxvid_rc.c:42
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
Definition: pixblockdsp.h:29
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:512
PixblockDSPContext pdsp
Definition: mpegvideo.h:231
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:506
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1827
uint8_t * buf
Definition: put_bits.h:38
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
GLsizei GLsizei * length
Definition: opengl_enc.c:115
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:230
attribute_deprecated int inter_quant_bias
Definition: avcodec.h:2267
const char * name
Name of the codec implementation.
Definition: avcodec.h:3688
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:399
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:541
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:404
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:254
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1115
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:89
int64_t mb_var_sum_temp
Definition: motion_est.h:86
attribute_deprecated int b_sensitivity
Definition: avcodec.h:2440
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1663
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2709
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:123
int * lambda_table
Definition: mpegvideo.h:205
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:2343
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:2739
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:82
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:474
float border_masking
Definition: mpegvideo.h:538
int progressive_frame
Definition: mpegvideo.h:479
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:261
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:868
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:296
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:451
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:110
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:74
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:926
int me_method
ME algorithm.
Definition: mpegvideo.h:256
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:171
#define width
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:1919
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:181
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:2321
int alternate_scan
Definition: mpegvideo.h:468
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2757
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:900
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:884
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:2815
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:2296
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3173
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:301
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:441
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:83
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:562
int n
Definition: avisynth_c.h:684
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:2295
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2696
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:195
#define MAX_B_FRAMES
Definition: mpegvideo.h:63
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:219
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:261
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3161
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:476
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1061
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:251
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:2879
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:434
int frame_pred_frame_dct
Definition: mpegvideo.h:462
attribute_deprecated int misc_bits
Definition: avcodec.h:2889
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:1346
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:292
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:389
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:237
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:204
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:1142
void ff_faandct(int16_t *data)
Definition: faandct.c:114
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:2059
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:107
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:2317
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:131
enum AVCodecID codec_id
Definition: avcodec.h:1749
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
attribute_deprecated int prediction_method
Definition: avcodec.h:2123
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:218
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:2038
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:440
#define START_TIMER
Definition: timer.h:94
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
main external API structure.
Definition: avcodec.h:1732
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:228
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:589
ScanTable intra_scantable
Definition: mpegvideo.h:88
int qmin
minimum quantizer
Definition: avcodec.h:2675
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:97
#define FF_CMP_NSSE
Definition: avcodec.h:2179
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:2262
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:141
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:137
FDCTDSPContext fdsp
Definition: mpegvideo.h:224
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: utils.c:3001
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:2097
float rc_qmod_amp
Definition: mpegvideo.h:534
int luma_elim_threshold
Definition: mpegvideo.h:113
attribute_deprecated int header_bits
Definition: avcodec.h:2877
GLint GLenum type
Definition: opengl_enc.c:105
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1671
Picture * picture
main picture buffer
Definition: mpegvideo.h:133
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:402
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:454
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2305
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:252
int(* pix_norm1)(uint8_t *pix, int line_size)
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:111
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:2298
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:295
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:122
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:132
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:2398
#define STRIDE_ALIGN
Definition: internal.h:82
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:126
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1242
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1736
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:119
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:235
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1081
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:121
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:2881
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1523
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:2312
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:112
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:209
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:450
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:280
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:102
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:249
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:498
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:2111
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:2090
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:253
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:250
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:880
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:201
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:186
uint8_t level
Definition: svq3.c:207
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:246
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:128
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:540
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:78
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:180
char * rc_eq
Definition: mpegvideo.h:542
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:95
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1945
PutBitContext pb
bit output
Definition: mpegvideo.h:148
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:291
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
volatile int error_count
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
int
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:2162
int quantizer_noise_shaping
Definition: mpegvideo.h:527
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MECmpContext mecc
Definition: mpegvideo.h:228
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
float rc_initial_cplx
Definition: mpegvideo.h:536
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:127
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:282
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:112
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:2864
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:211
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:206
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:2364
static int score_tab[256]
Definition: zmbvenc.c:59
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:159
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:179
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:76
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:533
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideo.h:145
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3152
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
#define H263_GOB_HEIGHT(h)
Definition: h263.h:44
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2699
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:187
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:455
int trellis
trellis RD quantization
Definition: avcodec.h:2831
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:508
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:4122
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:769
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:888
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:421
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:106
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:499
#define STOP_TIMER(id)
Definition: timer.h:95
int slices
Number of slices.
Definition: avcodec.h:2485
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1774
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:877
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:458
int dia_size
ME diamond size & shape.
Definition: avcodec.h:2192
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:2893
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:3201
VideoDSPContext vdsp
Definition: mpegvideo.h:233
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:2386
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:1361
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1618
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1782
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:498
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:331
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:100
This side data corresponds to the AVCPBProperties struct.
Definition: avcodec.h:1502
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:406
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:165
attribute_deprecated int p_count
Definition: avcodec.h:2885
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:256
attribute_deprecated int error_rate
Definition: avcodec.h:3374
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1720
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:135
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:150
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1656
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:510
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:634
int height
Definition: frame.h:239
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:523
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:124
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 start
Definition: avisynth_c.h:690
#define av_always_inline
Definition: attributes.h:39
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:488
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:931
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
attribute_deprecated int lmin
Definition: avcodec.h:2799
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:113
#define stride
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:522
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:236
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1634
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
int delay
Codec delay.
Definition: avcodec.h:1902
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2951
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1650
int ff_check_alignment(void)
Definition: me_cmp.c:988
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:596
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:142
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:3817
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:275
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:203
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2732
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:391
enum idct_permutation_type perm_type
Definition: idctdsp.h:97
attribute_deprecated int pre_me
Definition: avcodec.h:2204
HpelDSPContext hdsp
Definition: mpegvideo.h:226
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340