FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "libavutil/timer.h"
42 #include "avcodec.h"
43 #include "dct.h"
44 #include "idctdsp.h"
45 #include "mpeg12.h"
46 #include "mpegvideo.h"
47 #include "mpegvideodata.h"
48 #include "h261.h"
49 #include "h263.h"
50 #include "h263data.h"
51 #include "mjpegenc_common.h"
52 #include "mathops.h"
53 #include "mpegutils.h"
54 #include "mjpegenc.h"
55 #include "msmpeg4.h"
56 #include "pixblockdsp.h"
57 #include "qpeldsp.h"
58 #include "faandct.h"
59 #include "thread.h"
60 #include "aandcttab.h"
61 #include "flv.h"
62 #include "mpeg4video.h"
63 #include "internal.h"
64 #include "bytestream.h"
65 #include "wmv2.h"
66 #include "rv10.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
91  uint16_t (*qmat16)[2][64],
92  const uint16_t *quant_matrix,
93  int bias, int qmin, int qmax, int intra)
94 {
95  FDCTDSPContext *fdsp = &s->fdsp;
96  int qscale;
97  int shift = 0;
98 
99  for (qscale = qmin; qscale <= qmax; qscale++) {
100  int i;
101  int qscale2;
102 
104  else qscale2 = qscale << 1;
105 
106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
107 #if CONFIG_FAANDCT
108  fdsp->fdct == ff_faandct ||
109 #endif /* CONFIG_FAANDCT */
110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
111  for (i = 0; i < 64; i++) {
112  const int j = s->idsp.idct_permutation[i];
113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
114  /* 16 <= qscale * quant_matrix[i] <= 7905
115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
116  * 19952 <= x <= 249205026
117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
118  * 3444240 >= (1 << 36) / (x) >= 275 */
119 
120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
121  }
122  } else if (fdsp->fdct == ff_fdct_ifast) {
123  for (i = 0; i < 64; i++) {
124  const int j = s->idsp.idct_permutation[i];
125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
126  /* 16 <= qscale * quant_matrix[i] <= 7905
127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
128  * 19952 <= x <= 249205026
129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
130  * 3444240 >= (1 << 36) / (x) >= 275 */
131 
132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
133  }
134  } else {
135  for (i = 0; i < 64; i++) {
136  const int j = s->idsp.idct_permutation[i];
137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
139  * Assume x = qscale * quant_matrix[i]
140  * So 16 <= x <= 7905
141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
142  * so 32768 >= (1 << 19) / (x) >= 67 */
143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
145  // (qscale * quant_matrix[i]);
146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
147 
148  if (qmat16[qscale][0][i] == 0 ||
149  qmat16[qscale][0][i] == 128 * 256)
150  qmat16[qscale][0][i] = 128 * 256 - 1;
151  qmat16[qscale][1][i] =
152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
153  qmat16[qscale][0][i]);
154  }
155  }
156 
157  for (i = intra; i < 64; i++) {
158  int64_t max = 8191;
159  if (fdsp->fdct == ff_fdct_ifast) {
160  max = (8191LL * ff_aanscales[i]) >> 14;
161  }
162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
163  shift++;
164  }
165  }
166  }
167  if (shift) {
169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
170  QMAT_SHIFT - shift);
171  }
172 }
173 
174 static inline void update_qscale(MpegEncContext *s)
175 {
176  if (s->q_scale_type == 1 && 0) {
177  int i;
178  int bestdiff=INT_MAX;
179  int best = 1;
180 
181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
185  continue;
186  if (diff < bestdiff) {
187  bestdiff = diff;
188  best = i;
189  }
190  }
191  s->qscale = best;
192  } else {
193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
194  (FF_LAMBDA_SHIFT + 7);
195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
196  }
197 
198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
255 {
256  int i;
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
270 {
271  if (ARCH_X86)
273 
274  if (CONFIG_H263_ENCODER)
276  if (!s->dct_quantize)
278  if (!s->denoise_dct)
281  if (s->avctx->trellis)
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return -1;
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return -1;
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return -1;
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
349  break;
350  }
351 
352  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
358  if (avctx->me_penalty_compensation)
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
370  av_log(avctx, AV_LOG_WARNING,
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
377  if (avctx->max_b_frames > MAX_B_FRAMES) {
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
380  avctx->max_b_frames = MAX_B_FRAMES;
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
396  av_log(avctx, AV_LOG_ERROR,
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
402  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418 #if FF_API_MOTION_EST
420  s->me_method = avctx->me_method;
422 #endif
423 
424  /* Fixed QSCALE */
425  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
426 
427 #if FF_API_MPV_OPT
429  if (avctx->border_masking != 0.0)
430  s->border_masking = avctx->border_masking;
432 #endif
433 
434  s->adaptive_quant = (s->avctx->lumi_masking ||
435  s->avctx->dark_masking ||
438  s->avctx->p_masking ||
439  s->border_masking ||
440  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
441  !s->fixed_qscale;
442 
444 
445  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
446  switch(avctx->codec_id) {
449  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
450  break;
451  case AV_CODEC_ID_MPEG4:
455  if (avctx->rc_max_rate >= 15000000) {
456  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
457  } else if(avctx->rc_max_rate >= 2000000) {
458  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
459  } else if(avctx->rc_max_rate >= 384000) {
460  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
461  } else
462  avctx->rc_buffer_size = 40;
463  avctx->rc_buffer_size *= 16384;
464  break;
465  }
466  if (avctx->rc_buffer_size) {
467  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
468  }
469  }
470 
471  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
472  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
473  return -1;
474  }
475 
476  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
477  av_log(avctx, AV_LOG_INFO,
478  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
479  }
480 
481  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
482  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
483  return -1;
484  }
485 
486  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
487  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
488  return -1;
489  }
490 
491  if (avctx->rc_max_rate &&
492  avctx->rc_max_rate == avctx->bit_rate &&
493  avctx->rc_max_rate != avctx->rc_min_rate) {
494  av_log(avctx, AV_LOG_INFO,
495  "impossible bitrate constraints, this will fail\n");
496  }
497 
498  if (avctx->rc_buffer_size &&
499  avctx->bit_rate * (int64_t)avctx->time_base.num >
500  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
501  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
502  return -1;
503  }
504 
505  if (!s->fixed_qscale &&
506  avctx->bit_rate * av_q2d(avctx->time_base) >
507  avctx->bit_rate_tolerance) {
508  av_log(avctx, AV_LOG_WARNING,
509  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
510  avctx->bit_rate_tolerance = 5 * avctx->bit_rate * av_q2d(avctx->time_base);
511  }
512 
513  if (s->avctx->rc_max_rate &&
514  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
517  90000LL * (avctx->rc_buffer_size - 1) >
518  s->avctx->rc_max_rate * 0xFFFFLL) {
519  av_log(avctx, AV_LOG_INFO,
520  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
521  "specified vbv buffer is too large for the given bitrate!\n");
522  }
523 
524  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
526  s->codec_id != AV_CODEC_ID_FLV1) {
527  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
528  return -1;
529  }
530 
531  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
532  av_log(avctx, AV_LOG_ERROR,
533  "OBMC is only supported with simple mb decision\n");
534  return -1;
535  }
536 
537  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
538  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
539  return -1;
540  }
541 
542  if (s->max_b_frames &&
543  s->codec_id != AV_CODEC_ID_MPEG4 &&
546  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
547  return -1;
548  }
549  if (s->max_b_frames < 0) {
550  av_log(avctx, AV_LOG_ERROR,
551  "max b frames must be 0 or positive for mpegvideo based encoders\n");
552  return -1;
553  }
554 
555  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
556  s->codec_id == AV_CODEC_ID_H263 ||
557  s->codec_id == AV_CODEC_ID_H263P) &&
558  (avctx->sample_aspect_ratio.num > 255 ||
559  avctx->sample_aspect_ratio.den > 255)) {
560  av_log(avctx, AV_LOG_WARNING,
561  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
564  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
565  }
566 
567  if ((s->codec_id == AV_CODEC_ID_H263 ||
568  s->codec_id == AV_CODEC_ID_H263P) &&
569  (avctx->width > 2048 ||
570  avctx->height > 1152 )) {
571  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
572  return -1;
573  }
574  if ((s->codec_id == AV_CODEC_ID_H263 ||
575  s->codec_id == AV_CODEC_ID_H263P) &&
576  ((avctx->width &3) ||
577  (avctx->height&3) )) {
578  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
579  return -1;
580  }
581 
582  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
583  (avctx->width > 4095 ||
584  avctx->height > 4095 )) {
585  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
586  return -1;
587  }
588 
589  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
590  (avctx->width > 16383 ||
591  avctx->height > 16383 )) {
592  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
593  return -1;
594  }
595 
596  if (s->codec_id == AV_CODEC_ID_RV10 &&
597  (avctx->width &15 ||
598  avctx->height&15 )) {
599  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
600  return AVERROR(EINVAL);
601  }
602 
603  if (s->codec_id == AV_CODEC_ID_RV20 &&
604  (avctx->width &3 ||
605  avctx->height&3 )) {
606  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
607  return AVERROR(EINVAL);
608  }
609 
610  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
611  s->codec_id == AV_CODEC_ID_WMV2) &&
612  avctx->width & 1) {
613  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
614  return -1;
615  }
616 
619  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
620  return -1;
621  }
622 
623 #if FF_API_PRIVATE_OPT
625  if (avctx->mpeg_quant)
626  s->mpeg_quant = avctx->mpeg_quant;
628 #endif
629 
630  // FIXME mpeg2 uses that too
631  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
632  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
633  av_log(avctx, AV_LOG_ERROR,
634  "mpeg2 style quantization not supported by codec\n");
635  return -1;
636  }
637 
638  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
639  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
640  return -1;
641  }
642 
643  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
645  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
646  return -1;
647  }
648 
649  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
650  (s->codec_id == AV_CODEC_ID_AMV ||
651  s->codec_id == AV_CODEC_ID_MJPEG)) {
652  // Used to produce garbage with MJPEG.
653  av_log(avctx, AV_LOG_ERROR,
654  "QP RD is no longer compatible with MJPEG or AMV\n");
655  return -1;
656  }
657 
658 #if FF_API_PRIVATE_OPT
660  if (avctx->scenechange_threshold)
663 #endif
664 
665  if (s->scenechange_threshold < 1000000000 &&
667  av_log(avctx, AV_LOG_ERROR,
668  "closed gop with scene change detection are not supported yet, "
669  "set threshold to 1000000000\n");
670  return -1;
671  }
672 
673  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
674  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
676  av_log(avctx, AV_LOG_ERROR,
677  "low delay forcing is only available for mpeg2, "
678  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
679  return -1;
680  }
681  if (s->max_b_frames != 0) {
682  av_log(avctx, AV_LOG_ERROR,
683  "B-frames cannot be used with low delay\n");
684  return -1;
685  }
686  }
687 
688  if (s->q_scale_type == 1) {
689  if (avctx->qmax > 28) {
690  av_log(avctx, AV_LOG_ERROR,
691  "non linear quant only supports qmax <= 28 currently\n");
692  return -1;
693  }
694  }
695 
696  if (avctx->slices > 1 &&
697  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
698  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
699  return AVERROR(EINVAL);
700  }
701 
702  if (s->avctx->thread_count > 1 &&
703  s->codec_id != AV_CODEC_ID_MPEG4 &&
706  s->codec_id != AV_CODEC_ID_MJPEG &&
707  (s->codec_id != AV_CODEC_ID_H263P)) {
708  av_log(avctx, AV_LOG_ERROR,
709  "multi threaded encoding not supported by codec\n");
710  return -1;
711  }
712 
713  if (s->avctx->thread_count < 1) {
714  av_log(avctx, AV_LOG_ERROR,
715  "automatic thread number detection not supported by codec, "
716  "patch welcome\n");
717  return -1;
718  }
719 
720  if (!avctx->time_base.den || !avctx->time_base.num) {
721  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
722  return -1;
723  }
724 
725 #if FF_API_PRIVATE_OPT
727  if (avctx->b_frame_strategy)
729  if (avctx->b_sensitivity != 40)
730  s->b_sensitivity = avctx->b_sensitivity;
732 #endif
733 
734  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
735  av_log(avctx, AV_LOG_INFO,
736  "notice: b_frame_strategy only affects the first pass\n");
737  s->b_frame_strategy = 0;
738  }
739 
740  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
741  if (i > 1) {
742  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
743  avctx->time_base.den /= i;
744  avctx->time_base.num /= i;
745  //return -1;
746  }
747 
749  // (a + x * 3 / 8) / x
750  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
751  s->inter_quant_bias = 0;
752  } else {
753  s->intra_quant_bias = 0;
754  // (a - x / 4) / x
755  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
756  }
757 
758  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
759  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
760  return AVERROR(EINVAL);
761  }
762 
763 #if FF_API_QUANT_BIAS
770 #endif
771 
772  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
773 
774  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
775  s->avctx->time_base.den > (1 << 16) - 1) {
776  av_log(avctx, AV_LOG_ERROR,
777  "timebase %d/%d not supported by MPEG 4 standard, "
778  "the maximum admitted value for the timebase denominator "
779  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
780  (1 << 16) - 1);
781  return -1;
782  }
783  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
784 
785  switch (avctx->codec->id) {
787  s->out_format = FMT_MPEG1;
789  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
790  break;
792  s->out_format = FMT_MPEG1;
794  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
795  s->rtp_mode = 1;
796  break;
797  case AV_CODEC_ID_MJPEG:
798  case AV_CODEC_ID_AMV:
799  s->out_format = FMT_MJPEG;
800  s->intra_only = 1; /* force intra only for jpeg */
801  if (!CONFIG_MJPEG_ENCODER ||
802  ff_mjpeg_encode_init(s) < 0)
803  return -1;
804  avctx->delay = 0;
805  s->low_delay = 1;
806  break;
807  case AV_CODEC_ID_H261:
808  if (!CONFIG_H261_ENCODER)
809  return -1;
810  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
811  av_log(avctx, AV_LOG_ERROR,
812  "The specified picture size of %dx%d is not valid for the "
813  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
814  s->width, s->height);
815  return -1;
816  }
817  s->out_format = FMT_H261;
818  avctx->delay = 0;
819  s->low_delay = 1;
820  s->rtp_mode = 0; /* Sliced encoding not supported */
821  break;
822  case AV_CODEC_ID_H263:
823  if (!CONFIG_H263_ENCODER)
824  return -1;
826  s->width, s->height) == 8) {
827  av_log(avctx, AV_LOG_ERROR,
828  "The specified picture size of %dx%d is not valid for "
829  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
830  "352x288, 704x576, and 1408x1152. "
831  "Try H.263+.\n", s->width, s->height);
832  return -1;
833  }
834  s->out_format = FMT_H263;
835  avctx->delay = 0;
836  s->low_delay = 1;
837  break;
838  case AV_CODEC_ID_H263P:
839  s->out_format = FMT_H263;
840  s->h263_plus = 1;
841  /* Fx */
842  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
843  s->modified_quant = s->h263_aic;
844  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
845  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
846 
847  /* /Fx */
848  /* These are just to be sure */
849  avctx->delay = 0;
850  s->low_delay = 1;
851  break;
852  case AV_CODEC_ID_FLV1:
853  s->out_format = FMT_H263;
854  s->h263_flv = 2; /* format = 1; 11-bit codes */
855  s->unrestricted_mv = 1;
856  s->rtp_mode = 0; /* don't allow GOB */
857  avctx->delay = 0;
858  s->low_delay = 1;
859  break;
860  case AV_CODEC_ID_RV10:
861  s->out_format = FMT_H263;
862  avctx->delay = 0;
863  s->low_delay = 1;
864  break;
865  case AV_CODEC_ID_RV20:
866  s->out_format = FMT_H263;
867  avctx->delay = 0;
868  s->low_delay = 1;
869  s->modified_quant = 1;
870  s->h263_aic = 1;
871  s->h263_plus = 1;
872  s->loop_filter = 1;
873  s->unrestricted_mv = 0;
874  break;
875  case AV_CODEC_ID_MPEG4:
876  s->out_format = FMT_H263;
877  s->h263_pred = 1;
878  s->unrestricted_mv = 1;
879  s->low_delay = s->max_b_frames ? 0 : 1;
880  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
881  break;
883  s->out_format = FMT_H263;
884  s->h263_pred = 1;
885  s->unrestricted_mv = 1;
886  s->msmpeg4_version = 2;
887  avctx->delay = 0;
888  s->low_delay = 1;
889  break;
891  s->out_format = FMT_H263;
892  s->h263_pred = 1;
893  s->unrestricted_mv = 1;
894  s->msmpeg4_version = 3;
895  s->flipflop_rounding = 1;
896  avctx->delay = 0;
897  s->low_delay = 1;
898  break;
899  case AV_CODEC_ID_WMV1:
900  s->out_format = FMT_H263;
901  s->h263_pred = 1;
902  s->unrestricted_mv = 1;
903  s->msmpeg4_version = 4;
904  s->flipflop_rounding = 1;
905  avctx->delay = 0;
906  s->low_delay = 1;
907  break;
908  case AV_CODEC_ID_WMV2:
909  s->out_format = FMT_H263;
910  s->h263_pred = 1;
911  s->unrestricted_mv = 1;
912  s->msmpeg4_version = 5;
913  s->flipflop_rounding = 1;
914  avctx->delay = 0;
915  s->low_delay = 1;
916  break;
917  default:
918  return -1;
919  }
920 
921 #if FF_API_PRIVATE_OPT
923  if (avctx->noise_reduction)
924  s->noise_reduction = avctx->noise_reduction;
926 #endif
927 
928  avctx->has_b_frames = !s->low_delay;
929 
930  s->encoding = 1;
931 
932  s->progressive_frame =
935  s->alternate_scan);
936 
937  /* init */
938  ff_mpv_idct_init(s);
939  if (ff_mpv_common_init(s) < 0)
940  return -1;
941 
942  ff_fdctdsp_init(&s->fdsp, avctx);
943  ff_me_cmp_init(&s->mecc, avctx);
945  ff_pixblockdsp_init(&s->pdsp, avctx);
946  ff_qpeldsp_init(&s->qdsp);
947 
948  if (s->msmpeg4_version) {
950  2 * 2 * (MAX_LEVEL + 1) *
951  (MAX_RUN + 1) * 2 * sizeof(int), fail);
952  }
953  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
954 
955  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
956  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
957  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
958  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
959  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
960  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
962  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
964  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
965 
966 
967  if (s->noise_reduction) {
969  2 * 64 * sizeof(uint16_t), fail);
970  }
971 
973 
974  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
976 
977  if (s->slice_context_count > 1) {
978  s->rtp_mode = 1;
979 
980  if (avctx->codec_id == AV_CODEC_ID_H263P)
981  s->h263_slice_structured = 1;
982  }
983 
984  s->quant_precision = 5;
985 
986 #if FF_API_PRIVATE_OPT
988  if (avctx->frame_skip_threshold)
990  if (avctx->frame_skip_factor)
992  if (avctx->frame_skip_exp)
993  s->frame_skip_exp = avctx->frame_skip_exp;
994  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
995  s->frame_skip_cmp = avctx->frame_skip_cmp;
997 #endif
998 
1001 
1002  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1004  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1007  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
1008  return ret;
1009  if ((CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
1010  && s->out_format == FMT_MPEG1)
1012 
1013  /* init q matrix */
1014  for (i = 0; i < 64; i++) {
1015  int j = s->idsp.idct_permutation[i];
1016  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1017  s->mpeg_quant) {
1020  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1021  s->intra_matrix[j] =
1023  } else {
1024  /* MPEG-1/2 */
1025  s->chroma_intra_matrix[j] =
1028  }
1029  if (s->avctx->intra_matrix)
1030  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1031  if (s->avctx->inter_matrix)
1032  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1033  }
1034 
1035  /* precompute matrix */
1036  /* for mjpeg, we do include qscale in the matrix */
1037  if (s->out_format != FMT_MJPEG) {
1039  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1040  31, 1);
1042  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1043  31, 0);
1044  }
1045 
1046 #if FF_API_RC_STRATEGY
1048  if (!s->rc_strategy)
1049  s->rc_strategy = s->avctx->rc_strategy;
1051 #endif
1052 
1053  if (ff_rate_control_init(s) < 0)
1054  return -1;
1055 
1056 #if FF_API_RC_STRATEGY
1058 #endif
1059 
1061 #if CONFIG_LIBXVID
1062  ret = ff_xvid_rate_control_init(s);
1063 #else
1064  ret = AVERROR(ENOSYS);
1066  "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
1067 #endif
1068  if (ret < 0)
1069  return ret;
1070  }
1071 
1072 #if FF_API_ERROR_RATE
1074  if (avctx->error_rate)
1075  s->error_rate = avctx->error_rate;
1077 #endif
1078 
1079 #if FF_API_NORMALIZE_AQP
1081  if (avctx->flags & CODEC_FLAG_NORMALIZE_AQP)
1082  s->mpv_flags |= FF_MPV_FLAG_NAQ;
1084 #endif
1085 
1086 #if FF_API_MV0
1088  if (avctx->flags & CODEC_FLAG_MV0)
1089  s->mpv_flags |= FF_MPV_FLAG_MV0;
1091 #endif
1092 
1093 #if FF_API_MPV_OPT
1095  if (avctx->rc_qsquish != 0.0)
1096  s->rc_qsquish = avctx->rc_qsquish;
1097  if (avctx->rc_qmod_amp != 0.0)
1098  s->rc_qmod_amp = avctx->rc_qmod_amp;
1099  if (avctx->rc_qmod_freq)
1100  s->rc_qmod_freq = avctx->rc_qmod_freq;
1101  if (avctx->rc_buffer_aggressivity != 1.0)
1103  if (avctx->rc_initial_cplx != 0.0)
1104  s->rc_initial_cplx = avctx->rc_initial_cplx;
1105  if (avctx->lmin)
1106  s->lmin = avctx->lmin;
1107  if (avctx->lmax)
1108  s->lmax = avctx->lmax;
1109 
1110  if (avctx->rc_eq) {
1111  av_freep(&s->rc_eq);
1112  s->rc_eq = av_strdup(avctx->rc_eq);
1113  if (!s->rc_eq)
1114  return AVERROR(ENOMEM);
1115  }
1117 #endif
1118 
1119 #if FF_API_PRIVATE_OPT
1121  if (avctx->brd_scale)
1122  s->brd_scale = avctx->brd_scale;
1123 
1124  if (avctx->prediction_method)
1125  s->pred = avctx->prediction_method + 1;
1127 #endif
1128 
1129  if (s->b_frame_strategy == 2) {
1130  for (i = 0; i < s->max_b_frames + 2; i++) {
1131  s->tmp_frames[i] = av_frame_alloc();
1132  if (!s->tmp_frames[i])
1133  return AVERROR(ENOMEM);
1134 
1136  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1137  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1138 
1139  ret = av_frame_get_buffer(s->tmp_frames[i], 32);
1140  if (ret < 0)
1141  return ret;
1142  }
1143  }
1144 
1145  cpb_props = ff_add_cpb_side_data(avctx);
1146  if (!cpb_props)
1147  return AVERROR(ENOMEM);
1148  cpb_props->max_bitrate = avctx->rc_max_rate;
1149  cpb_props->min_bitrate = avctx->rc_min_rate;
1150  cpb_props->avg_bitrate = avctx->bit_rate;
1151  cpb_props->buffer_size = avctx->rc_buffer_size;
1152 
1153  return 0;
1154 fail:
1155  ff_mpv_encode_end(avctx);
1156  return AVERROR_UNKNOWN;
1157 }
1158 
1160 {
1161  MpegEncContext *s = avctx->priv_data;
1162  int i;
1163 
1165 #if CONFIG_LIBXVID
1168 #endif
1169 
1170  ff_mpv_common_end(s);
1171  if (CONFIG_MJPEG_ENCODER &&
1172  s->out_format == FMT_MJPEG)
1174 
1175  av_freep(&avctx->extradata);
1176 
1177  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1178  av_frame_free(&s->tmp_frames[i]);
1179 
1182 
1183  av_freep(&s->avctx->stats_out);
1184  av_freep(&s->ac_stats);
1185 
1190  av_freep(&s->q_intra_matrix);
1191  av_freep(&s->q_inter_matrix);
1194  av_freep(&s->input_picture);
1196  av_freep(&s->dct_offset);
1197 
1198  return 0;
1199 }
1200 
/**
 * Sum of absolute errors of a 16x16 block against a constant predictor.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    constant reference value (typically the block mean)
 * @param stride distance in bytes between vertically adjacent pixels
 * @return       sum over all 256 pixels of |src[...] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        for (int col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            total += diff < 0 ? -diff : diff;
        }
    }

    return total;
}
1214 
/* Count 16x16 blocks of the frame that "look intra": for each whole
 * macroblock, compare its temporal similarity (SAD against the co-located
 * block of the previous frame) with its spatial flatness (SAE around its own
 * mean).  The caller uses the total as a scene-change / B-frame score.
 * NOTE(review): the first line of the signature was lost in extraction; in
 * upstream FFmpeg it reads
 *   static int get_intra_count(MpegEncContext *s, uint8_t *src,
 * -- confirm against the upstream file. */
 uint8_t *ref, int stride)
{
    int x, y, w, h;
    int acc = 0;

    /* round dimensions down to whole 16x16 macroblocks */
    w = s->width & ~15;
    h = s->height & ~15;

    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int offset = x + y * stride;
            /* temporal cost: SAD vs. the co-located previous-frame block */
            int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
                                     stride, 16);
            /* spatial cost: SAE around the block's own mean
             * (+128 rounds the /256 mean computation) */
            int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
            int sae = get_sae(src + offset, mean, stride);

            /* block counts as intra when mean-prediction beats temporal
             * prediction by a margin of 500 */
            acc += sae + 500 < sad;
        }
    }
    return acc;
}
1237 
/* Allocate (or, when 'shared' is set, wrap the caller's buffers into) an
 * encoder-side Picture sized for the current encode context; also publishes
 * the resulting strides into s->linesize / s->uvlinesize.
 * NOTE(review): one argument line of this ff_alloc_picture() call (doxygen
 * line 1241) was lost in extraction -- compare with upstream before relying
 * on this call site. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
1245 
/**
 * Queue one user frame (or a flush request when pic_arg is NULL) into
 * s->input_picture[].
 *
 * Validates monotonicity of presentation timestamps (synthesizing one when
 * the caller supplied none), decides whether the user's buffers can be
 * referenced directly ("direct") or must be copied into an internal Picture
 * (stride/alignment/padding constraints), performs the copy with bottom-edge
 * padding when needed, and finally shifts the queue so the new frame lands at
 * the encoder-delay position.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    /* number of frames the encoder output lags behind its input */
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                /* timestamps must be strictly increasing */
                if (pts <= last) {
                    /* NOTE(review): the leading av_log(s->avctx, AV_LOG_ERROR,
                     * line of this call was lost in extraction -- restore from
                     * the upstream file. */
                           "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                           pts, last);
                    return AVERROR(EINVAL);
                }

                /* remember the pts gap so dts can be derived for B-delay */
                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            /* no pts from the caller: extrapolate or fall back to the
             * display index */
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* can the user's buffers be referenced in place? they must exist,
         * match our strides, be sufficiently aligned, and the frame size must
         * be macroblock-aligned */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            /* skip the copy if the caller already wrote into our buffer */
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                int h_chroma_shift, v_chroma_shift;
                /* NOTE(review): the av_pix_fmt_get_chroma_sub_sample(...) call
                 * line was lost in extraction -- restore from upstream. */
                                               &h_chroma_shift,
                                               &v_chroma_shift);

                /* copy each plane, padding the bottom edge when the frame is
                 * not macroblock-aligned */
                for (i = 0; i < 3; i++) {
                    int src_stride = pic_arg->linesize[i];
                    int dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = s->width >> h_shift;
                    int h = s->height >> v_shift;
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    /* interlaced MPEG-2 needs 32 lines of bottom padding */
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1396 
1398 {
1399  int x, y, plane;
1400  int score = 0;
1401  int64_t score64 = 0;
1402 
1403  for (plane = 0; plane < 3; plane++) {
1404  const int stride = p->f->linesize[plane];
1405  const int bw = plane ? 1 : 2;
1406  for (y = 0; y < s->mb_height * bw; y++) {
1407  for (x = 0; x < s->mb_width * bw; x++) {
1408  int off = p->shared ? 0 : 16;
1409  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1410  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1411  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1412 
1413  switch (FFABS(s->frame_skip_exp)) {
1414  case 0: score = FFMAX(score, v); break;
1415  case 1: score += FFABS(v); break;
1416  case 2: score64 += v * (int64_t)v; break;
1417  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1418  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1419  }
1420  }
1421  }
1422  }
1423  emms_c();
1424 
1425  if (score)
1426  score64 = score;
1427  if (s->frame_skip_exp < 0)
1428  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1429  -1.0/s->frame_skip_exp);
1430 
1431  if (score64 < s->frame_skip_threshold)
1432  return 1;
1433  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1434  return 1;
1435  return 0;
1436 }
1437 
1439 {
1440  AVPacket pkt = { 0 };
1441  int ret;
1442  int size = 0;
1443 
1444  av_init_packet(&pkt);
1445 
1446  ret = avcodec_send_frame(c, frame);
1447  if (ret < 0)
1448  return ret;
1449 
1450  do {
1451  ret = avcodec_receive_packet(c, &pkt);
1452  if (ret >= 0) {
1453  size += pkt.size;
1454  av_packet_unref(&pkt);
1455  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1456  return ret;
1457  } while (ret >= 0);
1458 
1459  return size;
1460 }
1461 
1463 {
1464  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1465  const int scale = s->brd_scale;
1466  int width = s->width >> scale;
1467  int height = s->height >> scale;
1468  int i, j, out_size, p_lambda, b_lambda, lambda2;
1469  int64_t best_rd = INT64_MAX;
1470  int best_b_count = -1;
1471  int ret = 0;
1472 
1473  av_assert0(scale >= 0 && scale <= 3);
1474 
1475  //emms_c();
1476  //s->next_picture_ptr->quality;
1477  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1478  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1479  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1480  if (!b_lambda) // FIXME we should do this somewhere else
1481  b_lambda = p_lambda;
1482  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1484 
1485  for (i = 0; i < s->max_b_frames + 2; i++) {
1486  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1487  s->next_picture_ptr;
1488  uint8_t *data[4];
1489 
1490  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1491  pre_input = *pre_input_ptr;
1492  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1493 
1494  if (!pre_input.shared && i) {
1495  data[0] += INPLACE_OFFSET;
1496  data[1] += INPLACE_OFFSET;
1497  data[2] += INPLACE_OFFSET;
1498  }
1499 
1500  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1501  s->tmp_frames[i]->linesize[0],
1502  data[0],
1503  pre_input.f->linesize[0],
1504  width, height);
1505  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1506  s->tmp_frames[i]->linesize[1],
1507  data[1],
1508  pre_input.f->linesize[1],
1509  width >> 1, height >> 1);
1510  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1511  s->tmp_frames[i]->linesize[2],
1512  data[2],
1513  pre_input.f->linesize[2],
1514  width >> 1, height >> 1);
1515  }
1516  }
1517 
1518  for (j = 0; j < s->max_b_frames + 1; j++) {
1519  AVCodecContext *c;
1520  int64_t rd = 0;
1521 
1522  if (!s->input_picture[j])
1523  break;
1524 
1526  if (!c)
1527  return AVERROR(ENOMEM);
1528 
1529  c->width = width;
1530  c->height = height;
1532  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1533  c->mb_decision = s->avctx->mb_decision;
1534  c->me_cmp = s->avctx->me_cmp;
1535  c->mb_cmp = s->avctx->mb_cmp;
1536  c->me_sub_cmp = s->avctx->me_sub_cmp;
1538  c->time_base = s->avctx->time_base;
1539  c->max_b_frames = s->max_b_frames;
1540 
1541  ret = avcodec_open2(c, codec, NULL);
1542  if (ret < 0)
1543  goto fail;
1544 
1546  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1547 
1548  out_size = encode_frame(c, s->tmp_frames[0]);
1549  if (out_size < 0) {
1550  ret = out_size;
1551  goto fail;
1552  }
1553 
1554  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1555 
1556  for (i = 0; i < s->max_b_frames + 1; i++) {
1557  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1558 
1559  s->tmp_frames[i + 1]->pict_type = is_p ?
1561  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1562 
1563  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1564  if (out_size < 0) {
1565  ret = out_size;
1566  goto fail;
1567  }
1568 
1569  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1570  }
1571 
1572  /* get the delayed frames */
1573  out_size = encode_frame(c, NULL);
1574  if (out_size < 0) {
1575  ret = out_size;
1576  goto fail;
1577  }
1578  rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
1579 
1580  rd += c->error[0] + c->error[1] + c->error[2];
1581 
1582  if (rd < best_rd) {
1583  best_rd = rd;
1584  best_b_count = j;
1585  }
1586 
1587 fail:
1589  if (ret < 0)
1590  return ret;
1591  }
1592 
1593  return best_b_count;
1594 }
1595 
1597 {
1598  int i, ret;
1599 
1600  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1602  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1603 
1604  /* set next picture type & ordering */
1605  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1606  if (s->frame_skip_threshold || s->frame_skip_factor) {
1607  if (s->picture_in_gop_number < s->gop_size &&
1608  s->next_picture_ptr &&
1609  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1610  // FIXME check that the gop check above is +-1 correct
1611  av_frame_unref(s->input_picture[0]->f);
1612 
1613  ff_vbv_update(s, 0);
1614 
1615  goto no_output_pic;
1616  }
1617  }
1618 
1619  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1620  !s->next_picture_ptr || s->intra_only) {
1621  s->reordered_input_picture[0] = s->input_picture[0];
1624  s->coded_picture_number++;
1625  } else {
1626  int b_frames = 0;
1627 
1628  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1629  for (i = 0; i < s->max_b_frames + 1; i++) {
1630  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1631 
1632  if (pict_num >= s->rc_context.num_entries)
1633  break;
1634  if (!s->input_picture[i]) {
1635  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1636  break;
1637  }
1638 
1639  s->input_picture[i]->f->pict_type =
1640  s->rc_context.entry[pict_num].new_pict_type;
1641  }
1642  }
1643 
1644  if (s->b_frame_strategy == 0) {
1645  b_frames = s->max_b_frames;
1646  while (b_frames && !s->input_picture[b_frames])
1647  b_frames--;
1648  } else if (s->b_frame_strategy == 1) {
1649  for (i = 1; i < s->max_b_frames + 1; i++) {
1650  if (s->input_picture[i] &&
1651  s->input_picture[i]->b_frame_score == 0) {
1652  s->input_picture[i]->b_frame_score =
1653  get_intra_count(s,
1654  s->input_picture[i ]->f->data[0],
1655  s->input_picture[i - 1]->f->data[0],
1656  s->linesize) + 1;
1657  }
1658  }
1659  for (i = 0; i < s->max_b_frames + 1; i++) {
1660  if (!s->input_picture[i] ||
1661  s->input_picture[i]->b_frame_score - 1 >
1662  s->mb_num / s->b_sensitivity)
1663  break;
1664  }
1665 
1666  b_frames = FFMAX(0, i - 1);
1667 
1668  /* reset scores */
1669  for (i = 0; i < b_frames + 1; i++) {
1670  s->input_picture[i]->b_frame_score = 0;
1671  }
1672  } else if (s->b_frame_strategy == 2) {
1673  b_frames = estimate_best_b_count(s);
1674  if (b_frames < 0)
1675  return b_frames;
1676  }
1677 
1678  emms_c();
1679 
1680  for (i = b_frames - 1; i >= 0; i--) {
1681  int type = s->input_picture[i]->f->pict_type;
1682  if (type && type != AV_PICTURE_TYPE_B)
1683  b_frames = i;
1684  }
1685  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1686  b_frames == s->max_b_frames) {
1688  "warning, too many B-frames in a row\n");
1689  }
1690 
1691  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1692  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1693  s->gop_size > s->picture_in_gop_number) {
1694  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1695  } else {
1697  b_frames = 0;
1698  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1699  }
1700  }
1701 
1702  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1703  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1704  b_frames--;
1705 
1706  s->reordered_input_picture[0] = s->input_picture[b_frames];
1710  s->coded_picture_number++;
1711  for (i = 0; i < b_frames; i++) {
1712  s->reordered_input_picture[i + 1] = s->input_picture[i];
1713  s->reordered_input_picture[i + 1]->f->pict_type =
1716  s->coded_picture_number++;
1717  }
1718  }
1719  }
1720 no_output_pic:
1722 
1723  if (s->reordered_input_picture[0]) {
1726  AV_PICTURE_TYPE_B ? 3 : 0;
1727 
1728  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1729  return ret;
1730 
1731  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1732  // input is a shared pix, so we can't modify it -> allocate a new
1733  // one & ensure that the shared one is reuseable
1734 
1735  Picture *pic;
1736  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1737  if (i < 0)
1738  return i;
1739  pic = &s->picture[i];
1740 
1742  if (alloc_picture(s, pic, 0) < 0) {
1743  return -1;
1744  }
1745 
1746  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1747  if (ret < 0)
1748  return ret;
1749 
1750  /* mark us unused / free shared pic */
1752  s->reordered_input_picture[0]->shared = 0;
1753 
1754  s->current_picture_ptr = pic;
1755  } else {
1756  // input is not a shared pix -> reuse buffer for current_pix
1758  for (i = 0; i < 4; i++) {
1759  s->new_picture.f->data[i] += INPLACE_OFFSET;
1760  }
1761  }
1763  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1764  s->current_picture_ptr)) < 0)
1765  return ret;
1766 
1768  }
1769  return 0;
1770 }
1771 
/* Per-frame post-encode bookkeeping: pad the reconstructed picture's edges
 * (needed by unrestricted motion vectors of the next frame) and roll over
 * the per-frame state.
 * NOTE(review): this function lost many lines in extraction -- the pixdesc
 * lookup that defines 'desc', part of the enclosing if-condition, and the
 * three s->mpvencdsp.draw_edges(...) call heads are missing, as is the body
 * of the FF_API_CODED_FRAME block.  Restore from upstream before editing. */
static void frame_end(MpegEncContext *s)
{
    if (s->unrestricted_mv &&
        !s->intra_only) {
        int hshift = desc->log2_chroma_w;
        int vshift = desc->log2_chroma_h;
        /* NOTE(review): draw_edges() call head for the luma plane lost */
        s->current_picture.f->linesize[0],
        s->h_edge_pos, s->v_edge_pos,
        EDGE_TOP | EDGE_BOTTOM);
        /* NOTE(review): draw_edges() call head for the Cb plane lost */
        s->current_picture.f->linesize[1],
        s->h_edge_pos >> hshift,
        s->v_edge_pos >> vshift,
        EDGE_WIDTH >> hshift,
        EDGE_WIDTH >> vshift,
        EDGE_TOP | EDGE_BOTTOM);
        /* NOTE(review): draw_edges() call head for the Cr plane lost */
        s->current_picture.f->linesize[2],
        s->h_edge_pos >> hshift,
        s->v_edge_pos >> vshift,
        EDGE_WIDTH >> hshift,
        EDGE_WIDTH >> vshift,
        EDGE_TOP | EDGE_BOTTOM);
    }

    emms_c();

    s->last_pict_type = s->pict_type;
    /* NOTE(review): the statement guarded by this condition was lost */
    if (s->pict_type!= AV_PICTURE_TYPE_B)

#if FF_API_CODED_FRAME
#endif
#if FF_API_ERROR_FRAME
    /* NOTE(review): memcpy head of this deprecated error export lost */
    sizeof(s->current_picture.encoding_error));
#endif
}
1821 
1823 {
1824  int intra, i;
1825 
1826  for (intra = 0; intra < 2; intra++) {
1827  if (s->dct_count[intra] > (1 << 16)) {
1828  for (i = 0; i < 64; i++) {
1829  s->dct_error_sum[intra][i] >>= 1;
1830  }
1831  s->dct_count[intra] >>= 1;
1832  }
1833 
1834  for (i = 0; i < 64; i++) {
1835  s->dct_offset[intra][i] = (s->noise_reduction *
1836  s->dct_count[intra] +
1837  s->dct_error_sum[intra][i] / 2) /
1838  (s->dct_error_sum[intra][i] + 1);
1839  }
1840  }
1841 }
1842 
1844 {
1845  int ret;
1846 
1847  /* mark & release old frames */
1848  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1850  s->last_picture_ptr->f->buf[0]) {
1852  }
1853 
1856 
1858  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1859  s->current_picture_ptr)) < 0)
1860  return ret;
1861 
1862  if (s->pict_type != AV_PICTURE_TYPE_B) {
1864  if (!s->droppable)
1866  }
1867 
1868  if (s->last_picture_ptr) {
1870  if (s->last_picture_ptr->f->buf[0] &&
1871  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1872  s->last_picture_ptr)) < 0)
1873  return ret;
1874  }
1875  if (s->next_picture_ptr) {
1877  if (s->next_picture_ptr->f->buf[0] &&
1878  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1879  s->next_picture_ptr)) < 0)
1880  return ret;
1881  }
1882 
1883  if (s->picture_structure!= PICT_FRAME) {
1884  int i;
1885  for (i = 0; i < 4; i++) {
1887  s->current_picture.f->data[i] +=
1888  s->current_picture.f->linesize[i];
1889  }
1890  s->current_picture.f->linesize[i] *= 2;
1891  s->last_picture.f->linesize[i] *= 2;
1892  s->next_picture.f->linesize[i] *= 2;
1893  }
1894  }
1895 
1896  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1899  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1902  } else {
1905  }
1906 
1907  if (s->dct_error_sum) {
1910  }
1911 
1912  return 0;
1913 }
1914 
1916  const AVFrame *pic_arg, int *got_packet)
1917 {
1918  MpegEncContext *s = avctx->priv_data;
1919  int i, stuffing_count, ret;
1920  int context_count = s->slice_context_count;
1921 
1922  s->vbv_ignore_qmax = 0;
1923 
1924  s->picture_in_gop_number++;
1925 
1926  if (load_input_picture(s, pic_arg) < 0)
1927  return -1;
1928 
1929  if (select_input_picture(s) < 0) {
1930  return -1;
1931  }
1932 
1933  /* output? */
1934  if (s->new_picture.f->data[0]) {
1935  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1936  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1937  :
1938  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1939  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1940  return ret;
1941  if (s->mb_info) {
1944  s->mb_width*s->mb_height*12);
1945  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1946  }
1947 
1948  for (i = 0; i < context_count; i++) {
1949  int start_y = s->thread_context[i]->start_mb_y;
1950  int end_y = s->thread_context[i]-> end_mb_y;
1951  int h = s->mb_height;
1952  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1953  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1954 
1955  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1956  }
1957 
1958  s->pict_type = s->new_picture.f->pict_type;
1959  //emms_c();
1960  ret = frame_start(s);
1961  if (ret < 0)
1962  return ret;
1963 vbv_retry:
1964  ret = encode_picture(s, s->picture_number);
1965  if (growing_buffer) {
1966  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1967  pkt->data = s->pb.buf;
1968  pkt->size = avctx->internal->byte_buffer_size;
1969  }
1970  if (ret < 0)
1971  return -1;
1972 
1973 #if FF_API_STAT_BITS
1975  avctx->header_bits = s->header_bits;
1976  avctx->mv_bits = s->mv_bits;
1977  avctx->misc_bits = s->misc_bits;
1978  avctx->i_tex_bits = s->i_tex_bits;
1979  avctx->p_tex_bits = s->p_tex_bits;
1980  avctx->i_count = s->i_count;
1981  // FIXME f/b_count in avctx
1982  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1983  avctx->skip_count = s->skip_count;
1985 #endif
1986 
1987  frame_end(s);
1988 
1989  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1991 
1992  if (avctx->rc_buffer_size) {
1993  RateControlContext *rcc = &s->rc_context;
1994  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1995  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1996  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1997 
1998  if (put_bits_count(&s->pb) > max_size &&
1999  s->lambda < s->lmax) {
2000  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2001  (s->qscale + 1) / s->qscale);
2002  if (s->adaptive_quant) {
2003  int i;
2004  for (i = 0; i < s->mb_height * s->mb_stride; i++)
2005  s->lambda_table[i] =
2006  FFMAX(s->lambda_table[i] + min_step,
2007  s->lambda_table[i] * (s->qscale + 1) /
2008  s->qscale);
2009  }
2010  s->mb_skipped = 0; // done in frame_start()
2011  // done in encode_picture() so we must undo it
2012  if (s->pict_type == AV_PICTURE_TYPE_P) {
2013  if (s->flipflop_rounding ||
2014  s->codec_id == AV_CODEC_ID_H263P ||
2016  s->no_rounding ^= 1;
2017  }
2018  if (s->pict_type != AV_PICTURE_TYPE_B) {
2019  s->time_base = s->last_time_base;
2020  s->last_non_b_time = s->time - s->pp_time;
2021  }
2022  for (i = 0; i < context_count; i++) {
2023  PutBitContext *pb = &s->thread_context[i]->pb;
2024  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
2025  }
2026  s->vbv_ignore_qmax = 1;
2027  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2028  goto vbv_retry;
2029  }
2030 
2032  }
2033 
2034  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2036 
2037  for (i = 0; i < 4; i++) {
2039  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
2040  }
2043  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
2044  s->pict_type);
2045 
2046  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
2047  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2048  s->misc_bits + s->i_tex_bits +
2049  s->p_tex_bits);
2050  flush_put_bits(&s->pb);
2051  s->frame_bits = put_bits_count(&s->pb);
2052 
2053  stuffing_count = ff_vbv_update(s, s->frame_bits);
2054  s->stuffing_bits = 8*stuffing_count;
2055  if (stuffing_count) {
2056  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2057  stuffing_count + 50) {
2058  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
2059  return -1;
2060  }
2061 
2062  switch (s->codec_id) {
2065  while (stuffing_count--) {
2066  put_bits(&s->pb, 8, 0);
2067  }
2068  break;
2069  case AV_CODEC_ID_MPEG4:
2070  put_bits(&s->pb, 16, 0);
2071  put_bits(&s->pb, 16, 0x1C3);
2072  stuffing_count -= 4;
2073  while (stuffing_count--) {
2074  put_bits(&s->pb, 8, 0xFF);
2075  }
2076  break;
2077  default:
2078  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2079  }
2080  flush_put_bits(&s->pb);
2081  s->frame_bits = put_bits_count(&s->pb);
2082  }
2083 
2084  /* update MPEG-1/2 vbv_delay for CBR */
2085  if (s->avctx->rc_max_rate &&
2086  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
2087  s->out_format == FMT_MPEG1 &&
2088  90000LL * (avctx->rc_buffer_size - 1) <=
2089  s->avctx->rc_max_rate * 0xFFFFLL) {
2090  AVCPBProperties *props;
2091  size_t props_size;
2092 
2093  int vbv_delay, min_delay;
2094  double inbits = s->avctx->rc_max_rate *
2095  av_q2d(s->avctx->time_base);
2096  int minbits = s->frame_bits - 8 *
2097  (s->vbv_delay_ptr - s->pb.buf - 1);
2098  double bits = s->rc_context.buffer_index + minbits - inbits;
2099 
2100  if (bits < 0)
2102  "Internal error, negative bits\n");
2103 
2104  assert(s->repeat_first_field == 0);
2105 
2106  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2107  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2108  s->avctx->rc_max_rate;
2109 
2110  vbv_delay = FFMAX(vbv_delay, min_delay);
2111 
2112  av_assert0(vbv_delay < 0xFFFF);
2113 
2114  s->vbv_delay_ptr[0] &= 0xF8;
2115  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2116  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2117  s->vbv_delay_ptr[2] &= 0x07;
2118  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2119 
2120  props = av_cpb_properties_alloc(&props_size);
2121  if (!props)
2122  return AVERROR(ENOMEM);
2123  props->vbv_delay = vbv_delay * 300;
2124 
2126  (uint8_t*)props, props_size);
2127  if (ret < 0) {
2128  av_freep(&props);
2129  return ret;
2130  }
2131 
2132 #if FF_API_VBV_DELAY
2134  avctx->vbv_delay = vbv_delay * 300;
2136 #endif
2137  }
2138  s->total_bits += s->frame_bits;
2139 #if FF_API_STAT_BITS
2141  avctx->frame_bits = s->frame_bits;
2143 #endif
2144 
2145 
2146  pkt->pts = s->current_picture.f->pts;
2147  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2149  pkt->dts = pkt->pts - s->dts_delta;
2150  else
2151  pkt->dts = s->reordered_pts;
2152  s->reordered_pts = pkt->pts;
2153  } else
2154  pkt->dts = pkt->pts;
2155  if (s->current_picture.f->key_frame)
2156  pkt->flags |= AV_PKT_FLAG_KEY;
2157  if (s->mb_info)
2159  } else {
2160  s->frame_bits = 0;
2161  }
2162 
2163  /* release non-reference frames */
2164  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2165  if (!s->picture[i].reference)
2166  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2167  }
2168 
2169  av_assert1((s->frame_bits & 7) == 0);
2170 
2171  pkt->size = s->frame_bits / 8;
2172  *got_packet = !!pkt->size;
2173  return 0;
2174 }
2175 
/* Small-coefficient elimination for block n: if the block holds only +-1
 * coefficients whose run-length score (via 'tab', which weights positions
 * near DC more heavily) stays below 'threshold', zero the whole block --
 * the bits saved outweigh the tiny added distortion.  A negative threshold
 * means the DC coefficient may be eliminated as well.
 * NOTE(review): the first signature line (with the function name; upstream
 * it is  static void dct_single_coeff_elimination(MpegEncContext *s, )
 * was lost in extraction -- confirm against the upstream file. */
 int n, int threshold)
{
    /* score contribution per preceding zero-run length */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0;
    int run = 0;
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    /* scan in coding order; any |coefficient| > 1 keeps the block */
    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run];
            run = 0;
        } else if (level > 1) {
            return;
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    /* block judged not worth coding: clear every eliminable coefficient */
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2231 
2232 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2233  int last_index)
2234 {
2235  int i;
2236  const int maxlevel = s->max_qcoeff;
2237  const int minlevel = s->min_qcoeff;
2238  int overflow = 0;
2239 
2240  if (s->mb_intra) {
2241  i = 1; // skip clipping of intra dc
2242  } else
2243  i = 0;
2244 
2245  for (; i <= last_index; i++) {
2246  const int j = s->intra_scantable.permutated[i];
2247  int level = block[j];
2248 
2249  if (level > maxlevel) {
2250  level = maxlevel;
2251  overflow++;
2252  } else if (level < minlevel) {
2253  level = minlevel;
2254  overflow++;
2255  }
2256 
2257  block[j] = level;
2258  }
2259 
2260  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2261  av_log(s->avctx, AV_LOG_INFO,
2262  "warning, clipping %d dct coefficients to %d..%d\n",
2263  overflow, minlevel, maxlevel);
2264 }
2265 
/**
 * Build a per-coefficient visual masking table for one 8x8 block: each entry
 * is proportional to the local pixel deviation in a 3x3 neighbourhood
 * (clipped at the block border), so flat regions get low weights and
 * textured regions high ones.
 *
 * @param weight out: 64 weights, one per pixel position
 * @param ptr    top-left pixel of the 8x8 source block
 * @param stride row stride of the source block in bytes
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    // FIXME optimize
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;

            /* 3x3 window around (x, y), clipped to the 8x8 block */
            const int y_lo = FFMAX(y - 1, 0), y_hi = FFMIN(8, y + 2);
            const int x_lo = FFMAX(x - 1, 0), x_hi = FFMIN(8, x + 2);

            for (int yy = y_lo; yy < y_hi; yy++) {
                for (int xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* count*sqr - sum*sum == count^2 * variance; 36/count scales
             * the integer sqrt into the weight range */
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2289 
2291  int motion_x, int motion_y,
2292  int mb_block_height,
2293  int mb_block_width,
2294  int mb_block_count)
2295 {
2296  int16_t weight[12][64];
2297  int16_t orig[12][64];
2298  const int mb_x = s->mb_x;
2299  const int mb_y = s->mb_y;
2300  int i;
2301  int skip_dct[12];
2302  int dct_offset = s->linesize * 8; // default for progressive frames
2303  int uv_dct_offset = s->uvlinesize * 8;
2304  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2305  ptrdiff_t wrap_y, wrap_c;
2306 
2307  for (i = 0; i < mb_block_count; i++)
2308  skip_dct[i] = s->skipdct;
2309 
2310  if (s->adaptive_quant) {
2311  const int last_qp = s->qscale;
2312  const int mb_xy = mb_x + mb_y * s->mb_stride;
2313 
2314  s->lambda = s->lambda_table[mb_xy];
2315  update_qscale(s);
2316 
2317  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2318  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2319  s->dquant = s->qscale - last_qp;
2320 
2321  if (s->out_format == FMT_H263) {
2322  s->dquant = av_clip(s->dquant, -2, 2);
2323 
2324  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2325  if (!s->mb_intra) {
2326  if (s->pict_type == AV_PICTURE_TYPE_B) {
2327  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2328  s->dquant = 0;
2329  }
2330  if (s->mv_type == MV_TYPE_8X8)
2331  s->dquant = 0;
2332  }
2333  }
2334  }
2335  }
2336  ff_set_qscale(s, last_qp + s->dquant);
2337  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2338  ff_set_qscale(s, s->qscale + s->dquant);
2339 
2340  wrap_y = s->linesize;
2341  wrap_c = s->uvlinesize;
2342  ptr_y = s->new_picture.f->data[0] +
2343  (mb_y * 16 * wrap_y) + mb_x * 16;
2344  ptr_cb = s->new_picture.f->data[1] +
2345  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2346  ptr_cr = s->new_picture.f->data[2] +
2347  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2348 
2349  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2350  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2351  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2352  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2353  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2354  wrap_y, wrap_y,
2355  16, 16, mb_x * 16, mb_y * 16,
2356  s->width, s->height);
2357  ptr_y = ebuf;
2358  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2359  wrap_c, wrap_c,
2360  mb_block_width, mb_block_height,
2361  mb_x * mb_block_width, mb_y * mb_block_height,
2362  cw, ch);
2363  ptr_cb = ebuf + 16 * wrap_y;
2364  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2365  wrap_c, wrap_c,
2366  mb_block_width, mb_block_height,
2367  mb_x * mb_block_width, mb_y * mb_block_height,
2368  cw, ch);
2369  ptr_cr = ebuf + 16 * wrap_y + 16;
2370  }
2371 
2372  if (s->mb_intra) {
2374  int progressive_score, interlaced_score;
2375 
2376  s->interlaced_dct = 0;
2377  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2378  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2379  NULL, wrap_y, 8) - 400;
2380 
2381  if (progressive_score > 0) {
2382  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2383  NULL, wrap_y * 2, 8) +
2384  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2385  NULL, wrap_y * 2, 8);
2386  if (progressive_score > interlaced_score) {
2387  s->interlaced_dct = 1;
2388 
2389  dct_offset = wrap_y;
2390  uv_dct_offset = wrap_c;
2391  wrap_y <<= 1;
2392  if (s->chroma_format == CHROMA_422 ||
2393  s->chroma_format == CHROMA_444)
2394  wrap_c <<= 1;
2395  }
2396  }
2397  }
2398 
2399  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2400  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2401  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2402  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2403 
2404  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2405  skip_dct[4] = 1;
2406  skip_dct[5] = 1;
2407  } else {
2408  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2409  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2410  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2411  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2412  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2413  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2414  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2415  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2416  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2417  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2418  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2419  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2420  }
2421  }
2422  } else {
2423  op_pixels_func (*op_pix)[4];
2424  qpel_mc_func (*op_qpix)[16];
2425  uint8_t *dest_y, *dest_cb, *dest_cr;
2426 
2427  dest_y = s->dest[0];
2428  dest_cb = s->dest[1];
2429  dest_cr = s->dest[2];
2430 
2431  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2432  op_pix = s->hdsp.put_pixels_tab;
2433  op_qpix = s->qdsp.put_qpel_pixels_tab;
2434  } else {
2435  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2436  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2437  }
2438 
2439  if (s->mv_dir & MV_DIR_FORWARD) {
2440  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2441  s->last_picture.f->data,
2442  op_pix, op_qpix);
2443  op_pix = s->hdsp.avg_pixels_tab;
2444  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2445  }
2446  if (s->mv_dir & MV_DIR_BACKWARD) {
2447  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2448  s->next_picture.f->data,
2449  op_pix, op_qpix);
2450  }
2451 
2453  int progressive_score, interlaced_score;
2454 
2455  s->interlaced_dct = 0;
2456  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2457  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2458  ptr_y + wrap_y * 8,
2459  wrap_y, 8) - 400;
2460 
2461  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2462  progressive_score -= 400;
2463 
2464  if (progressive_score > 0) {
2465  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2466  wrap_y * 2, 8) +
2467  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2468  ptr_y + wrap_y,
2469  wrap_y * 2, 8);
2470 
2471  if (progressive_score > interlaced_score) {
2472  s->interlaced_dct = 1;
2473 
2474  dct_offset = wrap_y;
2475  uv_dct_offset = wrap_c;
2476  wrap_y <<= 1;
2477  if (s->chroma_format == CHROMA_422)
2478  wrap_c <<= 1;
2479  }
2480  }
2481  }
2482 
2483  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2484  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2485  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2486  dest_y + dct_offset, wrap_y);
2487  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2488  dest_y + dct_offset + 8, wrap_y);
2489 
2490  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2491  skip_dct[4] = 1;
2492  skip_dct[5] = 1;
2493  } else {
2494  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2495  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2496  if (!s->chroma_y_shift) { /* 422 */
2497  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2498  dest_cb + uv_dct_offset, wrap_c);
2499  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2500  dest_cr + uv_dct_offset, wrap_c);
2501  }
2502  }
2503  /* pre quantization */
2504  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2505  2 * s->qscale * s->qscale) {
2506  // FIXME optimize
2507  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2508  skip_dct[0] = 1;
2509  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2510  skip_dct[1] = 1;
2511  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2512  wrap_y, 8) < 20 * s->qscale)
2513  skip_dct[2] = 1;
2514  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2515  wrap_y, 8) < 20 * s->qscale)
2516  skip_dct[3] = 1;
2517  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2518  skip_dct[4] = 1;
2519  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2520  skip_dct[5] = 1;
2521  if (!s->chroma_y_shift) { /* 422 */
2522  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2523  dest_cb + uv_dct_offset,
2524  wrap_c, 8) < 20 * s->qscale)
2525  skip_dct[6] = 1;
2526  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2527  dest_cr + uv_dct_offset,
2528  wrap_c, 8) < 20 * s->qscale)
2529  skip_dct[7] = 1;
2530  }
2531  }
2532  }
2533 
2534  if (s->quantizer_noise_shaping) {
2535  if (!skip_dct[0])
2536  get_visual_weight(weight[0], ptr_y , wrap_y);
2537  if (!skip_dct[1])
2538  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2539  if (!skip_dct[2])
2540  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2541  if (!skip_dct[3])
2542  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2543  if (!skip_dct[4])
2544  get_visual_weight(weight[4], ptr_cb , wrap_c);
2545  if (!skip_dct[5])
2546  get_visual_weight(weight[5], ptr_cr , wrap_c);
2547  if (!s->chroma_y_shift) { /* 422 */
2548  if (!skip_dct[6])
2549  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2550  wrap_c);
2551  if (!skip_dct[7])
2552  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2553  wrap_c);
2554  }
2555  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2556  }
2557 
2558  /* DCT & quantize */
2559  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2560  {
2561  for (i = 0; i < mb_block_count; i++) {
2562  if (!skip_dct[i]) {
2563  int overflow;
2564  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2565  // FIXME we could decide to change to quantizer instead of
2566  // clipping
2567  // JS: I don't think that would be a good idea it could lower
2568  // quality instead of improve it. Just INTRADC clipping
2569  // deserves changes in quantizer
2570  if (overflow)
2571  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2572  } else
2573  s->block_last_index[i] = -1;
2574  }
2575  if (s->quantizer_noise_shaping) {
2576  for (i = 0; i < mb_block_count; i++) {
2577  if (!skip_dct[i]) {
2578  s->block_last_index[i] =
2579  dct_quantize_refine(s, s->block[i], weight[i],
2580  orig[i], i, s->qscale);
2581  }
2582  }
2583  }
2584 
2585  if (s->luma_elim_threshold && !s->mb_intra)
2586  for (i = 0; i < 4; i++)
2588  if (s->chroma_elim_threshold && !s->mb_intra)
2589  for (i = 4; i < mb_block_count; i++)
2591 
2592  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2593  for (i = 0; i < mb_block_count; i++) {
2594  if (s->block_last_index[i] == -1)
2595  s->coded_score[i] = INT_MAX / 256;
2596  }
2597  }
2598  }
2599 
2600  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2601  s->block_last_index[4] =
2602  s->block_last_index[5] = 0;
2603  s->block[4][0] =
2604  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2605  if (!s->chroma_y_shift) { /* 422 / 444 */
2606  for (i=6; i<12; i++) {
2607  s->block_last_index[i] = 0;
2608  s->block[i][0] = s->block[4][0];
2609  }
2610  }
2611  }
2612 
2613  // non c quantize code returns incorrect block_last_index FIXME
2614  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2615  for (i = 0; i < mb_block_count; i++) {
2616  int j;
2617  if (s->block_last_index[i] > 0) {
2618  for (j = 63; j > 0; j--) {
2619  if (s->block[i][s->intra_scantable.permutated[j]])
2620  break;
2621  }
2622  s->block_last_index[i] = j;
2623  }
2624  }
2625  }
2626 
2627  /* huffman encode */
2628  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2631  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
2632  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2633  break;
2634  case AV_CODEC_ID_MPEG4:
2635  if (CONFIG_MPEG4_ENCODER)
2636  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2637  break;
2638  case AV_CODEC_ID_MSMPEG4V2:
2639  case AV_CODEC_ID_MSMPEG4V3:
2640  case AV_CODEC_ID_WMV1:
2642  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2643  break;
2644  case AV_CODEC_ID_WMV2:
2645  if (CONFIG_WMV2_ENCODER)
2646  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2647  break;
2648  case AV_CODEC_ID_H261:
2649  if (CONFIG_H261_ENCODER)
2650  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2651  break;
2652  case AV_CODEC_ID_H263:
2653  case AV_CODEC_ID_H263P:
2654  case AV_CODEC_ID_FLV1:
2655  case AV_CODEC_ID_RV10:
2656  case AV_CODEC_ID_RV20:
2657  if (CONFIG_H263_ENCODER)
2658  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2659  break;
2660  case AV_CODEC_ID_MJPEG:
2661  case AV_CODEC_ID_AMV:
2662  if (CONFIG_MJPEG_ENCODER)
2663  ff_mjpeg_encode_mb(s, s->block);
2664  break;
2665  default:
2666  av_assert1(0);
2667  }
2668 }
2669 
2670 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2671 {
2672  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2673  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2674  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2675 }
2676 
2678  int i;
2679 
2680  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2681 
2682  /* MPEG-1 */
2683  d->mb_skip_run= s->mb_skip_run;
2684  for(i=0; i<3; i++)
2685  d->last_dc[i] = s->last_dc[i];
2686 
2687  /* statistics */
2688  d->mv_bits= s->mv_bits;
2689  d->i_tex_bits= s->i_tex_bits;
2690  d->p_tex_bits= s->p_tex_bits;
2691  d->i_count= s->i_count;
2692  d->f_count= s->f_count;
2693  d->b_count= s->b_count;
2694  d->skip_count= s->skip_count;
2695  d->misc_bits= s->misc_bits;
2696  d->last_bits= 0;
2697 
2698  d->mb_skipped= 0;
2699  d->qscale= s->qscale;
2700  d->dquant= s->dquant;
2701 
2703 }
2704 
2706  int i;
2707 
2708  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2709  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2710 
2711  /* MPEG-1 */
2712  d->mb_skip_run= s->mb_skip_run;
2713  for(i=0; i<3; i++)
2714  d->last_dc[i] = s->last_dc[i];
2715 
2716  /* statistics */
2717  d->mv_bits= s->mv_bits;
2718  d->i_tex_bits= s->i_tex_bits;
2719  d->p_tex_bits= s->p_tex_bits;
2720  d->i_count= s->i_count;
2721  d->f_count= s->f_count;
2722  d->b_count= s->b_count;
2723  d->skip_count= s->skip_count;
2724  d->misc_bits= s->misc_bits;
2725 
2726  d->mb_intra= s->mb_intra;
2727  d->mb_skipped= s->mb_skipped;
2728  d->mv_type= s->mv_type;
2729  d->mv_dir= s->mv_dir;
2730  d->pb= s->pb;
2731  if(s->data_partitioning){
2732  d->pb2= s->pb2;
2733  d->tex_pb= s->tex_pb;
2734  }
2735  d->block= s->block;
2736  for(i=0; i<8; i++)
2737  d->block_last_index[i]= s->block_last_index[i];
2739  d->qscale= s->qscale;
2740 
2742 }
2743 
/**
 * Trial-encode the current macroblock as candidate `type` into one of two
 * scratch bitstreams / block sets (double-buffered via *next_block), score
 * the result, and keep it in `best` when the score beats *dmin.
 * NOTE(review): this rendered listing elided the parameter line carrying the
 * PutBitContext pb/pb2/tex_pb arrays (source line 2745) and source line 2777
 * inside the FF_MB_DECISION_RD branch — verify against the full file.
 */
2744 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2746  int *dmin, int *next_block, int motion_x, int motion_y)
2747 {
2748  int score;
2749  uint8_t *dest_backup[3];
2750 
 /* Restore the pre-encode coder state before trying this candidate. */
2751  copy_context_before_encode(s, backup, type);
2752 
 /* Select the scratch block set and bit buffers for this trial. */
2753  s->block= s->blocks[*next_block];
2754  s->pb= pb[*next_block];
2755  if(s->data_partitioning){
2756  s->pb2 = pb2 [*next_block];
2757  s->tex_pb= tex_pb[*next_block];
2758  }
2759 
 /* While a previous best result occupies s->dest, reconstruct into the
  * RD scratchpad instead, remembering the real destination pointers. */
2760  if(*next_block){
2761  memcpy(dest_backup, s->dest, sizeof(s->dest));
2762  s->dest[0] = s->sc.rd_scratchpad;
2763  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2764  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2765  av_assert0(s->linesize >= 32); //FIXME
2766  }
2767 
2768  encode_mb(s, motion_x, motion_y);
2769 
 /* Rate: total bits produced for this macroblock across all partitions. */
2770  score= put_bits_count(&s->pb);
2771  if(s->data_partitioning){
2772  score+= put_bits_count(&s->pb2);
2773  score+= put_bits_count(&s->tex_pb);
2774  }
2775 
 /* Full RD decision: score = bits*lambda2 + distortion (SSE of the MB). */
2776  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2778 
2779  score *= s->lambda2;
2780  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2781  }
2782 
 /* Put the real destination pointers back. */
2783  if(*next_block){
2784  memcpy(s->dest, dest_backup, sizeof(s->dest));
2785  }
2786 
 /* New best candidate: record it and flip to the other scratch buffer so
  * the winning data is not overwritten by the next trial. */
2787  if(score<*dmin){
2788  *dmin= score;
2789  *next_block^=1;
2790 
2791  copy_context_after_encode(best, s, type);
2792  }
2793 }
2794 
2795 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2796  uint32_t *sq = ff_square_tab + 256;
2797  int acc=0;
2798  int x,y;
2799 
2800  if(w==16 && h==16)
2801  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2802  else if(w==8 && h==8)
2803  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2804 
2805  for(y=0; y<h; y++){
2806  for(x=0; x<w; x++){
2807  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2808  }
2809  }
2810 
2811  av_assert2(acc>=0);
2812 
2813  return acc;
2814 }
2815 
2816 static int sse_mb(MpegEncContext *s){
2817  int w= 16;
2818  int h= 16;
2819 
2820  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2821  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2822 
2823  if(w==16 && h==16)
2824  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2825  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2826  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2827  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2828  }else{
2829  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2830  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2831  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2832  }
2833  else
2834  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2835  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2836  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2837 }
2838 
2840  MpegEncContext *s= *(void**)arg;
2841 
2842 
2843  s->me.pre_pass=1;
2844  s->me.dia_size= s->avctx->pre_dia_size;
2845  s->first_slice_line=1;
2846  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2847  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2849  }
2850  s->first_slice_line=0;
2851  }
2852 
2853  s->me.pre_pass=0;
2854 
2855  return 0;
2856 }
2857 
2859  MpegEncContext *s= *(void**)arg;
2860 
2862 
2863  s->me.dia_size= s->avctx->dia_size;
2864  s->first_slice_line=1;
2865  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2866  s->mb_x=0; //for block init below
2868  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2869  s->block_index[0]+=2;
2870  s->block_index[1]+=2;
2871  s->block_index[2]+=2;
2872  s->block_index[3]+=2;
2873 
2874  /* compute motion vector & mb_type and store in context */
2877  else
2879  }
2880  s->first_slice_line=0;
2881  }
2882  return 0;
2883 }
2884 
/**
 * Slice-thread worker: compute per-macroblock luma variance and mean for
 * this slice context's row range, storing them into the current picture's
 * mb_var / mb_mean tables and accumulating the variance sum for rate control.
 * NOTE(review): source line 2889 was elided in this rendered listing —
 * verify against the full file before modifying.
 */
2885 static int mb_var_thread(AVCodecContext *c, void *arg){
2886  MpegEncContext *s= *(void**)arg;
2887  int mb_x, mb_y;
2888 
2890 
2891  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2892  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
 /* Top-left corner of this 16x16 luma macroblock in the source frame. */
2893  int xx = mb_x * 16;
2894  int yy = mb_y * 16;
2895  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2896  int varc;
2897  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2898 
 /* Variance ~ E[x^2] - E[x]^2 over the 256 pixels: pix_norm1 is the sum
  * of squares, (sum*sum)>>8 approximates sum^2/256; the +500+128 terms
  * are rounding/bias constants before the final >>8 scaling. */
2899  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2900  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2901 
2902  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
 /* Mean = sum/256, rounded to nearest. */
2903  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2904  s->me.mb_var_sum_temp += varc;
2905  }
2906  }
2907  return 0;
2908 }
2909 
2911  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2912  if(s->partitioned_frame){
2914  }
2915 
2916  ff_mpeg4_stuffing(&s->pb);
2917  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2919  }
2920 
2922  flush_put_bits(&s->pb);
2923 
2924  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2925  s->misc_bits+= get_bits_diff(s);
2926 }
2927 
2929 {
2930  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2931  int offset = put_bits_count(&s->pb);
2932  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2933  int gobn = s->mb_y / s->gob_index;
2934  int pred_x, pred_y;
2935  if (CONFIG_H263_ENCODER)
2936  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2937  bytestream_put_le32(&ptr, offset);
2938  bytestream_put_byte(&ptr, s->qscale);
2939  bytestream_put_byte(&ptr, gobn);
2940  bytestream_put_le16(&ptr, mba);
2941  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2942  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2943  /* 4MV not implemented */
2944  bytestream_put_byte(&ptr, 0); /* hmv2 */
2945  bytestream_put_byte(&ptr, 0); /* vmv2 */
2946 }
2947 
2948 static void update_mb_info(MpegEncContext *s, int startcode)
2949 {
2950  if (!s->mb_info)
2951  return;
2952  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2953  s->mb_info_size += 12;
2954  s->prev_mb_info = s->last_mb_info;
2955  }
2956  if (startcode) {
2957  s->prev_mb_info = put_bits_count(&s->pb)/8;
2958  /* This might have incremented mb_info_size above, and we return without
2959  * actually writing any info into that slot yet. But in that case,
2960  * this will be called again at the start of the after writing the
2961  * start code, actually writing the mb info. */
2962  return;
2963  }
2964 
2965  s->last_mb_info = put_bits_count(&s->pb)/8;
2966  if (!s->mb_info_size)
2967  s->mb_info_size += 12;
2968  write_mb_info(s);
2969 }
2970 
/**
 * Grow the shared bitstream output buffer when fewer than `threshold` bytes
 * remain, by at least `size_increase` bytes, rebasing the PutBitContext and
 * the lastgob / vbv_delay pointers into the new buffer.
 * Only done for single-slice encoding and only when the PutBitContext still
 * points at the codec-internal byte buffer.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure or overflow,
 *         AVERROR(EINVAL) if the buffer is still too small afterwards.
 * NOTE(review): source line 2995 (presumably the av_free of the old
 * byte_buffer) was elided in this rendered listing — verify in the full file.
 */
2971 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2972 {
2973  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2974  && s->slice_context_count == 1
2975  && s->pb.buf == s->avctx->internal->byte_buffer) {
 /* Remember the pointers as offsets so they can be rebased below. */
2976  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2977  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2978 
2979  uint8_t *new_buffer = NULL;
2980  int new_buffer_size = 0;
2981 
 /* Guard against int overflow of the bit-position arithmetic. */
2982  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2983  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2984  return AVERROR(ENOMEM);
2985  }
2986 
2987  emms_c();
2988 
2989  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2990  s->avctx->internal->byte_buffer_size + size_increase);
2991  if (!new_buffer)
2992  return AVERROR(ENOMEM);
2993 
 /* Carry over the bitstream written so far, then swap in the new buffer. */
2994  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
2996  s->avctx->internal->byte_buffer = new_buffer;
2997  s->avctx->internal->byte_buffer_size = new_buffer_size;
2998  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2999  s->ptr_lastgob = s->pb.buf + lastgob_pos;
3000  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
3001  }
 /* Still not enough room (e.g. multi-slice case where we cannot grow). */
3002  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
3003  return AVERROR(EINVAL);
3004  return 0;
3005 }
3006 
3007 static int encode_thread(AVCodecContext *c, void *arg){
3008  MpegEncContext *s= *(void**)arg;
3009  int mb_x, mb_y;
3010  int chr_h= 16>>s->chroma_y_shift;
3011  int i, j;
3012  MpegEncContext best_s = { 0 }, backup_s;
3013  uint8_t bit_buf[2][MAX_MB_BYTES];
3014  uint8_t bit_buf2[2][MAX_MB_BYTES];
3015  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
3016  PutBitContext pb[2], pb2[2], tex_pb[2];
3017 
3019 
3020  for(i=0; i<2; i++){
3021  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
3022  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
3023  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
3024  }
3025 
3026  s->last_bits= put_bits_count(&s->pb);
3027  s->mv_bits=0;
3028  s->misc_bits=0;
3029  s->i_tex_bits=0;
3030  s->p_tex_bits=0;
3031  s->i_count=0;
3032  s->f_count=0;
3033  s->b_count=0;
3034  s->skip_count=0;
3035 
3036  for(i=0; i<3; i++){
3037  /* init last dc values */
3038  /* note: quant matrix value (8) is implied here */
3039  s->last_dc[i] = 128 << s->intra_dc_precision;
3040 
3041  s->current_picture.encoding_error[i] = 0;
3042  }
3043  if(s->codec_id==AV_CODEC_ID_AMV){
3044  s->last_dc[0] = 128*8/13;
3045  s->last_dc[1] = 128*8/14;
3046  s->last_dc[2] = 128*8/14;
3047  }
3048  s->mb_skip_run = 0;
3049  memset(s->last_mv, 0, sizeof(s->last_mv));
3050 
3051  s->last_mv_dir = 0;
3052 
3053  switch(s->codec_id){
3054  case AV_CODEC_ID_H263:
3055  case AV_CODEC_ID_H263P:
3056  case AV_CODEC_ID_FLV1:
3057  if (CONFIG_H263_ENCODER)
3058  s->gob_index = H263_GOB_HEIGHT(s->height);
3059  break;
3060  case AV_CODEC_ID_MPEG4:
3061  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3063  break;
3064  }
3065 
3066  s->resync_mb_x=0;
3067  s->resync_mb_y=0;
3068  s->first_slice_line = 1;
3069  s->ptr_lastgob = s->pb.buf;
3070  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
3071  s->mb_x=0;
3072  s->mb_y= mb_y;
3073 
3074  ff_set_qscale(s, s->qscale);
3076 
3077  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3078  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3079  int mb_type= s->mb_type[xy];
3080 // int d;
3081  int dmin= INT_MAX;
3082  int dir;
3083  int size_increase = s->avctx->internal->byte_buffer_size/4
3084  + s->mb_width*MAX_MB_BYTES;
3085 
3086  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
3087  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3088  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3089  return -1;
3090  }
3091  if(s->data_partitioning){
3092  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3093  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3094  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3095  return -1;
3096  }
3097  }
3098 
3099  s->mb_x = mb_x;
3100  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3102 
3103  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3105  xy= s->mb_y*s->mb_stride + s->mb_x;
3106  mb_type= s->mb_type[xy];
3107  }
3108 
3109  /* write gob / video packet header */
3110  if(s->rtp_mode){
3111  int current_packet_size, is_gob_start;
3112 
3113  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3114 
3115  is_gob_start = s->rtp_payload_size &&
3116  current_packet_size >= s->rtp_payload_size &&
3117  mb_y + mb_x > 0;
3118 
3119  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3120 
3121  switch(s->codec_id){
3122  case AV_CODEC_ID_H263:
3123  case AV_CODEC_ID_H263P:
3124  if(!s->h263_slice_structured)
3125  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3126  break;
3128  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3130  if(s->mb_skip_run) is_gob_start=0;
3131  break;
3132  case AV_CODEC_ID_MJPEG:
3133  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3134  break;
3135  }
3136 
3137  if(is_gob_start){
3138  if(s->start_mb_y != mb_y || mb_x!=0){
3139  write_slice_end(s);
3140 
3141  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3143  }
3144  }
3145 
3146  av_assert2((put_bits_count(&s->pb)&7) == 0);
3147  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3148 
3149  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3150  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3151  int d = 100 / s->error_rate;
3152  if(r % d == 0){
3153  current_packet_size=0;
3154  s->pb.buf_ptr= s->ptr_lastgob;
3155  assert(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3156  }
3157  }
3158 
3159 #if FF_API_RTP_CALLBACK
3161  if (s->avctx->rtp_callback){
3162  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3163  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3164  }
3166 #endif
3167  update_mb_info(s, 1);
3168 
3169  switch(s->codec_id){
3170  case AV_CODEC_ID_MPEG4:
3171  if (CONFIG_MPEG4_ENCODER) {
3174  }
3175  break;
3178  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3181  }
3182  break;
3183  case AV_CODEC_ID_H263:
3184  case AV_CODEC_ID_H263P:
3185  if (CONFIG_H263_ENCODER)
3186  ff_h263_encode_gob_header(s, mb_y);
3187  break;
3188  }
3189 
3190  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3191  int bits= put_bits_count(&s->pb);
3192  s->misc_bits+= bits - s->last_bits;
3193  s->last_bits= bits;
3194  }
3195 
3196  s->ptr_lastgob += current_packet_size;
3197  s->first_slice_line=1;
3198  s->resync_mb_x=mb_x;
3199  s->resync_mb_y=mb_y;
3200  }
3201  }
3202 
3203  if( (s->resync_mb_x == s->mb_x)
3204  && s->resync_mb_y+1 == s->mb_y){
3205  s->first_slice_line=0;
3206  }
3207 
3208  s->mb_skipped=0;
3209  s->dquant=0; //only for QP_RD
3210 
3211  update_mb_info(s, 0);
3212 
3213  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3214  int next_block=0;
3215  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3216 
3217  copy_context_before_encode(&backup_s, s, -1);
3218  backup_s.pb= s->pb;
3221  if(s->data_partitioning){
3222  backup_s.pb2= s->pb2;
3223  backup_s.tex_pb= s->tex_pb;
3224  }
3225 
3226  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3227  s->mv_dir = MV_DIR_FORWARD;
3228  s->mv_type = MV_TYPE_16X16;
3229  s->mb_intra= 0;
3230  s->mv[0][0][0] = s->p_mv_table[xy][0];
3231  s->mv[0][0][1] = s->p_mv_table[xy][1];
3232  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3233  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3234  }
3235  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3236  s->mv_dir = MV_DIR_FORWARD;
3237  s->mv_type = MV_TYPE_FIELD;
3238  s->mb_intra= 0;
3239  for(i=0; i<2; i++){
3240  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3241  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3242  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3243  }
3244  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3245  &dmin, &next_block, 0, 0);
3246  }
3247  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3248  s->mv_dir = MV_DIR_FORWARD;
3249  s->mv_type = MV_TYPE_16X16;
3250  s->mb_intra= 0;
3251  s->mv[0][0][0] = 0;
3252  s->mv[0][0][1] = 0;
3253  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3254  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3255  }
3256  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3257  s->mv_dir = MV_DIR_FORWARD;
3258  s->mv_type = MV_TYPE_8X8;
3259  s->mb_intra= 0;
3260  for(i=0; i<4; i++){
3261  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3262  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3263  }
3264  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3265  &dmin, &next_block, 0, 0);
3266  }
3267  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3268  s->mv_dir = MV_DIR_FORWARD;
3269  s->mv_type = MV_TYPE_16X16;
3270  s->mb_intra= 0;
3271  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3272  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3273  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3274  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3275  }
3276  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3277  s->mv_dir = MV_DIR_BACKWARD;
3278  s->mv_type = MV_TYPE_16X16;
3279  s->mb_intra= 0;
3280  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3281  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3282  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3283  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3284  }
3285  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3287  s->mv_type = MV_TYPE_16X16;
3288  s->mb_intra= 0;
3289  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3290  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3291  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3292  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3293  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3294  &dmin, &next_block, 0, 0);
3295  }
3296  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3297  s->mv_dir = MV_DIR_FORWARD;
3298  s->mv_type = MV_TYPE_FIELD;
3299  s->mb_intra= 0;
3300  for(i=0; i<2; i++){
3301  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3302  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3303  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3304  }
3305  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3306  &dmin, &next_block, 0, 0);
3307  }
3308  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3309  s->mv_dir = MV_DIR_BACKWARD;
3310  s->mv_type = MV_TYPE_FIELD;
3311  s->mb_intra= 0;
3312  for(i=0; i<2; i++){
3313  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3314  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3315  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3316  }
3317  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3318  &dmin, &next_block, 0, 0);
3319  }
3320  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3322  s->mv_type = MV_TYPE_FIELD;
3323  s->mb_intra= 0;
3324  for(dir=0; dir<2; dir++){
3325  for(i=0; i<2; i++){
3326  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3327  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3328  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3329  }
3330  }
3331  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3332  &dmin, &next_block, 0, 0);
3333  }
3334  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3335  s->mv_dir = 0;
3336  s->mv_type = MV_TYPE_16X16;
3337  s->mb_intra= 1;
3338  s->mv[0][0][0] = 0;
3339  s->mv[0][0][1] = 0;
3340  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3341  &dmin, &next_block, 0, 0);
3342  if(s->h263_pred || s->h263_aic){
3343  if(best_s.mb_intra)
3344  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3345  else
3346  ff_clean_intra_table_entries(s); //old mode?
3347  }
3348  }
3349 
3350  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3351  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3352  const int last_qp= backup_s.qscale;
3353  int qpi, qp, dc[6];
3354  int16_t ac[6][16];
3355  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3356  static const int dquant_tab[4]={-1,1,-2,2};
3357  int storecoefs = s->mb_intra && s->dc_val[0];
3358 
3359  av_assert2(backup_s.dquant == 0);
3360 
3361  //FIXME intra
3362  s->mv_dir= best_s.mv_dir;
3363  s->mv_type = MV_TYPE_16X16;
3364  s->mb_intra= best_s.mb_intra;
3365  s->mv[0][0][0] = best_s.mv[0][0][0];
3366  s->mv[0][0][1] = best_s.mv[0][0][1];
3367  s->mv[1][0][0] = best_s.mv[1][0][0];
3368  s->mv[1][0][1] = best_s.mv[1][0][1];
3369 
3370  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3371  for(; qpi<4; qpi++){
3372  int dquant= dquant_tab[qpi];
3373  qp= last_qp + dquant;
3374  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3375  continue;
3376  backup_s.dquant= dquant;
3377  if(storecoefs){
3378  for(i=0; i<6; i++){
3379  dc[i]= s->dc_val[0][ s->block_index[i] ];
3380  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3381  }
3382  }
3383 
3384  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3385  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3386  if(best_s.qscale != qp){
3387  if(storecoefs){
3388  for(i=0; i<6; i++){
3389  s->dc_val[0][ s->block_index[i] ]= dc[i];
3390  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3391  }
3392  }
3393  }
3394  }
3395  }
3396  }
3397  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3398  int mx= s->b_direct_mv_table[xy][0];
3399  int my= s->b_direct_mv_table[xy][1];
3400 
3401  backup_s.dquant = 0;
3403  s->mb_intra= 0;
3404  ff_mpeg4_set_direct_mv(s, mx, my);
3405  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3406  &dmin, &next_block, mx, my);
3407  }
3408  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3409  backup_s.dquant = 0;
3411  s->mb_intra= 0;
3412  ff_mpeg4_set_direct_mv(s, 0, 0);
3413  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3414  &dmin, &next_block, 0, 0);
3415  }
3416  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3417  int coded=0;
3418  for(i=0; i<6; i++)
3419  coded |= s->block_last_index[i];
3420  if(coded){
3421  int mx,my;
3422  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3423  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3424  mx=my=0; //FIXME find the one we actually used
3425  ff_mpeg4_set_direct_mv(s, mx, my);
3426  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3427  mx= s->mv[1][0][0];
3428  my= s->mv[1][0][1];
3429  }else{
3430  mx= s->mv[0][0][0];
3431  my= s->mv[0][0][1];
3432  }
3433 
3434  s->mv_dir= best_s.mv_dir;
3435  s->mv_type = best_s.mv_type;
3436  s->mb_intra= 0;
3437 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3438  s->mv[0][0][1] = best_s.mv[0][0][1];
3439  s->mv[1][0][0] = best_s.mv[1][0][0];
3440  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3441  backup_s.dquant= 0;
3442  s->skipdct=1;
3443  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3444  &dmin, &next_block, mx, my);
3445  s->skipdct=0;
3446  }
3447  }
3448 
3449  s->current_picture.qscale_table[xy] = best_s.qscale;
3450 
3451  copy_context_after_encode(s, &best_s, -1);
3452 
3453  pb_bits_count= put_bits_count(&s->pb);
3454  flush_put_bits(&s->pb);
3455  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3456  s->pb= backup_s.pb;
3457 
3458  if(s->data_partitioning){
3459  pb2_bits_count= put_bits_count(&s->pb2);
3460  flush_put_bits(&s->pb2);
3461  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3462  s->pb2= backup_s.pb2;
3463 
3464  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3465  flush_put_bits(&s->tex_pb);
3466  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3467  s->tex_pb= backup_s.tex_pb;
3468  }
3469  s->last_bits= put_bits_count(&s->pb);
3470 
3471  if (CONFIG_H263_ENCODER &&
3474 
3475  if(next_block==0){ //FIXME 16 vs linesize16
3476  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3477  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3478  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3479  }
3480 
3483  } else {
3484  int motion_x = 0, motion_y = 0;
3486  // only one MB-Type possible
3487 
3488  switch(mb_type){
3490  s->mv_dir = 0;
3491  s->mb_intra= 1;
3492  motion_x= s->mv[0][0][0] = 0;
3493  motion_y= s->mv[0][0][1] = 0;
3494  break;
3496  s->mv_dir = MV_DIR_FORWARD;
3497  s->mb_intra= 0;
3498  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3499  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3500  break;
3502  s->mv_dir = MV_DIR_FORWARD;
3503  s->mv_type = MV_TYPE_FIELD;
3504  s->mb_intra= 0;
3505  for(i=0; i<2; i++){
3506  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3507  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3508  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3509  }
3510  break;
3512  s->mv_dir = MV_DIR_FORWARD;
3513  s->mv_type = MV_TYPE_8X8;
3514  s->mb_intra= 0;
3515  for(i=0; i<4; i++){
3516  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3517  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3518  }
3519  break;
3521  if (CONFIG_MPEG4_ENCODER) {
3523  s->mb_intra= 0;
3524  motion_x=s->b_direct_mv_table[xy][0];
3525  motion_y=s->b_direct_mv_table[xy][1];
3526  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3527  }
3528  break;
3530  if (CONFIG_MPEG4_ENCODER) {
3532  s->mb_intra= 0;
3533  ff_mpeg4_set_direct_mv(s, 0, 0);
3534  }
3535  break;
3538  s->mb_intra= 0;
3539  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3540  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3541  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3542  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3543  break;
3545  s->mv_dir = MV_DIR_BACKWARD;
3546  s->mb_intra= 0;
3547  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3548  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3549  break;
3551  s->mv_dir = MV_DIR_FORWARD;
3552  s->mb_intra= 0;
3553  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3554  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3555  break;
3557  s->mv_dir = MV_DIR_FORWARD;
3558  s->mv_type = MV_TYPE_FIELD;
3559  s->mb_intra= 0;
3560  for(i=0; i<2; i++){
3561  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3562  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3563  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3564  }
3565  break;
3567  s->mv_dir = MV_DIR_BACKWARD;
3568  s->mv_type = MV_TYPE_FIELD;
3569  s->mb_intra= 0;
3570  for(i=0; i<2; i++){
3571  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3572  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3573  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3574  }
3575  break;
3578  s->mv_type = MV_TYPE_FIELD;
3579  s->mb_intra= 0;
3580  for(dir=0; dir<2; dir++){
3581  for(i=0; i<2; i++){
3582  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3583  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3584  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3585  }
3586  }
3587  break;
3588  default:
3589  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3590  }
3591 
3592  encode_mb(s, motion_x, motion_y);
3593 
3594  // RAL: Update last macroblock type
3595  s->last_mv_dir = s->mv_dir;
3596 
3597  if (CONFIG_H263_ENCODER &&
3600 
3602  }
3603 
3604  /* clean the MV table in IPS frames for direct mode in B-frames */
3605  if(s->mb_intra /* && I,P,S_TYPE */){
3606  s->p_mv_table[xy][0]=0;
3607  s->p_mv_table[xy][1]=0;
3608  }
3609 
3610  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3611  int w= 16;
3612  int h= 16;
3613 
3614  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3615  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3616 
3618  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3619  s->dest[0], w, h, s->linesize);
3621  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3622  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3624  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3625  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3626  }
3627  if(s->loop_filter){
3628  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3630  }
3631  ff_dlog(s->avctx, "MB %d %d bits\n",
3632  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3633  }
3634  }
3635 
3636  //not beautiful here but we must write it before flushing so it has to be here
3639 
3640  write_slice_end(s);
3641 
3642 #if FF_API_RTP_CALLBACK
3644  /* Send the last GOB if RTP */
3645  if (s->avctx->rtp_callback) {
3646  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3647  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3648  /* Call the RTP callback to send the last GOB */
3649  emms_c();
3650  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3651  }
3653 #endif
3654 
3655  return 0;
3656 }
3657 
/* Fold one per-slice statistics field from a thread context (src) into the
 * main context (dst) and zero the source so a later merge cannot count it
 * twice.  Relies on `dst` and `src` being the pointer names in the calling
 * function.  Wrapped in do { } while (0) so the two statements cannot be
 * split apart by an un-braced if/else at the call site. */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
3660  MERGE(me.scene_change_score);
3661  MERGE(me.mc_mb_var_sum_temp);
3662  MERGE(me.mb_var_sum_temp);
3663 }
3664 
3666  int i;
3667 
3668  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3669  MERGE(dct_count[1]);
3670  MERGE(mv_bits);
3671  MERGE(i_tex_bits);
3672  MERGE(p_tex_bits);
3673  MERGE(i_count);
3674  MERGE(f_count);
3675  MERGE(b_count);
3676  MERGE(skip_count);
3677  MERGE(misc_bits);
3678  MERGE(er.error_count);
3683 
3684  if (dst->noise_reduction){
3685  for(i=0; i<64; i++){
3686  MERGE(dct_error_sum[0][i]);
3687  MERGE(dct_error_sum[1][i]);
3688  }
3689  }
3690 
3691  assert(put_bits_count(&src->pb) % 8 ==0);
3692  assert(put_bits_count(&dst->pb) % 8 ==0);
3693  avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3694  flush_put_bits(&dst->pb);
3695 }
3696 
/*
 * estimate_qp(): choose the quantizer/lambda for the current picture,
 * either from a previously queued lambda (s->next_lambda), from the rate
 * controller when qscale is not fixed, or per-MB via the adaptive-quant
 * lambda table.  Returns 0 on success, -1 if rate estimation failed.
 *
 * NOTE(review): this listing is a Doxygen scrape and several original
 * source lines were dropped; each gap is flagged below.  Do not treat
 * this block as compilable — restore it from the real mpegvideo_enc.c.
 */
3697 static int estimate_qp(MpegEncContext *s, int dry_run){
3698  if (s->next_lambda){
/* NOTE(review): original lines 3699-3700 missing here — presumably the
 * assignment of s->next_lambda into the picture quality field(s);
 * confirm against upstream before relying on this listing. */
3701  if(!dry_run) s->next_lambda= 0;
3702  } else if (!s->fixed_qscale) {
3703  int quality;
3704 #if CONFIG_LIBXVID
/* NOTE(review): original line 3705 missing — the condition selecting the
 * Xvid rate-control path (otherwise the `else` below is dangling). */
3706  quality = ff_xvid_rate_estimate_qscale(s, dry_run);
3707  else
3708 #endif
3709  quality = ff_rate_estimate_qscale(s, dry_run);
/* NOTE(review): original line 3710 missing — likely the companion
 * s->current_picture_ptr->f->quality = half of this chained assignment. */
3711  s->current_picture.f->quality = quality;
3712  if (s->current_picture.f->quality < 0)
3713  return -1;
3714  }
3715 
3716  if(s->adaptive_quant){
3717  switch(s->codec_id){
3718  case AV_CODEC_ID_MPEG4:
3719  if (CONFIG_MPEG4_ENCODER)
/* NOTE(review): original line 3720 missing — presumably the MPEG-4
 * qscale cleanup call guarded by the if above. */
3721  break;
3722  case AV_CODEC_ID_H263:
3723  case AV_CODEC_ID_H263P:
3724  case AV_CODEC_ID_FLV1:
3725  if (CONFIG_H263_ENCODER)
/* NOTE(review): original line 3726 missing — presumably the H.263
 * qscale cleanup call guarded by the if above. */
3727  break;
3728  default:
3729  ff_init_qscale_tab(s);
3730  }
3731 
3732  s->lambda= s->lambda_table[0];
3733  //FIXME broken
3734  }else
3735  s->lambda = s->current_picture.f->quality;
3736  update_qscale(s);
3737  return 0;
3738 }
3739 
3740 /* must be called before writing the header */
3743  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3744 
3745  if(s->pict_type==AV_PICTURE_TYPE_B){
3746  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3747  assert(s->pb_time > 0 && s->pb_time < s->pp_time);
3748  }else{
3749  s->pp_time= s->time - s->last_non_b_time;
3750  s->last_non_b_time= s->time;
3751  assert(s->picture_number==0 || s->pp_time > 0);
3752  }
3753 }
3754 
3756 {
3757  int i, ret;
3758  int bits;
3759  int context_count = s->slice_context_count;
3760 
3762 
3763  /* Reset the average MB variance */
3764  s->me.mb_var_sum_temp =
3765  s->me.mc_mb_var_sum_temp = 0;
3766 
3767  /* we need to initialize some time vars before we can encode B-frames */
3768  // RAL: Condition added for MPEG1VIDEO
3771  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3772  ff_set_mpeg4_time(s);
3773 
3774  s->me.scene_change_score=0;
3775 
3776 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3777 
3778  if(s->pict_type==AV_PICTURE_TYPE_I){
3779  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3780  else s->no_rounding=0;
3781  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3783  s->no_rounding ^= 1;
3784  }
3785 
3786  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3787  if (estimate_qp(s,1) < 0)
3788  return -1;
3789  ff_get_2pass_fcode(s);
3790  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3792  s->lambda= s->last_lambda_for[s->pict_type];
3793  else
3795  update_qscale(s);
3796  }
3797 
3803  }
3804 
3805  s->mb_intra=0; //for the rate distortion & bit compare functions
3806  for(i=1; i<context_count; i++){
3808  if (ret < 0)
3809  return ret;
3810  }
3811 
3812  if(ff_init_me(s)<0)
3813  return -1;
3814 
3815  /* Estimate motion for every MB */
3816  if(s->pict_type != AV_PICTURE_TYPE_I){
3817  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3818  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3819  if (s->pict_type != AV_PICTURE_TYPE_B) {
3820  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3821  s->me_pre == 2) {
3822  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3823  }
3824  }
3825 
3826  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3827  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3828  /* I-Frame */
3829  for(i=0; i<s->mb_stride*s->mb_height; i++)
3831 
3832  if(!s->fixed_qscale){
3833  /* finding spatial complexity for I-frame rate control */
3834  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3835  }
3836  }
3837  for(i=1; i<context_count; i++){
3839  }
3841  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3842  emms_c();
3843 
3845  s->pict_type == AV_PICTURE_TYPE_P) {
3847  for(i=0; i<s->mb_stride*s->mb_height; i++)
3849  if(s->msmpeg4_version >= 3)
3850  s->no_rounding=1;
3851  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3853  }
3854 
3855  if(!s->umvplus){
3858 
3860  int a,b;
3861  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3863  s->f_code= FFMAX3(s->f_code, a, b);
3864  }
3865 
3866  ff_fix_long_p_mvs(s);
3869  int j;
3870  for(i=0; i<2; i++){
3871  for(j=0; j<2; j++)
3874  }
3875  }
3876  }
3877 
3878  if(s->pict_type==AV_PICTURE_TYPE_B){
3879  int a, b;
3880 
3883  s->f_code = FFMAX(a, b);
3884 
3887  s->b_code = FFMAX(a, b);
3888 
3894  int dir, j;
3895  for(dir=0; dir<2; dir++){
3896  for(i=0; i<2; i++){
3897  for(j=0; j<2; j++){
3900  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3901  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3902  }
3903  }
3904  }
3905  }
3906  }
3907  }
3908 
3909  if (estimate_qp(s, 0) < 0)
3910  return -1;
3911 
3912  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3913  s->pict_type == AV_PICTURE_TYPE_I &&
3914  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3915  s->qscale= 3; //reduce clipping problems
3916 
3917  if (s->out_format == FMT_MJPEG) {
3918  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3919  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3920 
3921  if (s->avctx->intra_matrix) {
3922  chroma_matrix =
3923  luma_matrix = s->avctx->intra_matrix;
3924  }
3925  if (s->avctx->chroma_intra_matrix)
3926  chroma_matrix = s->avctx->chroma_intra_matrix;
3927 
3928  /* for mjpeg, we do include qscale in the matrix */
3929  for(i=1;i<64;i++){
3930  int j = s->idsp.idct_permutation[i];
3931 
3932  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3933  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3934  }
3935  s->y_dc_scale_table=
3937  s->chroma_intra_matrix[0] =
3940  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3942  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3943  s->qscale= 8;
3944  }
3945  if(s->codec_id == AV_CODEC_ID_AMV){
3946  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3947  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3948  for(i=1;i<64;i++){
3949  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3950 
3951  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3952  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3953  }
3954  s->y_dc_scale_table= y;
3955  s->c_dc_scale_table= c;
3956  s->intra_matrix[0] = 13;
3957  s->chroma_intra_matrix[0] = 14;
3959  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3961  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3962  s->qscale= 8;
3963  }
3964 
3965  //FIXME var duplication
3967  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3970 
3971  if (s->current_picture.f->key_frame)
3972  s->picture_in_gop_number=0;
3973 
3974  s->mb_x = s->mb_y = 0;
3975  s->last_bits= put_bits_count(&s->pb);
3976  switch(s->out_format) {
3977  case FMT_MJPEG:
3978  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3981  break;
3982  case FMT_H261:
3983  if (CONFIG_H261_ENCODER)
3984  ff_h261_encode_picture_header(s, picture_number);
3985  break;
3986  case FMT_H263:
3987  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3988  ff_wmv2_encode_picture_header(s, picture_number);
3989  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3990  ff_msmpeg4_encode_picture_header(s, picture_number);
3991  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3992  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3993  if (ret < 0)
3994  return ret;
3995  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3996  ret = ff_rv10_encode_picture_header(s, picture_number);
3997  if (ret < 0)
3998  return ret;
3999  }
4000  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
4001  ff_rv20_encode_picture_header(s, picture_number);
4002  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
4003  ff_flv_encode_picture_header(s, picture_number);
4004  else if (CONFIG_H263_ENCODER)
4005  ff_h263_encode_picture_header(s, picture_number);
4006  break;
4007  case FMT_MPEG1:
4008  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER)
4009  ff_mpeg1_encode_picture_header(s, picture_number);
4010  break;
4011  default:
4012  av_assert0(0);
4013  }
4014  bits= put_bits_count(&s->pb);
4015  s->header_bits= bits - s->last_bits;
4016 
4017  for(i=1; i<context_count; i++){
4019  }
4020  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
4021  for(i=1; i<context_count; i++){
4022  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
4023  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
4025  }
4026  emms_c();
4027  return 0;
4028 }
4029 
4030 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
4031  const int intra= s->mb_intra;
4032  int i;
4033 
4034  s->dct_count[intra]++;
4035 
4036  for(i=0; i<64; i++){
4037  int level= block[i];
4038 
4039  if(level){
4040  if(level>0){
4041  s->dct_error_sum[intra][i] += level;
4042  level -= s->dct_offset[intra][i];
4043  if(level<0) level=0;
4044  }else{
4045  s->dct_error_sum[intra][i] -= level;
4046  level += s->dct_offset[intra][i];
4047  if(level>0) level=0;
4048  }
4049  block[i]= level;
4050  }
4051  }
4052 }
4053 
4055  int16_t *block, int n,
4056  int qscale, int *overflow){
4057  const int *qmat;
4058  const uint16_t *matrix;
4059  const uint8_t *scantable;
4060  const uint8_t *perm_scantable;
4061  int max=0;
4062  unsigned int threshold1, threshold2;
4063  int bias=0;
4064  int run_tab[65];
4065  int level_tab[65];
4066  int score_tab[65];
4067  int survivor[65];
4068  int survivor_count;
4069  int last_run=0;
4070  int last_level=0;
4071  int last_score= 0;
4072  int last_i;
4073  int coeff[2][64];
4074  int coeff_count[64];
4075  int qmul, qadd, start_i, last_non_zero, i, dc;
4076  const int esc_length= s->ac_esc_length;
4077  uint8_t * length;
4078  uint8_t * last_length;
4079  const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4080  int mpeg2_qscale;
4081 
4082  s->fdsp.fdct(block);
4083 
4084  if(s->dct_error_sum)
4085  s->denoise_dct(s, block);
4086  qmul= qscale*16;
4087  qadd= ((qscale-1)|1)*8;
4088 
4089  if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
4090  else mpeg2_qscale = qscale << 1;
4091 
4092  if (s->mb_intra) {
4093  int q;
4094  scantable= s->intra_scantable.scantable;
4095  perm_scantable= s->intra_scantable.permutated;
4096  if (!s->h263_aic) {
4097  if (n < 4)
4098  q = s->y_dc_scale;
4099  else
4100  q = s->c_dc_scale;
4101  q = q << 3;
4102  } else{
4103  /* For AIC we skip quant/dequant of INTRADC */
4104  q = 1 << 3;
4105  qadd=0;
4106  }
4107 
4108  /* note: block[0] is assumed to be positive */
4109  block[0] = (block[0] + (q >> 1)) / q;
4110  start_i = 1;
4111  last_non_zero = 0;
4112  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4113  matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
4114  if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
4115  bias= 1<<(QMAT_SHIFT-1);
4116 
4117  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4118  length = s->intra_chroma_ac_vlc_length;
4119  last_length= s->intra_chroma_ac_vlc_last_length;
4120  } else {
4121  length = s->intra_ac_vlc_length;
4122  last_length= s->intra_ac_vlc_last_length;
4123  }
4124  } else {
4125  scantable= s->inter_scantable.scantable;
4126  perm_scantable= s->inter_scantable.permutated;
4127  start_i = 0;
4128  last_non_zero = -1;
4129  qmat = s->q_inter_matrix[qscale];
4130  matrix = s->inter_matrix;
4131  length = s->inter_ac_vlc_length;
4132  last_length= s->inter_ac_vlc_last_length;
4133  }
4134  last_i= start_i;
4135 
4136  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4137  threshold2= (threshold1<<1);
4138 
4139  for(i=63; i>=start_i; i--) {
4140  const int j = scantable[i];
4141  int level = block[j] * qmat[j];
4142 
4143  if(((unsigned)(level+threshold1))>threshold2){
4144  last_non_zero = i;
4145  break;
4146  }
4147  }
4148 
4149  for(i=start_i; i<=last_non_zero; i++) {
4150  const int j = scantable[i];
4151  int level = block[j] * qmat[j];
4152 
4153 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4154 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4155  if(((unsigned)(level+threshold1))>threshold2){
4156  if(level>0){
4157  level= (bias + level)>>QMAT_SHIFT;
4158  coeff[0][i]= level;
4159  coeff[1][i]= level-1;
4160 // coeff[2][k]= level-2;
4161  }else{
4162  level= (bias - level)>>QMAT_SHIFT;
4163  coeff[0][i]= -level;
4164  coeff[1][i]= -level+1;
4165 // coeff[2][k]= -level+2;
4166  }
4167  coeff_count[i]= FFMIN(level, 2);
4168  av_assert2(coeff_count[i]);
4169  max |=level;
4170  }else{
4171  coeff[0][i]= (level>>31)|1;
4172  coeff_count[i]= 1;
4173  }
4174  }
4175 
4176  *overflow= s->max_qcoeff < max; //overflow might have happened
4177 
4178  if(last_non_zero < start_i){
4179  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4180  return last_non_zero;
4181  }
4182 
4183  score_tab[start_i]= 0;
4184  survivor[0]= start_i;
4185  survivor_count= 1;
4186 
4187  for(i=start_i; i<=last_non_zero; i++){
4188  int level_index, j, zero_distortion;
4189  int dct_coeff= FFABS(block[ scantable[i] ]);
4190  int best_score=256*256*256*120;
4191 
4192  if (s->fdsp.fdct == ff_fdct_ifast)
4193  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4194  zero_distortion= dct_coeff*dct_coeff;
4195 
4196  for(level_index=0; level_index < coeff_count[i]; level_index++){
4197  int distortion;
4198  int level= coeff[level_index][i];
4199  const int alevel= FFABS(level);
4200  int unquant_coeff;
4201 
4202  av_assert2(level);
4203 
4204  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4205  unquant_coeff= alevel*qmul + qadd;
4206  } else if(s->out_format == FMT_MJPEG) {
4207  j = s->idsp.idct_permutation[scantable[i]];
4208  unquant_coeff = alevel * matrix[j] * 8;
4209  }else{ // MPEG-1
4210  j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4211  if(s->mb_intra){
4212  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4213  unquant_coeff = (unquant_coeff - 1) | 1;
4214  }else{
4215  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4216  unquant_coeff = (unquant_coeff - 1) | 1;
4217  }
4218  unquant_coeff<<= 3;
4219  }
4220 
4221  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4222  level+=64;
4223  if((level&(~127)) == 0){
4224  for(j=survivor_count-1; j>=0; j--){
4225  int run= i - survivor[j];
4226  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4227  score += score_tab[i-run];
4228 
4229  if(score < best_score){
4230  best_score= score;
4231  run_tab[i+1]= run;
4232  level_tab[i+1]= level-64;
4233  }
4234  }
4235 
4236  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4237  for(j=survivor_count-1; j>=0; j--){
4238  int run= i - survivor[j];
4239  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4240  score += score_tab[i-run];
4241  if(score < last_score){
4242  last_score= score;
4243  last_run= run;
4244  last_level= level-64;
4245  last_i= i+1;
4246  }
4247  }
4248  }
4249  }else{
4250  distortion += esc_length*lambda;
4251  for(j=survivor_count-1; j>=0; j--){
4252  int run= i - survivor[j];
4253  int score= distortion + score_tab[i-run];
4254 
4255  if(score < best_score){
4256  best_score= score;
4257  run_tab[i+1]= run;
4258  level_tab[i+1]= level-64;
4259  }
4260  }
4261 
4262  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4263  for(j=survivor_count-1; j>=0; j--){
4264  int run= i - survivor[j];
4265  int score= distortion + score_tab[i-run];
4266  if(score < last_score){
4267  last_score= score;
4268  last_run= run;
4269  last_level= level-64;
4270  last_i= i+1;
4271  }
4272  }
4273  }
4274  }
4275  }
4276 
4277  score_tab[i+1]= best_score;
4278 
4279  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4280  if(last_non_zero <= 27){
4281  for(; survivor_count; survivor_count--){
4282  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4283  break;
4284  }
4285  }else{
4286  for(; survivor_count; survivor_count--){
4287  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4288  break;
4289  }
4290  }
4291 
4292  survivor[ survivor_count++ ]= i+1;
4293  }
4294 
4295  if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
4296  last_score= 256*256*256*120;
4297  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4298  int score= score_tab[i];
4299  if (i)
4300  score += lambda * 2; // FIXME more exact?
4301 
4302  if(score < last_score){
4303  last_score= score;
4304  last_i= i;
4305  last_level= level_tab[i];
4306  last_run= run_tab[i];
4307  }
4308  }
4309  }
4310 
4311  s->coded_score[n] = last_score;
4312 
4313  dc= FFABS(block[0]);
4314  last_non_zero= last_i - 1;
4315  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4316 
4317  if(last_non_zero < start_i)
4318  return last_non_zero;
4319 
4320  if(last_non_zero == 0 && start_i == 0){
4321  int best_level= 0;
4322  int best_score= dc * dc;
4323 
4324  for(i=0; i<coeff_count[0]; i++){
4325  int level= coeff[i][0];
4326  int alevel= FFABS(level);
4327  int unquant_coeff, score, distortion;
4328 
4329  if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
4330  unquant_coeff= (alevel*qmul + qadd)>>3;
4331  } else{ // MPEG-1
4332  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4333  unquant_coeff = (unquant_coeff - 1) | 1;
4334  }
4335  unquant_coeff = (unquant_coeff + 4) >> 3;
4336  unquant_coeff<<= 3 + 3;
4337 
4338  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4339  level+=64;
4340  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4341  else score= distortion + esc_length*lambda;
4342 
4343  if(score < best_score){
4344  best_score= score;
4345  best_level= level - 64;
4346  }
4347  }
4348  block[0]= best_level;
4349  s->coded_score[n] = best_score - dc*dc;
4350  if(best_level == 0) return -1;
4351  else return last_non_zero;
4352  }
4353 
4354  i= last_i;
4355  av_assert2(last_level);
4356 
4357  block[ perm_scantable[last_non_zero] ]= last_level;
4358  i -= last_run + 1;
4359 
4360  for(; i>start_i; i -= run_tab[i] + 1){
4361  block[ perm_scantable[i-1] ]= level_tab[i];
4362  }
4363 
4364  return last_non_zero;
4365 }
4366 
4367 //#define REFINE_STATS 1
/* Scaled 8x8 DCT basis patterns: first index is the (IDCT-permuted)
 * coefficient, second is the transposed spatial position (8*x + y).
 * Filled by build_basis() below — presumably lazily, given the
 * basis[0][0] == 0 check in dct_quantize_refine(); the amplitude scale
 * is 0.25 * (1 << BASIS_SHIFT). */
4368 static int16_t basis[64][64];
4369 
4370 static void build_basis(uint8_t *perm){
4371  int i, j, x, y;
4372  emms_c();
4373  for(i=0; i<8; i++){
4374  for(j=0; j<8; j++){
4375  for(y=0; y<8; y++){
4376  for(x=0; x<8; x++){
4377  double s= 0.25*(1<<BASIS_SHIFT);
4378  int index= 8*i + j;
4379  int perm_index= perm[index];
4380  if(i==0) s*= sqrt(0.5);
4381  if(j==0) s*= sqrt(0.5);
4382  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4383  }
4384  }
4385  }
4386  }
4387 }
4388 
4389 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4390  int16_t *block, int16_t *weight, int16_t *orig,
4391  int n, int qscale){
4392  int16_t rem[64];
4393  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4394  const uint8_t *scantable;
4395  const uint8_t *perm_scantable;
4396 // unsigned int threshold1, threshold2;
4397 // int bias=0;
4398  int run_tab[65];
4399  int prev_run=0;
4400  int prev_level=0;
4401  int qmul, qadd, start_i, last_non_zero, i, dc;
4402  uint8_t * length;
4403  uint8_t * last_length;
4404  int lambda;
4405  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4406 #ifdef REFINE_STATS
4407 static int count=0;
4408 static int after_last=0;
4409 static int to_zero=0;
4410 static int from_zero=0;
4411 static int raise=0;
4412 static int lower=0;
4413 static int messed_sign=0;
4414 #endif
4415 
4416  if(basis[0][0] == 0)
4418 
4419  qmul= qscale*2;
4420  qadd= (qscale-1)|1;
4421  if (s->mb_intra) {
4422  scantable= s->intra_scantable.scantable;
4423  perm_scantable= s->intra_scantable.permutated;
4424  if (!s->h263_aic) {
4425  if (n < 4)
4426  q = s->y_dc_scale;
4427  else
4428  q = s->c_dc_scale;
4429  } else{
4430  /* For AIC we skip quant/dequant of INTRADC */
4431  q = 1;
4432  qadd=0;
4433  }
4434  q <<= RECON_SHIFT-3;
4435  /* note: block[0] is assumed to be positive */
4436  dc= block[0]*q;
4437 // block[0] = (block[0] + (q >> 1)) / q;
4438  start_i = 1;
4439 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4440 // bias= 1<<(QMAT_SHIFT-1);
4441  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4442  length = s->intra_chroma_ac_vlc_length;
4443  last_length= s->intra_chroma_ac_vlc_last_length;
4444  } else {
4445  length = s->intra_ac_vlc_length;
4446  last_length= s->intra_ac_vlc_last_length;
4447  }
4448  } else {
4449  scantable= s->inter_scantable.scantable;
4450  perm_scantable= s->inter_scantable.permutated;
4451  dc= 0;
4452  start_i = 0;
4453  length = s->inter_ac_vlc_length;
4454  last_length= s->inter_ac_vlc_last_length;
4455  }
4456  last_non_zero = s->block_last_index[n];
4457 
4458 #ifdef REFINE_STATS
4459 {START_TIMER
4460 #endif
4461  dc += (1<<(RECON_SHIFT-1));
4462  for(i=0; i<64; i++){
4463  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4464  }
4465 #ifdef REFINE_STATS
4466 STOP_TIMER("memset rem[]")}
4467 #endif
4468  sum=0;
4469  for(i=0; i<64; i++){
4470  int one= 36;
4471  int qns=4;
4472  int w;
4473 
4474  w= FFABS(weight[i]) + qns*one;
4475  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4476 
4477  weight[i] = w;
4478 // w=weight[i] = (63*qns + (w/2)) / w;
4479 
4480  av_assert2(w>0);
4481  av_assert2(w<(1<<6));
4482  sum += w*w;
4483  }
4484  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4485 #ifdef REFINE_STATS
4486 {START_TIMER
4487 #endif
4488  run=0;
4489  rle_index=0;
4490  for(i=start_i; i<=last_non_zero; i++){
4491  int j= perm_scantable[i];
4492  const int level= block[j];
4493  int coeff;
4494 
4495  if(level){
4496  if(level<0) coeff= qmul*level - qadd;
4497  else coeff= qmul*level + qadd;
4498  run_tab[rle_index++]=run;
4499  run=0;
4500 
4501  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4502  }else{
4503  run++;
4504  }
4505  }
4506 #ifdef REFINE_STATS
4507 if(last_non_zero>0){
4508 STOP_TIMER("init rem[]")
4509 }
4510 }
4511 
4512 {START_TIMER
4513 #endif
4514  for(;;){
4515  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4516  int best_coeff=0;
4517  int best_change=0;
4518  int run2, best_unquant_change=0, analyze_gradient;
4519 #ifdef REFINE_STATS
4520 {START_TIMER
4521 #endif
4522  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4523 
4524  if(analyze_gradient){
4525 #ifdef REFINE_STATS
4526 {START_TIMER
4527 #endif
4528  for(i=0; i<64; i++){
4529  int w= weight[i];
4530 
4531  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4532  }
4533 #ifdef REFINE_STATS
4534 STOP_TIMER("rem*w*w")}
4535 {START_TIMER
4536 #endif
4537  s->fdsp.fdct(d1);
4538 #ifdef REFINE_STATS
4539 STOP_TIMER("dct")}
4540 #endif
4541  }
4542 
4543  if(start_i){
4544  const int level= block[0];
4545  int change, old_coeff;
4546 
4547  av_assert2(s->mb_intra);
4548 
4549  old_coeff= q*level;
4550 
4551  for(change=-1; change<=1; change+=2){
4552  int new_level= level + change;
4553  int score, new_coeff;
4554 
4555  new_coeff= q*new_level;
4556  if(new_coeff >= 2048 || new_coeff < 0)
4557  continue;
4558 
4559  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4560  new_coeff - old_coeff);
4561  if(score<best_score){
4562  best_score= score;
4563  best_coeff= 0;
4564  best_change= change;
4565  best_unquant_change= new_coeff - old_coeff;
4566  }
4567  }
4568  }
4569 
4570  run=0;
4571  rle_index=0;
4572  run2= run_tab[rle_index++];
4573  prev_level=0;
4574  prev_run=0;
4575 
4576  for(i=start_i; i<64; i++){
4577  int j= perm_scantable[i];
4578  const int level= block[j];
4579  int change, old_coeff;
4580 
4581  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4582  break;
4583 
4584  if(level){
4585  if(level<0) old_coeff= qmul*level - qadd;
4586  else old_coeff= qmul*level + qadd;
4587  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4588  }else{
4589  old_coeff=0;
4590  run2--;
4591  av_assert2(run2>=0 || i >= last_non_zero );
4592  }
4593 
4594  for(change=-1; change<=1; change+=2){
4595  int new_level= level + change;
4596  int score, new_coeff, unquant_change;
4597 
4598  score=0;
4599  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4600  continue;
4601 
4602  if(new_level){
4603  if(new_level<0) new_coeff= qmul*new_level - qadd;
4604  else new_coeff= qmul*new_level + qadd;
4605  if(new_coeff >= 2048 || new_coeff <= -2048)
4606  continue;
4607  //FIXME check for overflow
4608 
4609  if(level){
4610  if(level < 63 && level > -63){
4611  if(i < last_non_zero)
4612  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4613  - length[UNI_AC_ENC_INDEX(run, level+64)];
4614  else
4615  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4616  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4617  }
4618  }else{
4619  av_assert2(FFABS(new_level)==1);
4620 
4621  if(analyze_gradient){
4622  int g= d1[ scantable[i] ];
4623  if(g && (g^new_level) >= 0)
4624  continue;
4625  }
4626 
4627  if(i < last_non_zero){
4628  int next_i= i + run2 + 1;
4629  int next_level= block[ perm_scantable[next_i] ] + 64;
4630 
4631  if(next_level&(~127))
4632  next_level= 0;
4633 
4634  if(next_i < last_non_zero)
4635  score += length[UNI_AC_ENC_INDEX(run, 65)]
4636  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4637  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4638  else
4639  score += length[UNI_AC_ENC_INDEX(run, 65)]
4640  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4641  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4642  }else{
4643  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4644  if(prev_level){
4645  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4646  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4647  }
4648  }
4649  }
4650  }else{
4651  new_coeff=0;
4652  av_assert2(FFABS(level)==1);
4653 
4654  if(i < last_non_zero){
4655  int next_i= i + run2 + 1;
4656  int next_level= block[ perm_scantable[next_i] ] + 64;
4657 
4658  if(next_level&(~127))
4659  next_level= 0;
4660 
4661  if(next_i < last_non_zero)
4662  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4663  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4664  - length[UNI_AC_ENC_INDEX(run, 65)];
4665  else
4666  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4667  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4668  - length[UNI_AC_ENC_INDEX(run, 65)];
4669  }else{
4670  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4671  if(prev_level){
4672  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4673  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4674  }
4675  }
4676  }
4677 
4678  score *= lambda;
4679 
4680  unquant_change= new_coeff - old_coeff;
4681  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4682 
4683  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4684  unquant_change);
4685  if(score<best_score){
4686  best_score= score;
4687  best_coeff= i;
4688  best_change= change;
4689  best_unquant_change= unquant_change;
4690  }
4691  }
4692  if(level){
4693  prev_level= level + 64;
4694  if(prev_level&(~127))
4695  prev_level= 0;
4696  prev_run= run;
4697  run=0;
4698  }else{
4699  run++;
4700  }
4701  }
4702 #ifdef REFINE_STATS
4703 STOP_TIMER("iterative step")}
4704 #endif
4705 
4706  if(best_change){
4707  int j= perm_scantable[ best_coeff ];
4708 
4709  block[j] += best_change;
4710 
4711  if(best_coeff > last_non_zero){
4712  last_non_zero= best_coeff;
4713  av_assert2(block[j]);
4714 #ifdef REFINE_STATS
4715 after_last++;
4716 #endif
4717  }else{
4718 #ifdef REFINE_STATS
4719 if(block[j]){
4720  if(block[j] - best_change){
4721  if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4722  raise++;
4723  }else{
4724  lower++;
4725  }
4726  }else{
4727  from_zero++;
4728  }
4729 }else{
4730  to_zero++;
4731 }
4732 #endif
4733  for(; last_non_zero>=start_i; last_non_zero--){
4734  if(block[perm_scantable[last_non_zero]])
4735  break;
4736  }
4737  }
4738 #ifdef REFINE_STATS
4739 count++;
4740 if(256*256*256*64 % count == 0){
4741  av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4742 }
4743 #endif
4744  run=0;
4745  rle_index=0;
4746  for(i=start_i; i<=last_non_zero; i++){
4747  int j= perm_scantable[i];
4748  const int level= block[j];
4749 
4750  if(level){
4751  run_tab[rle_index++]=run;
4752  run=0;
4753  }else{
4754  run++;
4755  }
4756  }
4757 
4758  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4759  }else{
4760  break;
4761  }
4762  }
4763 #ifdef REFINE_STATS
4764 if(last_non_zero>0){
4765 STOP_TIMER("iterative search")
4766 }
4767 }
4768 #endif
4769 
4770  return last_non_zero;
4771 }
4772 
4773 /**
4774  * Permute an 8x8 block according to permutation.
4775  * @param block the block which will be permuted according to
4776  * the given permutation vector
4777  * @param permutation the permutation vector
4778  * @param last the last non zero coefficient in scantable order, used to
4779  * speed the permutation up
4780  * @param scantable the used scantable, this is only used to speed the
4781  * permutation up, the block is not (inverse) permutated
4782  * to scantable order!
4783  */
4784 void ff_block_permute(int16_t *block, uint8_t *permutation,
4785  const uint8_t *scantable, int last)
4786 {
4787  int i;
4788  int16_t temp[64];
4789 
4790  if (last <= 0)
4791  return;
4792  //FIXME it is ok but not clean and might fail for some permutations
4793  // if (permutation[1] == 1)
4794  // return;
4795 
4796  for (i = 0; i <= last; i++) {
4797  const int j = scantable[i];
4798  temp[j] = block[j];
4799  block[j] = 0;
4800  }
4801 
4802  for (i = 0; i <= last; i++) {
4803  const int j = scantable[i];
4804  const int perm_j = permutation[j];
4805  block[perm_j] = temp[j];
4806  }
4807 }
4808 
4810  int16_t *block, int n,
4811  int qscale, int *overflow)
4812 {
4813  int i, j, level, last_non_zero, q, start_i;
4814  const int *qmat;
4815  const uint8_t *scantable;
4816  int bias;
4817  int max=0;
4818  unsigned int threshold1, threshold2;
4819 
4820  s->fdsp.fdct(block);
4821 
4822  if(s->dct_error_sum)
4823  s->denoise_dct(s, block);
4824 
4825  if (s->mb_intra) {
4826  scantable= s->intra_scantable.scantable;
4827  if (!s->h263_aic) {
4828  if (n < 4)
4829  q = s->y_dc_scale;
4830  else
4831  q = s->c_dc_scale;
4832  q = q << 3;
4833  } else
4834  /* For AIC we skip quant/dequant of INTRADC */
4835  q = 1 << 3;
4836 
4837  /* note: block[0] is assumed to be positive */
4838  block[0] = (block[0] + (q >> 1)) / q;
4839  start_i = 1;
4840  last_non_zero = 0;
4841  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4842  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4843  } else {
4844  scantable= s->inter_scantable.scantable;
4845  start_i = 0;
4846  last_non_zero = -1;
4847  qmat = s->q_inter_matrix[qscale];
4848  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4849  }
4850  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4851  threshold2= (threshold1<<1);
4852  for(i=63;i>=start_i;i--) {
4853  j = scantable[i];
4854  level = block[j] * qmat[j];
4855 
4856  if(((unsigned)(level+threshold1))>threshold2){
4857  last_non_zero = i;
4858  break;
4859  }else{
4860  block[j]=0;
4861  }
4862  }
4863  for(i=start_i; i<=last_non_zero; i++) {
4864  j = scantable[i];
4865  level = block[j] * qmat[j];
4866 
4867 // if( bias+level >= (1<<QMAT_SHIFT)
4868 // || bias-level >= (1<<QMAT_SHIFT)){
4869  if(((unsigned)(level+threshold1))>threshold2){
4870  if(level>0){
4871  level= (bias + level)>>QMAT_SHIFT;
4872  block[j]= level;
4873  }else{
4874  level= (bias - level)>>QMAT_SHIFT;
4875  block[j]= -level;
4876  }
4877  max |=level;
4878  }else{
4879  block[j]=0;
4880  }
4881  }
4882  *overflow= s->max_qcoeff < max; //overflow might have happened
4883 
4884  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4885  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4887  scantable, last_non_zero);
4888 
4889  return last_non_zero;
4890 }
4891 
4892 #define OFFSET(x) offsetof(MpegEncContext, x)
4893 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4894 static const AVOption h263_options[] = {
4895  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4896  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4898  { NULL },
4899 };
4900 
4901 static const AVClass h263_class = {
4902  .class_name = "H.263 encoder",
4903  .item_name = av_default_item_name,
4904  .option = h263_options,
4905  .version = LIBAVUTIL_VERSION_INT,
4906 };
4907 
4909  .name = "h263",
4910  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4911  .type = AVMEDIA_TYPE_VIDEO,
4912  .id = AV_CODEC_ID_H263,
4913  .priv_data_size = sizeof(MpegEncContext),
4915  .encode2 = ff_mpv_encode_picture,
4916  .close = ff_mpv_encode_end,
4918  .priv_class = &h263_class,
4919 };
4920 
4921 static const AVOption h263p_options[] = {
4922  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4923  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4924  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4925  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4927  { NULL },
4928 };
4929 static const AVClass h263p_class = {
4930  .class_name = "H.263p encoder",
4931  .item_name = av_default_item_name,
4932  .option = h263p_options,
4933  .version = LIBAVUTIL_VERSION_INT,
4934 };
4935 
4937  .name = "h263p",
4938  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4939  .type = AVMEDIA_TYPE_VIDEO,
4940  .id = AV_CODEC_ID_H263P,
4941  .priv_data_size = sizeof(MpegEncContext),
4943  .encode2 = ff_mpv_encode_picture,
4944  .close = ff_mpv_encode_end,
4945  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4947  .priv_class = &h263p_class,
4948 };
4949 
4950 static const AVClass msmpeg4v2_class = {
4951  .class_name = "msmpeg4v2 encoder",
4952  .item_name = av_default_item_name,
4953  .option = ff_mpv_generic_options,
4954  .version = LIBAVUTIL_VERSION_INT,
4955 };
4956 
4958  .name = "msmpeg4v2",
4959  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4960  .type = AVMEDIA_TYPE_VIDEO,
4961  .id = AV_CODEC_ID_MSMPEG4V2,
4962  .priv_data_size = sizeof(MpegEncContext),
4964  .encode2 = ff_mpv_encode_picture,
4965  .close = ff_mpv_encode_end,
4967  .priv_class = &msmpeg4v2_class,
4968 };
4969 
4970 static const AVClass msmpeg4v3_class = {
4971  .class_name = "msmpeg4v3 encoder",
4972  .item_name = av_default_item_name,
4973  .option = ff_mpv_generic_options,
4974  .version = LIBAVUTIL_VERSION_INT,
4975 };
4976 
4978  .name = "msmpeg4",
4979  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4980  .type = AVMEDIA_TYPE_VIDEO,
4981  .id = AV_CODEC_ID_MSMPEG4V3,
4982  .priv_data_size = sizeof(MpegEncContext),
4984  .encode2 = ff_mpv_encode_picture,
4985  .close = ff_mpv_encode_end,
4987  .priv_class = &msmpeg4v3_class,
4988 };
4989 
4990 static const AVClass wmv1_class = {
4991  .class_name = "wmv1 encoder",
4992  .item_name = av_default_item_name,
4993  .option = ff_mpv_generic_options,
4994  .version = LIBAVUTIL_VERSION_INT,
4995 };
4996 
4998  .name = "wmv1",
4999  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
5000  .type = AVMEDIA_TYPE_VIDEO,
5001  .id = AV_CODEC_ID_WMV1,
5002  .priv_data_size = sizeof(MpegEncContext),
5004  .encode2 = ff_mpv_encode_picture,
5005  .close = ff_mpv_encode_end,
5007  .priv_class = &wmv1_class,
5008 };
int last_time_base
Definition: mpegvideo.h:386
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:938
int plane
Definition: avisynth_c.h:422
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2986
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1009
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
int chroma_elim_threshold
Definition: mpegvideo.h:114
#define INPLACE_OFFSET
Definition: mpegutils.h:123
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:227
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:1770
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:572
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2739
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1510
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:124
const char * s
Definition: avisynth_c.h:768
#define RECON_SHIFT
attribute_deprecated int intra_quant_bias
Definition: avcodec.h:2290
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:109
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:520
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1002
int esc3_level_length
Definition: mpegvideo.h:438
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
< number of bits to represent the fractional part of time (encoder only)
Definition: mpegvideo.h:385
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:1256
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:245
#define FF_CMP_DCTMAX
Definition: avcodec.h:2211
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:2326
AVOption.
Definition: opt.h:246
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:697
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:150
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:905
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:185
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3101
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:917
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:900
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:571
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:556
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:67
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:206
attribute_deprecated int rc_qmod_freq
Definition: avcodec.h:2731
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:116
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1826
#define LIBAVUTIL_VERSION_INT
Definition: version.h:86
else temp
Definition: vf_mcdeint.c:256
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:2888
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:393
const char * g
Definition: vf_curves.c:112
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:60
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:151
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:546
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:1359
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:2047
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:191
MJPEG encoder.
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:129
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:430
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:2853
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:605
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2498
#define me
int frame_skip_cmp
Definition: mpegvideo.h:564
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:436
int b_frame_strategy
Definition: mpegvideo.h:557
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:114
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: avcodec.h:1680
attribute_deprecated int lmax
Definition: avcodec.h:2835
enum AVCodecID codec_id
Definition: mpegvideo.h:109
const char * b
Definition: vf_curves.c:113
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int av_log2(unsigned v)
Definition: intmath.c:26
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2172
int frame_skip_exp
Definition: mpegvideo.h:563
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
#define FF_MPV_FLAG_NAQ
Definition: mpegvideo.h:575
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:251
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:121
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:2197
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:70
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:526
uint8_t permutated[64]
Definition: idctdsp.h:33
static const AVClass h263_class
uint8_t run
Definition: svq3.c:206
static AVPacket pkt
void ff_xvid_rate_control_uninit(struct MpegEncContext *s)
Definition: libxvid_rc.c:158
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3164
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:409
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:361
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:130
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define src
Definition: vp8dsp.c:254
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:232
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: avcodec.h:1458
AVCodec.
Definition: avcodec.h:3739
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:387
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:201
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:84
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:247
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:1364
int chroma_x_shift
Definition: mpegvideo.h:476
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:111
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:514
int field_select[2][2]
Definition: mpegvideo.h:277
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:518
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:2849
attribute_deprecated int me_method
This option does nothing.
Definition: avcodec.h:1996
uint32_t ff_square_tab[512]
Definition: me_cmp.c:32
int quant_precision
Definition: mpegvideo.h:398
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:2431
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:516
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1898
int modified_quant
Definition: mpegvideo.h:379
float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run)
Definition: libxvid_rc.c:101
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:574
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:217
float rc_buffer_aggressivity
Definition: mpegvideo.h:537
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:27
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:1384
static int16_t block[64]
Definition: dct.c:115
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
attribute_deprecated int mv_bits
Definition: avcodec.h:2905
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:107
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:2133
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:125
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
attribute_deprecated int rc_strategy
Definition: avcodec.h:2060
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:407
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:493
int64_t time
time of current frame
Definition: mpegvideo.h:388
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1834
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:199
uint8_t bits
Definition: crc.c:296
attribute_deprecated const char * rc_eq
Definition: avcodec.h:2754
attribute_deprecated float rc_buffer_aggressivity
Definition: avcodec.h:2776
uint8_t
#define av_cold
Definition: attributes.h:82
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:134
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:108
AVOptions.
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:524
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:407
enum OutputFormat out_format
output format
Definition: mpegvideo.h:101
attribute_deprecated int i_count
Definition: avcodec.h:2913
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:117
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:567
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
uint16_t * chroma_intra_matrix
custom intra quantization matrix
Definition: avcodec.h:3558
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
#define FF_RC_STRATEGY_XVID
Definition: avcodec.h:2061
Multithreading support functions.
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:2248
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:561
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:2207
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:921
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:458
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
#define emms_c()
Definition: internal.h:54
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1876
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:118
int interlaced_dct
Definition: mpegvideo.h:481
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:2179
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:177
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
Definition: pixblockdsp.h:32
#define CHROMA_420
Definition: mpegvideo.h:473
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:444
int intra_dc_precision
Definition: mpegvideo.h:461
int repeat_first_field
Definition: mpegvideo.h:470
static AVFrame * frame
quarterpel DSP functions
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:248
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: avcodec.h:1679
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:75
#define ff_dlog(a,...)
#define AVERROR_EOF
End of file.
Definition: error.h:55
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:390
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
#define CODEC_FLAG_MV0
Definition: avcodec.h:1123
const uint8_t * scantable
Definition: idctdsp.h:32
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:330
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:126
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:71
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:1375
ptrdiff_t size
Definition: opengl_enc.c:101
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:2112
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2931
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:904
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:573
int scenechange_threshold
Definition: mpegvideo.h:566
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:2198
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:993
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:336
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:2841
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:207
int flipflop_rounding
Definition: mpegvideo.h:435
#define CHROMA_444
Definition: mpegvideo.h:475
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:449
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:731
#define ff_sqrt
Definition: mathops.h:206
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2802
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:2985
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1711
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:745
attribute_deprecated int skip_count
Definition: avcodec.h:2917
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
#define FF_MPV_FLAG_MV0
Definition: mpegvideo.h:576
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:99
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:184
enum AVCodecID id
Definition: avcodec.h:3753
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:106
H263DSPContext h263dsp
Definition: mpegvideo.h:234
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:153
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:212
int width
Definition: frame.h:259
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2083
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:182
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
attribute_deprecated float rc_initial_cplx
Definition: avcodec.h:2779
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:880
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:254
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:192
int chroma_y_shift
Definition: mpegvideo.h:477
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:115
av_default_item_name
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:403
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:2185
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:3418
int qmax
maximum quantizer
Definition: avcodec.h:2712
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2447
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:220
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:179
ERContext er
Definition: mpegvideo.h:551
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:3211
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:216
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:111
int ff_xvid_rate_control_init(struct MpegEncContext *s)
Definition: libxvid_rc.c:42
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
Definition: pixblockdsp.h:29
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:512
PixblockDSPContext pdsp
Definition: mpegvideo.h:231
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:506
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
uint8_t * buf
Definition: put_bits.h:38
uint16_t width
Definition: gdv.c:47
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
GLsizei GLsizei * length
Definition: opengl_enc.c:115
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:230
attribute_deprecated int inter_quant_bias
Definition: avcodec.h:2296
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:399
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:570
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:404
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:254
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1115
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:109
int64_t mb_var_sum_temp
Definition: motion_est.h:86
attribute_deprecated int b_sensitivity
Definition: avcodec.h:2469
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:85
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:66
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2739
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:123
int * lambda_table
Definition: mpegvideo.h:205
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:2372
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:2769
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:82
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
#define CHROMA_422
Definition: mpegvideo.h:474
float border_masking
Definition: mpegvideo.h:538
int progressive_frame
Definition: mpegvideo.h:479
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:876
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:319
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:451
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:110
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:74
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:934
int me_method
ME algorithm.
Definition: mpegvideo.h:256
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:171
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:1948
int(* pix_sum)(uint8_t *pix, int line_size)
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:181
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:2350
int alternate_scan
Definition: mpegvideo.h:468
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:2787
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:908
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:892
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:2845
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:2325
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:3204
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:324
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:441
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:83
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:562
int n
Definition: avisynth_c.h:684
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:2324
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
attribute_deprecated float rc_qsquish
Definition: avcodec.h:2726
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:195
#define MAX_B_FRAMES
Definition: mpegvideo.h:63
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:219
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:3192
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:510
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: avcodec.h:1069
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:2909
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:434
int frame_pred_frame_dct
Definition: mpegvideo.h:462
attribute_deprecated int misc_bits
Definition: avcodec.h:2919
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:1354
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:315
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:389
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:152
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:237
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:204
#define CODEC_FLAG_NORMALIZE_AQP
Definition: avcodec.h:1150
void ff_faandct(int16_t *data)
Definition: faandct.c:114
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:2088
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:107
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:2346
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:131
enum AVCodecID codec_id
Definition: avcodec.h:1778
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
attribute_deprecated int prediction_method
Definition: avcodec.h:2152
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:2067
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:440
#define START_TIMER
Definition: timer.h:137
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
main external API structure.
Definition: avcodec.h:1761
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:231
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
ScanTable intra_scantable
Definition: mpegvideo.h:88
int qmin
minimum quantizer
Definition: avcodec.h:2705
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:97
#define FF_CMP_NSSE
Definition: avcodec.h:2208
#define FF_DEFAULT_QUANT_BIAS
Definition: avcodec.h:2291
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:141
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:137
FDCTDSPContext fdsp
Definition: mpegvideo.h:224
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:400
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:2126
float rc_qmod_amp
Definition: mpegvideo.h:534
int luma_elim_threshold
Definition: mpegvideo.h:113
attribute_deprecated int header_bits
Definition: avcodec.h:2907
GLint GLenum type
Definition: opengl_enc.c:105
void ff_fix_long_p_mvs(MpegEncContext *s)
Definition: motion_est.c:1671
Picture * picture
main picture buffer
Definition: mpegvideo.h:133
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:402
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:454
uint16_t * intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2334
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:252
int(* pix_norm1)(uint8_t *pix, int line_size)
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2984
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:111
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:2327
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:295
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:122
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:132
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:2427
#define STRIDE_ALIGN
Definition: internal.h:99
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:125
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:627
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:119
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int f_code
forward MV resolution
Definition: mpegvideo.h:235
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1081
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:121
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:2911
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1522
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix
Definition: avcodec.h:2341
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:112
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:209
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:450
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:283
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:102
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:249
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:2140
static int64_t pts
Global timestamp for the audio frames.
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:2119
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:253
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:250
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:888
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:186
uint8_t level
Definition: svq3.c:207
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:246
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:128
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:540
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:78
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:180
char * rc_eq
Definition: mpegvideo.h:542
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:95
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1974
PutBitContext pb
bit output
Definition: mpegvideo.h:148
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:294
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
volatile int error_count
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
int
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:2191
int quantizer_noise_shaping
Definition: mpegvideo.h:527
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
MECmpContext mecc
Definition: mpegvideo.h:228
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
float rc_initial_cplx
Definition: mpegvideo.h:536
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:127
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:279
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:112
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:2894
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:211
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:206
static int16_t basis[64][64]
attribute_deprecated float border_masking
Definition: avcodec.h:2393
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:159
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:179
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:76
float rc_qsquish
ratecontrol qmin qmax limiting method 0-> clipping, 1-> use a nice continuous function to limit qscal...
Definition: mpegvideo.h:533
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideo.h:145
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:3183
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
#define H263_GOB_HEIGHT(h)
Definition: h263.h:44
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
attribute_deprecated float rc_qmod_amp
Definition: avcodec.h:2729
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:187
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:455
int trellis
trellis RD quantization
Definition: avcodec.h:2861
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:508
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:2212
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:777
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:896
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:421
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:106
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:499
#define STOP_TIMER(id)
Definition: timer.h:138
int slices
Number of slices.
Definition: avcodec.h:2514
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:1803
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:877
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:458
int dia_size
ME diamond size & shape.
Definition: avcodec.h:2221
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:2923
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:3232
VideoDSPContext vdsp
Definition: mpegvideo.h:233
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:2415
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2720
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:1369
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1618
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1811
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:498
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:282
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:100
This side data corresponds to the AVCPBProperties struct.
Definition: avcodec.h:1510
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:406
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:165
attribute_deprecated int p_count
Definition: avcodec.h:2915
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:279
attribute_deprecated int error_rate
Definition: avcodec.h:3405
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1720
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:135
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:180
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:510
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:634
int height
Definition: frame.h:259
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:523
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:124
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
void INT64 start
Definition: avisynth_c.h:690
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:87
#define av_always_inline
Definition: attributes.h:39
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:488
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:939
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
attribute_deprecated int lmin
Definition: avcodec.h:2829
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:113
#define stride
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:522
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:236
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1656
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:179
int delay
Codec delay.
Definition: avcodec.h:1931
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2981
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1672
int ff_check_alignment(void)
Definition: me_cmp.c:988
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:603
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:142
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1907
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:275
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:203
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2762
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:391
enum idct_permutation_type perm_type
Definition: idctdsp.h:97
attribute_deprecated int pre_me
Definition: avcodec.h:2233
HpelDSPContext hdsp
Definition: mpegvideo.h:226
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340