FFmpeg
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include "config_components.h"
35 
36 #include <assert.h>
37 #include <stdint.h>
38 
39 #include "libavutil/emms.h"
40 #include "libavutil/internal.h"
41 #include "libavutil/intmath.h"
42 #include "libavutil/mathematics.h"
43 #include "libavutil/mem.h"
44 #include "libavutil/mem_internal.h"
45 #include "libavutil/opt.h"
46 #include "libavutil/thread.h"
47 #include "avcodec.h"
48 #include "encode.h"
49 #include "idctdsp.h"
50 #include "mpeg12data.h"
51 #include "mpeg12enc.h"
52 #include "mpegvideo.h"
53 #include "mpegvideodata.h"
54 #include "mpegvideoenc.h"
55 #include "h261enc.h"
56 #include "h263.h"
57 #include "h263data.h"
58 #include "h263enc.h"
59 #include "mjpegenc_common.h"
60 #include "mathops.h"
61 #include "mpegutils.h"
62 #include "mpegvideo_unquantize.h"
63 #include "mjpegenc.h"
64 #include "speedhqenc.h"
65 #include "msmpeg4enc.h"
66 #include "pixblockdsp.h"
67 #include "qpeldsp.h"
68 #include "faandct.h"
69 #include "aandcttab.h"
70 #include "mpeg4video.h"
71 #include "mpeg4videodata.h"
72 #include "mpeg4videoenc.h"
73 #include "internal.h"
74 #include "bytestream.h"
75 #include "rv10enc.h"
76 #include "libavutil/refstruct.h"
77 #include <limits.h>
78 #include "sp5x.h"
79 
80 #define QUANT_BIAS_SHIFT 8
81 
82 #define QMAT_SHIFT_MMX 16
83 #define QMAT_SHIFT 21
84 
85 static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt);
86 static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
87 static int sse_mb(MPVEncContext *const s);
88 static int dct_quantize_c(MPVEncContext *const s,
89  int16_t *block, int n,
90  int qscale, int *overflow);
91 static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow);
92 
93 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
94 
95 static const AVOption mpv_generic_options[] = {
98  { NULL },
99 };
100 
102  .class_name = "generic mpegvideo encoder",
103  .item_name = av_default_item_name,
104  .option = mpv_generic_options,
105  .version = LIBAVUTIL_VERSION_INT,
106 };
107 
/**
 * Build the per-qscale forward-quantization tables from a quant matrix.
 *
 * For every qscale in [qmin, qmax] this precomputes reciprocal multipliers
 * so the encoder can quantize with a multiply+shift instead of a division.
 * The fixed-point layout depends on which forward DCT is in use (islow /
 * faandct vs. ifast, which bakes in ff_aanscales).
 *
 * @param qmat         per-qscale 32-bit multiplier table (QMAT_SHIFT fixed point)
 * @param qmat16       per-qscale 16-bit multiplier+bias pair (QMAT_SHIFT_MMX
 *                     fixed point) for the SIMD quantizer path
 * @param quant_matrix the 64-entry quantization matrix (raster order before
 *                     IDCT permutation)
 * @param bias         rounding bias in QUANT_BIAS_SHIFT fixed point
 * @param intra        nonzero for intra matrices; the DC coefficient (index 0)
 *                     is then excluded from the overflow headroom check below
 */
void ff_convert_matrix(MPVEncContext *const s, int (*qmat)[64],
                       uint16_t (*qmat16)[2][64],
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
{
    FDCTDSPContext *fdsp = &s->fdsp;
    int qscale;
    int shift = 0;

    for (qscale = qmin; qscale <= qmax; qscale++) {
        int i;
        int qscale2;

        /* Map the qscale code to the doubled quantizer step; MPEG-2 may use
         * a non-linear mapping (q_scale_type). */
        if (s->c.q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
        else qscale2 = qscale << 1;

        if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
#if CONFIG_FAANDCT
            fdsp->fdct == ff_faandct ||
#endif /* CONFIG_FAANDCT */
            fdsp->fdct == ff_jpeg_fdct_islow_10) {
            for (i = 0; i < 64; i++) {
                /* Tables are indexed in the IDCT's coefficient permutation. */
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
            }
        } else if (fdsp->fdct == ff_fdct_ifast) {
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                /* The ifast DCT leaves outputs scaled by ff_aanscales>>14,
                 * so fold that scale into the divisor here. */
                int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
                /* 1247 * 1 * 1 <= ff_aanscales[i] * qscale2 * quant_matrix[j] <= 31521 * 112 * 255
                 * Assume x = ff_aanscales[i] * qscale2 * quant_matrix[j]
                 * 1247 <= x <= 900239760
                 * (1 << 36) / 1247 >= (1 << 36) / (x) >= (1 << 36) / 900239760
                 * 55107840 >= (1 << 36) / (x) >= 76 */

                qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
            }
        } else {
            for (i = 0; i < 64; i++) {
                const int j = s->c.idsp.idct_permutation[i];
                int64_t den = (int64_t) qscale2 * quant_matrix[j];
                /* 1 * 1 <= qscale2 * quant_matrix[j] <= 112 * 255
                 * Assume x = qscale2 * quant_matrix[j]
                 * 1 <= x <= 28560
                 * (1 << 22) / 1 >= (1 << 22) / (x) >= (1 << 22) / 28560
                 * 4194304 >= (1 << 22) / (x) >= 146
                 *
                 * 1 <= x <= 28560
                 * (1 << 17) / 1 >= (1 << 17) / (x) >= (1 << 17) / 28560
                 * 131072 >= (1 << 17) / (x) >= 4 */

                qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
                qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;

                /* Keep the 16-bit multiplier strictly inside (0, 0x8000) so
                 * the SIMD quantizer's signed 16-bit multiply cannot wrap. */
                if (qmat16[qscale][0][i] == 0 ||
                    qmat16[qscale][0][i] == 128 * 256)
                    qmat16[qscale][0][i] = 128 * 256 - 1;
                qmat16[qscale][1][i] =
                    ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
                                qmat16[qscale][0][i]);
            }
        }

        /* Check how much headroom the 32-bit qmat multiplies have left;
         * 8191 is the largest coefficient magnitude the DCT can produce. */
        for (i = intra; i < 64; i++) {
            int64_t max = 8191;
            if (fdsp->fdct == ff_fdct_ifast) {
                max = (8191LL * ff_aanscales[i]) >> 14;
            }
            while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
                shift++;
            }
        }
    }
    if (shift) {
        av_log(s->c.avctx, AV_LOG_INFO,
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
               QMAT_SHIFT - shift);
    }
}
194 
195 static inline void update_qscale(MPVMainEncContext *const m)
196 {
197  MPVEncContext *const s = &m->s;
198 
199  if (s->c.q_scale_type == 1 && 0) {
200  int i;
201  int bestdiff=INT_MAX;
202  int best = 1;
203 
204  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
205  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
206  if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
207  (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
208  continue;
209  if (diff < bestdiff) {
210  bestdiff = diff;
211  best = i;
212  }
213  }
214  s->c.qscale = best;
215  } else {
216  s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
217  (FF_LAMBDA_SHIFT + 7);
218  s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
219  }
220 
221  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
223 }
224 
226 {
227  int i;
228 
229  if (matrix) {
230  put_bits(pb, 1, 1);
231  for (i = 0; i < 64; i++) {
232  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
233  }
234  } else
235  put_bits(pb, 1, 0);
236 }
237 
238 /**
239  * init s->c.cur_pic.qscale_table from s->lambda_table
240  */
241 static void init_qscale_tab(MPVEncContext *const s)
242 {
243  int8_t *const qscale_table = s->c.cur_pic.qscale_table;
244 
245  for (int i = 0; i < s->c.mb_num; i++) {
246  unsigned int lam = s->lambda_table[s->c.mb_index2xy[i]];
247  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
248  qscale_table[s->c.mb_index2xy[i]] = av_clip(qp, s->c.avctx->qmin,
249  s->c.avctx->qmax);
250  }
251 }
252 
254  const MPVEncContext *const src)
255 {
256 #define COPY(a) dst->a = src->a
257  COPY(c.pict_type);
258  COPY(f_code);
259  COPY(b_code);
260  COPY(c.qscale);
261  COPY(lambda);
262  COPY(lambda2);
263  COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
264  COPY(c.progressive_frame); // FIXME don't set in encode_header
265  COPY(partitioned_frame); // FIXME don't set in encode_header
266 #undef COPY
267 }
268 
270 {
271  for (int i = -16; i < 16; i++)
272  default_fcode_tab[i + MAX_MV] = 1;
273 }
274 
275 /**
276  * Set the given MPVEncContext to defaults for encoding.
277  */
279 {
280  MPVEncContext *const s = &m->s;
281  static AVOnce init_static_once = AV_ONCE_INIT;
282 
284 
285  s->f_code = 1;
286  s->b_code = 1;
287 
288  if (!m->fcode_tab) {
290  ff_thread_once(&init_static_once, mpv_encode_init_static);
291  }
292  if (!s->c.y_dc_scale_table) {
293  s->c.y_dc_scale_table =
294  s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
295  }
296 }
297 
299 {
300  s->dct_quantize = dct_quantize_c;
301 
302 #if ARCH_X86
304 #endif
305 
306  if (s->c.avctx->trellis)
307  s->dct_quantize = dct_quantize_trellis_c;
308 }
309 
311 {
312  MpegEncContext *const s = &s2->c;
313  MPVUnquantDSPContext unquant_dsp_ctx;
314 
315  ff_mpv_unquantize_init(&unquant_dsp_ctx,
316  avctx->flags & AV_CODEC_FLAG_BITEXACT, s->q_scale_type);
317 
318  if (s2->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
319  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg2_intra;
320  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg2_inter;
321  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
322  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_h263_intra;
323  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_h263_inter;
324  } else {
325  s->dct_unquantize_intra = unquant_dsp_ctx.dct_unquantize_mpeg1_intra;
326  s->dct_unquantize_inter = unquant_dsp_ctx.dct_unquantize_mpeg1_inter;
327  }
328 }
329 
331 {
332  MPVEncContext *const s = &m->s;
333  MECmpContext mecc;
334  me_cmp_func me_cmp[6];
335  int ret;
336 
337  ff_me_cmp_init(&mecc, avctx);
338  ret = ff_me_init(&s->me, avctx, &mecc, 1);
339  if (ret < 0)
340  return ret;
341  ret = ff_set_cmp(&mecc, me_cmp, m->frame_skip_cmp, 1);
342  if (ret < 0)
343  return ret;
344  m->frame_skip_cmp_fn = me_cmp[1];
346  ret = ff_set_cmp(&mecc, me_cmp, avctx->ildct_cmp, 1);
347  if (ret < 0)
348  return ret;
349  if (!me_cmp[0] || !me_cmp[4])
350  return AVERROR(EINVAL);
351  s->ildct_cmp[0] = me_cmp[0];
352  s->ildct_cmp[1] = me_cmp[4];
353  }
354 
355  s->sum_abs_dctelem = mecc.sum_abs_dctelem;
356 
357  s->sse_cmp[0] = mecc.sse[0];
358  s->sse_cmp[1] = mecc.sse[1];
359  s->sad_cmp[0] = mecc.sad[0];
360  s->sad_cmp[1] = mecc.sad[1];
361  if (avctx->mb_cmp == FF_CMP_NSSE) {
362  s->n_sse_cmp[0] = mecc.nsse[0];
363  s->n_sse_cmp[1] = mecc.nsse[1];
364  } else {
365  s->n_sse_cmp[0] = mecc.sse[0];
366  s->n_sse_cmp[1] = mecc.sse[1];
367  }
368 
369  return 0;
370 }
371 
372 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
374 {
375  MPVEncContext *const s = &m->s;
376  const int nb_matrices = 1 + (s->c.out_format == FMT_MJPEG) + !m->intra_only;
377  const uint16_t *intra_matrix, *inter_matrix;
378  int ret;
379 
380  if (!ALLOCZ_ARRAYS(s->q_intra_matrix, 32, nb_matrices) ||
381  !ALLOCZ_ARRAYS(s->q_intra_matrix16, 32, nb_matrices))
382  return AVERROR(ENOMEM);
383 
384  if (s->c.out_format == FMT_MJPEG) {
385  s->q_chroma_intra_matrix = s->q_intra_matrix + 32;
386  s->q_chroma_intra_matrix16 = s->q_intra_matrix16 + 32;
387  // No need to set q_inter_matrix
389  // intra_matrix, chroma_intra_matrix will be set later for MJPEG.
390  return 0;
391  } else {
392  s->q_chroma_intra_matrix = s->q_intra_matrix;
393  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
394  }
395  if (!m->intra_only) {
396  s->q_inter_matrix = s->q_intra_matrix + 32;
397  s->q_inter_matrix16 = s->q_intra_matrix16 + 32;
398  }
399 
400  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 &&
401  s->mpeg_quant) {
404  } else if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
405  intra_matrix =
407  } else {
408  /* MPEG-1/2, SpeedHQ */
411  }
412  if (avctx->intra_matrix)
414  if (avctx->inter_matrix)
416 
417  /* init q matrix */
418  for (int i = 0; i < 64; i++) {
419  int j = s->c.idsp.idct_permutation[i];
420 
421  s->c.intra_matrix[j] = s->c.chroma_intra_matrix[j] = intra_matrix[i];
422  s->c.inter_matrix[j] = inter_matrix[i];
423  }
424 
425  /* precompute matrix */
427  if (ret < 0)
428  return ret;
429 
430  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
431  s->c.intra_matrix, s->intra_quant_bias, avctx->qmin,
432  31, 1);
433  if (s->q_inter_matrix)
434  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
435  s->c.inter_matrix, s->inter_quant_bias, avctx->qmin,
436  31, 0);
437 
438  return 0;
439 }
440 
442 {
443  MPVEncContext *const s = &m->s;
444  int has_b_frames = !!m->max_b_frames;
445  int16_t (*mv_table)[2];
446 
447  /* Allocate MB type table */
448  unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
449  s->mb_type = av_calloc(mb_array_size, 3 * sizeof(*s->mb_type) + sizeof(*s->mb_mean));
450  if (!s->mb_type)
451  return AVERROR(ENOMEM);
452  s->mc_mb_var = s->mb_type + mb_array_size;
453  s->mb_var = s->mc_mb_var + mb_array_size;
454  s->mb_mean = (uint8_t*)(s->mb_var + mb_array_size);
455 
456  if (!FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size))
457  return AVERROR(ENOMEM);
458 
459  unsigned mv_table_size = (s->c.mb_height + 2) * s->c.mb_stride + 1;
460  unsigned nb_mv_tables = 1 + 5 * has_b_frames;
461  if (s->c.codec_id == AV_CODEC_ID_MPEG4 ||
462  (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
463  nb_mv_tables += 8 * has_b_frames;
464  s->p_field_select_table[0] = av_calloc(mv_table_size, 2 * (2 + 4 * has_b_frames));
465  if (!s->p_field_select_table[0])
466  return AVERROR(ENOMEM);
467  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
468  }
469 
470  mv_table = av_calloc(mv_table_size, nb_mv_tables * sizeof(*mv_table));
471  if (!mv_table)
472  return AVERROR(ENOMEM);
473  m->mv_table_base = mv_table;
474  mv_table += s->c.mb_stride + 1;
475 
476  s->p_mv_table = mv_table;
477  if (has_b_frames) {
478  s->b_forw_mv_table = mv_table += mv_table_size;
479  s->b_back_mv_table = mv_table += mv_table_size;
480  s->b_bidir_forw_mv_table = mv_table += mv_table_size;
481  s->b_bidir_back_mv_table = mv_table += mv_table_size;
482  s->b_direct_mv_table = mv_table += mv_table_size;
483 
484  if (s->p_field_select_table[1]) { // MPEG-4 or INTERLACED_ME above
485  uint8_t *field_select = s->p_field_select_table[1];
486  for (int j = 0; j < 2; j++) {
487  for (int k = 0; k < 2; k++) {
488  for (int l = 0; l < 2; l++)
489  s->b_field_mv_table[j][k][l] = mv_table += mv_table_size;
490  s->b_field_select_table[j][k] = field_select += 2 * mv_table_size;
491  }
492  }
493  }
494  }
495 
496  return 0;
497 }
498 
500 {
501  MPVEncContext *const s = &m->s;
502  // Align the following per-thread buffers to avoid false sharing.
503  enum {
504 #ifndef _MSC_VER
505  /// The number is supposed to match/exceed the cache-line size.
506  ALIGN = FFMAX(128, _Alignof(max_align_t)),
507 #else
508  ALIGN = 128,
509 #endif
510  DCT_ERROR_SIZE = FFALIGN(2 * sizeof(*s->dct_error_sum), ALIGN),
511  };
512  static_assert(DCT_ERROR_SIZE * MAX_THREADS + ALIGN - 1 <= SIZE_MAX,
513  "Need checks for potential overflow.");
514  unsigned nb_slices = s->c.slice_context_count;
515  char *dct_error = NULL;
516 
517  if (m->noise_reduction) {
518  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
519  return AVERROR(ENOMEM);
520  dct_error = av_mallocz(ALIGN - 1 + nb_slices * DCT_ERROR_SIZE);
521  if (!dct_error)
522  return AVERROR(ENOMEM);
524  dct_error += FFALIGN((uintptr_t)dct_error, ALIGN) - (uintptr_t)dct_error;
525  }
526 
527  const int y_size = s->c.b8_stride * (2 * s->c.mb_height + 1);
528  const int c_size = s->c.mb_stride * (s->c.mb_height + 1);
529  const int yc_size = y_size + 2 * c_size;
530  ptrdiff_t offset = 0;
531 
532  for (unsigned i = 0; i < nb_slices; ++i) {
533  MPVEncContext *const s2 = s->c.enc_contexts[i];
534 
535  s2->block = s2->blocks[0];
536 
537  if (dct_error) {
538  s2->dct_offset = s->dct_offset;
539  s2->dct_error_sum = (void*)dct_error;
540  dct_error += DCT_ERROR_SIZE;
541  }
542 
543  if (s2->c.ac_val) {
544  s2->c.dc_val += offset + i;
545  s2->c.ac_val += offset;
546  offset += yc_size;
547  }
548  }
549  return 0;
550 }
551 
552 /* init video encoder */
554 {
555  MPVMainEncContext *const m = avctx->priv_data;
556  MPVEncContext *const s = &m->s;
557  AVCPBProperties *cpb_props;
558  int gcd, ret;
559 
561 
562  switch (avctx->pix_fmt) {
563  case AV_PIX_FMT_YUVJ444P:
564  case AV_PIX_FMT_YUV444P:
565  s->c.chroma_format = CHROMA_444;
566  break;
567  case AV_PIX_FMT_YUVJ422P:
568  case AV_PIX_FMT_YUV422P:
569  s->c.chroma_format = CHROMA_422;
570  break;
571  default:
572  av_unreachable("Already checked via CODEC_PIXFMTS");
573  case AV_PIX_FMT_YUVJ420P:
574  case AV_PIX_FMT_YUV420P:
575  s->c.chroma_format = CHROMA_420;
576  break;
577  }
578 
580 
581  m->bit_rate = avctx->bit_rate;
582  s->c.width = avctx->width;
583  s->c.height = avctx->height;
584  if (avctx->gop_size > 600 &&
587  "keyframe interval too large!, reducing it from %d to %d\n",
588  avctx->gop_size, 600);
589  avctx->gop_size = 600;
590  }
591  m->gop_size = avctx->gop_size;
592  s->c.avctx = avctx;
594  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
595  "is " AV_STRINGIFY(MPVENC_MAX_B_FRAMES) ".\n");
597  } else if (avctx->max_b_frames < 0) {
599  "max b frames must be 0 or positive for mpegvideo based encoders\n");
600  return AVERROR(EINVAL);
601  }
603  s->c.codec_id = avctx->codec->id;
605  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
606  return AVERROR(EINVAL);
607  }
608 
609  s->c.quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
610  s->rtp_mode = !!s->rtp_payload_size;
611  s->c.intra_dc_precision = avctx->intra_dc_precision;
612 
613  // workaround some differences between how applications specify dc precision
614  if (s->c.intra_dc_precision < 0) {
615  s->c.intra_dc_precision += 8;
616  } else if (s->c.intra_dc_precision >= 8)
617  s->c.intra_dc_precision -= 8;
618 
619  if (s->c.intra_dc_precision < 0) {
621  "intra dc precision must be positive, note some applications use"
622  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
623  return AVERROR(EINVAL);
624  }
625 
626  if (s->c.intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
627  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
628  return AVERROR(EINVAL);
629  }
631 
632  if (m->gop_size <= 1) {
633  m->intra_only = 1;
634  m->gop_size = 12;
635  } else {
636  m->intra_only = 0;
637  }
638 
639  /* Fixed QSCALE */
641 
642  s->adaptive_quant = (avctx->lumi_masking ||
643  avctx->dark_masking ||
646  avctx->p_masking ||
647  m->border_masking ||
648  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
649  !m->fixed_qscale;
650 
651  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
652 
654  switch(avctx->codec_id) {
657  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
658  break;
659  case AV_CODEC_ID_MPEG4:
663  if (avctx->rc_max_rate >= 15000000) {
664  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
665  } else if(avctx->rc_max_rate >= 2000000) {
666  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
667  } else if(avctx->rc_max_rate >= 384000) {
668  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
669  } else
670  avctx->rc_buffer_size = 40;
671  avctx->rc_buffer_size *= 16384;
672  break;
673  }
674  if (avctx->rc_buffer_size) {
675  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
676  }
677  }
678 
679  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
680  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
681  return AVERROR(EINVAL);
682  }
683 
686  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
687  }
688 
690  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
691  return AVERROR(EINVAL);
692  }
693 
695  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
696  return AVERROR(EINVAL);
697  }
698 
699  if (avctx->rc_max_rate &&
703  "impossible bitrate constraints, this will fail\n");
704  }
705 
706  if (avctx->rc_buffer_size &&
709  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
710  return AVERROR(EINVAL);
711  }
712 
713  if (!m->fixed_qscale &&
716  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
718  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
719  if (nbt <= INT_MAX) {
720  avctx->bit_rate_tolerance = nbt;
721  } else
722  avctx->bit_rate_tolerance = INT_MAX;
723  }
724 
725  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->c.codec_id != AV_CODEC_ID_MPEG4 &&
726  s->c.codec_id != AV_CODEC_ID_H263 && s->c.codec_id != AV_CODEC_ID_H263P &&
727  s->c.codec_id != AV_CODEC_ID_FLV1) {
728  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
729  return AVERROR(EINVAL);
730  }
731 
732  if (s->c.obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
734  "OBMC is only supported with simple mb decision\n");
735  return AVERROR(EINVAL);
736  }
737 
738  if (s->c.quarter_sample && s->c.codec_id != AV_CODEC_ID_MPEG4) {
739  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
740  return AVERROR(EINVAL);
741  }
742 
743  if ((s->c.codec_id == AV_CODEC_ID_MPEG4 ||
744  s->c.codec_id == AV_CODEC_ID_H263 ||
745  s->c.codec_id == AV_CODEC_ID_H263P) &&
746  (avctx->sample_aspect_ratio.num > 255 ||
747  avctx->sample_aspect_ratio.den > 255)) {
749  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
753  }
754 
755  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
756  s->c.codec_id == AV_CODEC_ID_H263P) &&
757  (avctx->width > 2048 ||
758  avctx->height > 1152 )) {
759  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
760  return AVERROR(EINVAL);
761  }
762  if (s->c.codec_id == AV_CODEC_ID_FLV1 &&
763  (avctx->width > 65535 ||
764  avctx->height > 65535 )) {
765  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
766  return AVERROR(EINVAL);
767  }
768  if ((s->c.codec_id == AV_CODEC_ID_H263 ||
769  s->c.codec_id == AV_CODEC_ID_H263P ||
770  s->c.codec_id == AV_CODEC_ID_RV20) &&
771  ((avctx->width &3) ||
772  (avctx->height&3) )) {
773  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
774  return AVERROR(EINVAL);
775  }
776 
777  if (s->c.codec_id == AV_CODEC_ID_RV10 &&
778  (avctx->width &15 ||
779  avctx->height&15 )) {
780  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
781  return AVERROR(EINVAL);
782  }
783 
784  if ((s->c.codec_id == AV_CODEC_ID_WMV1 ||
785  s->c.codec_id == AV_CODEC_ID_WMV2) &&
786  avctx->width & 1) {
787  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
788  return AVERROR(EINVAL);
789  }
790 
792  s->c.codec_id != AV_CODEC_ID_MPEG4 && s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO) {
793  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
794  return AVERROR(EINVAL);
795  }
796 
797  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
798  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
799  return AVERROR(EINVAL);
800  }
801 
802  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
804  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=rd\n");
805  return AVERROR(EINVAL);
806  }
807 
808  if (m->scenechange_threshold < 1000000000 &&
811  "closed gop with scene change detection are not supported yet, "
812  "set threshold to 1000000000\n");
813  return AVERROR_PATCHWELCOME;
814  }
815 
817  if (s->c.codec_id != AV_CODEC_ID_MPEG2VIDEO &&
820  "low delay forcing is only available for mpeg2, "
821  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
822  return AVERROR(EINVAL);
823  }
824  if (m->max_b_frames != 0) {
826  "B-frames cannot be used with low delay\n");
827  return AVERROR(EINVAL);
828  }
829  }
830 
831  if (avctx->slices > 1 &&
833  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
834  return AVERROR(EINVAL);
835  }
836 
839  "notice: b_frame_strategy only affects the first pass\n");
840  m->b_frame_strategy = 0;
841  }
842 
844  if (gcd > 1) {
845  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
846  avctx->time_base.den /= gcd;
847  avctx->time_base.num /= gcd;
848  //return -1;
849  }
850 
851  if (s->mpeg_quant || s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO || s->c.codec_id == AV_CODEC_ID_MJPEG || s->c.codec_id == AV_CODEC_ID_AMV || s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
852  // (a + x * 3 / 8) / x
853  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
854  s->inter_quant_bias = 0;
855  } else {
856  s->intra_quant_bias = 0;
857  // (a - x / 4) / x
858  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
859  }
860 
861  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
862  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
863  return AVERROR(EINVAL);
864  }
865 
866  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
867 
868  switch (avctx->codec->id) {
869 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
871  s->rtp_mode = 1;
872  /* fallthrough */
874  s->c.out_format = FMT_MPEG1;
875  s->c.low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
876  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
878  break;
879 #endif
880 #if CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER
881  case AV_CODEC_ID_MJPEG:
882  case AV_CODEC_ID_AMV:
883  s->c.out_format = FMT_MJPEG;
884  m->intra_only = 1; /* force intra only for jpeg */
885  avctx->delay = 0;
886  s->c.low_delay = 1;
887  break;
888 #endif
889  case AV_CODEC_ID_SPEEDHQ:
890  s->c.out_format = FMT_SPEEDHQ;
891  m->intra_only = 1; /* force intra only for SHQ */
892  avctx->delay = 0;
893  s->c.low_delay = 1;
894  break;
895  case AV_CODEC_ID_H261:
896  s->c.out_format = FMT_H261;
897  avctx->delay = 0;
898  s->c.low_delay = 1;
899  s->rtp_mode = 0; /* Sliced encoding not supported */
900  break;
901  case AV_CODEC_ID_H263:
902  if (!CONFIG_H263_ENCODER)
905  s->c.width, s->c.height) == 8) {
907  "The specified picture size of %dx%d is not valid for "
908  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
909  "352x288, 704x576, and 1408x1152. "
910  "Try H.263+.\n", s->c.width, s->c.height);
911  return AVERROR(EINVAL);
912  }
913  s->c.out_format = FMT_H263;
914  avctx->delay = 0;
915  s->c.low_delay = 1;
916  break;
917  case AV_CODEC_ID_H263P:
918  s->c.out_format = FMT_H263;
919  /* Fx */
920  s->c.h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
921  s->modified_quant = s->c.h263_aic;
922  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
923  s->me.unrestricted_mv = s->c.obmc || s->loop_filter || s->umvplus;
924  s->flipflop_rounding = 1;
925 
926  /* /Fx */
927  /* These are just to be sure */
928  avctx->delay = 0;
929  s->c.low_delay = 1;
930  break;
931  case AV_CODEC_ID_FLV1:
932  s->c.out_format = FMT_H263;
933  s->me.unrestricted_mv = 1;
934  s->rtp_mode = 0; /* don't allow GOB */
935  avctx->delay = 0;
936  s->c.low_delay = 1;
937  break;
938 #if CONFIG_RV10_ENCODER
939  case AV_CODEC_ID_RV10:
941  s->c.out_format = FMT_H263;
942  avctx->delay = 0;
943  s->c.low_delay = 1;
944  break;
945 #endif
946 #if CONFIG_RV20_ENCODER
947  case AV_CODEC_ID_RV20:
949  s->c.out_format = FMT_H263;
950  avctx->delay = 0;
951  s->c.low_delay = 1;
952  s->modified_quant = 1;
953  // Set here to force allocation of dc_val;
954  // will be set later on a per-frame basis.
955  s->c.h263_aic = 1;
956  s->loop_filter = 1;
957  s->me.unrestricted_mv = 0;
958  break;
959 #endif
960  case AV_CODEC_ID_MPEG4:
961  s->c.out_format = FMT_H263;
962  s->c.h263_pred = 1;
963  s->me.unrestricted_mv = 1;
964  s->flipflop_rounding = 1;
965  s->c.low_delay = m->max_b_frames ? 0 : 1;
966  avctx->delay = s->c.low_delay ? 0 : (m->max_b_frames + 1);
967  break;
969  s->c.out_format = FMT_H263;
970  s->c.h263_pred = 1;
971  s->me.unrestricted_mv = 1;
972  s->c.msmpeg4_version = MSMP4_V2;
973  avctx->delay = 0;
974  s->c.low_delay = 1;
975  break;
977  s->c.out_format = FMT_H263;
978  s->c.h263_pred = 1;
979  s->me.unrestricted_mv = 1;
980  s->c.msmpeg4_version = MSMP4_V3;
981  s->flipflop_rounding = 1;
982  avctx->delay = 0;
983  s->c.low_delay = 1;
984  break;
985  case AV_CODEC_ID_WMV1:
986  s->c.out_format = FMT_H263;
987  s->c.h263_pred = 1;
988  s->me.unrestricted_mv = 1;
989  s->c.msmpeg4_version = MSMP4_WMV1;
990  s->flipflop_rounding = 1;
991  avctx->delay = 0;
992  s->c.low_delay = 1;
993  break;
994  case AV_CODEC_ID_WMV2:
995  s->c.out_format = FMT_H263;
996  s->c.h263_pred = 1;
997  s->me.unrestricted_mv = 1;
998  s->c.msmpeg4_version = MSMP4_WMV2;
999  s->flipflop_rounding = 1;
1000  avctx->delay = 0;
1001  s->c.low_delay = 1;
1002  break;
1003  default:
1004  av_unreachable("List contains all codecs using ff_mpv_encode_init()");
1005  }
1006 
1007  avctx->has_b_frames = !s->c.low_delay;
1008 
1009  s->c.encoding = 1;
1010 
1011  s->c.progressive_frame =
1012  s->c.progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
1014  s->c.alternate_scan);
1015 
1018  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1019  (1 << AV_PICTURE_TYPE_P) |
1020  (1 << AV_PICTURE_TYPE_B);
1021  } else if (!m->intra_only) {
1022  s->frame_reconstruction_bitfield = (1 << AV_PICTURE_TYPE_I) |
1023  (1 << AV_PICTURE_TYPE_P);
1024  } else {
1025  s->frame_reconstruction_bitfield = 0;
1026  }
1027 
1028  if (m->lmin > m->lmax) {
1029  av_log(avctx, AV_LOG_WARNING, "Clipping lmin value to %d\n", m->lmax);
1030  m->lmin = m->lmax;
1031  }
1032 
1033  /* ff_mpv_init_duplicate_contexts() will copy (memdup) the contents of the
1034  * main slice to the slice contexts, so we initialize various fields of it
1035  * before calling ff_mpv_init_duplicate_contexts(). */
1036  s->parent = m;
1037  ff_mpv_idct_init(&s->c);
1039  ff_fdctdsp_init(&s->fdsp, avctx);
1040  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
1041  ff_pixblockdsp_init(&s->pdsp, 8);
1042  ret = me_cmp_init(m, avctx);
1043  if (ret < 0)
1044  return ret;
1045 
1046  if (!(avctx->stats_out = av_mallocz(256)) ||
1047  !(s->new_pic = av_frame_alloc()) ||
1048  !(s->c.picture_pool = ff_mpv_alloc_pic_pool(0)))
1049  return AVERROR(ENOMEM);
1050 
1051  ret = init_matrices(m, avctx);
1052  if (ret < 0)
1053  return ret;
1054 
1056 
1057  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263) {
1059 #if CONFIG_MSMPEG4ENC
1060  if (s->c.msmpeg4_version != MSMP4_UNUSED)
1062 #endif
1063  }
1064 
1065  s->c.slice_ctx_size = sizeof(*s);
1066  ret = ff_mpv_common_init(&s->c);
1067  if (ret < 0)
1068  return ret;
1069  ret = init_buffers(m);
1070  if (ret < 0)
1071  return ret;
1072  if (s->c.slice_context_count > 1) {
1073  s->rtp_mode = 1;
1075  s->h263_slice_structured = 1;
1076  }
1078  if (ret < 0)
1079  return ret;
1080 
1081  ret = init_slice_buffers(m);
1082  if (ret < 0)
1083  return ret;
1084 
1086  if (ret < 0)
1087  return ret;
1088 
1089  if (m->b_frame_strategy == 2) {
1090  for (int i = 0; i < m->max_b_frames + 2; i++) {
1091  m->tmp_frames[i] = av_frame_alloc();
1092  if (!m->tmp_frames[i])
1093  return AVERROR(ENOMEM);
1094 
1096  m->tmp_frames[i]->width = s->c.width >> m->brd_scale;
1097  m->tmp_frames[i]->height = s->c.height >> m->brd_scale;
1098 
1099  ret = av_frame_get_buffer(m->tmp_frames[i], 0);
1100  if (ret < 0)
1101  return ret;
1102  }
1103  }
1104 
1105  cpb_props = ff_encode_add_cpb_side_data(avctx);
1106  if (!cpb_props)
1107  return AVERROR(ENOMEM);
1108  cpb_props->max_bitrate = avctx->rc_max_rate;
1109  cpb_props->min_bitrate = avctx->rc_min_rate;
1110  cpb_props->avg_bitrate = avctx->bit_rate;
1111  cpb_props->buffer_size = avctx->rc_buffer_size;
1112 
1113  return 0;
1114 }
1115 
/* NOTE(review): the opening line of this definition (presumably
 * "av_cold int ff_mpv_encode_end(AVCodecContext *avctx)") was lost in
 * extraction. This is the encoder teardown: it releases every allocation
 * made by the init path (picture pool, reorder buffers, temporary frames,
 * motion/quant tables). */
1117 {
1118  MPVMainEncContext *const m = avctx->priv_data;
1119  MPVEncContext *const s = &m->s;
1120 
/* NOTE(review): one line dropped here in extraction -- confirm against
 * upstream (likely the rate-control uninit). */
1122 
1123  ff_mpv_common_end(&s->c);
1124  av_refstruct_pool_uninit(&s->c.picture_pool);
1125 
1126  for (int i = 0; i < MPVENC_MAX_B_FRAMES + 1; i++) {
/* NOTE(review): loop body dropped in extraction -- presumably unrefs the
 * entries of m->input_picture[] and m->reordered_input_picture[]. */
1129  }
1130  for (int i = 0; i < FF_ARRAY_ELEMS(m->tmp_frames); i++)
1131  av_frame_free(&m->tmp_frames[i]);
1132 
1133  av_frame_free(&s->new_pic);
1134 
/* NOTE(review): one line dropped -- likely frees avctx->stats_out, which
 * the init path allocates with av_mallocz(256). */
1136 
1137  av_freep(&m->mv_table_base);
1138  av_freep(&s->p_field_select_table[0]);
/* NOTE(review): one dropped line (another av_freep) -- verify upstream. */
1140 
1141  av_freep(&s->mb_type);
1142  av_freep(&s->lambda_table);
1143 
1144  av_freep(&s->q_intra_matrix);
1145  av_freep(&s->q_intra_matrix16);
1146  av_freep(&s->dct_offset);
1147 
1148  return 0;
1149 }
1150 
1151 /* put block[] to dest[] */
1152 static inline void put_dct(MPVEncContext *const s,
1153  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1154 {
1155  s->c.dct_unquantize_intra(&s->c, block, i, qscale);
1156  s->c.idsp.idct_put(dest, line_size, block);
1157 }
1158 
1159 static inline void add_dequant_dct(MPVEncContext *const s,
1160  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1161 {
1162  if (s->c.block_last_index[i] >= 0) {
1163  s->c.dct_unquantize_inter(&s->c, block, i, qscale);
1164 
1165  s->c.idsp.idct_add(dest, line_size, block);
1166  }
1167 }
1168 
1169 /**
1170  * Performs dequantization and IDCT (if necessary)
1171  */
1172 static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
1173 {
1174  if (s->c.avctx->debug & FF_DEBUG_DCT_COEFF) {
1175  /* print DCT coefficients */
1176  av_log(s->c.avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->c.mb_x, s->c.mb_y);
1177  for (int i = 0; i < 6; i++) {
1178  for (int j = 0; j < 64; j++) {
1179  av_log(s->c.avctx, AV_LOG_DEBUG, "%5d",
1180  block[i][s->c.idsp.idct_permutation[j]]);
1181  }
1182  av_log(s->c.avctx, AV_LOG_DEBUG, "\n");
1183  }
1184  }
1185 
1186  if ((1 << s->c.pict_type) & s->frame_reconstruction_bitfield) {
1187  uint8_t *dest_y = s->c.dest[0], *dest_cb = s->c.dest[1], *dest_cr = s->c.dest[2];
1188  int dct_linesize, dct_offset;
1189  const int linesize = s->c.cur_pic.linesize[0];
1190  const int uvlinesize = s->c.cur_pic.linesize[1];
1191  const int block_size = 8;
1192 
1193  dct_linesize = linesize << s->c.interlaced_dct;
1194  dct_offset = s->c.interlaced_dct ? linesize : linesize * block_size;
1195 
1196  if (!s->c.mb_intra) {
1197  /* No MC, as that was already done otherwise */
1198  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1199  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1200  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1201  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1202 
1203  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1204  if (s->c.chroma_y_shift) {
1205  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1206  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1207  } else {
1208  dct_linesize >>= 1;
1209  dct_offset >>= 1;
1210  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1211  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1212  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1213  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1214  }
1215  }
1216  } else {
1217  /* dct only in intra block */
1218  put_dct(s, block[0], 0, dest_y , dct_linesize, s->c.qscale);
1219  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->c.qscale);
1220  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->c.qscale);
1221  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->c.qscale);
1222 
1223  if (!CONFIG_GRAY || !(s->c.avctx->flags & AV_CODEC_FLAG_GRAY)) {
1224  if (s->c.chroma_y_shift) {
1225  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->c.chroma_qscale);
1226  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->c.chroma_qscale);
1227  } else {
1228  dct_offset >>= 1;
1229  dct_linesize >>= 1;
1230  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->c.chroma_qscale);
1231  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->c.chroma_qscale);
1232  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->c.chroma_qscale);
1233  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->c.chroma_qscale);
1234  }
1235  }
1236  }
1237  }
1238 }
1239 
/**
 * Sum of absolute differences between a 16x16 block and a constant value.
 *
 * @param src    top-left pixel of the 16x16 block
 * @param ref    constant value subtracted from every pixel
 * @param stride distance between consecutive rows of src
 * @return accumulated absolute error
 */
static int get_sae(const uint8_t *src, int ref, int stride)
{
    int sum = 0;

    for (int y = 0; y < 16; y++) {
        const uint8_t *row = src + y * stride;
        for (int x = 0; x < 16; x++) {
            int d = row[x] - ref;
            sum += d < 0 ? -d : d;
        }
    }

    return sum;
}
1253 
1254 static int get_intra_count(MPVEncContext *const s, const uint8_t *src,
1255  const uint8_t *ref, int stride)
1256 {
1257  int x, y, w, h;
1258  int acc = 0;
1259 
1260  w = s->c.width & ~15;
1261  h = s->c.height & ~15;
1262 
1263  for (y = 0; y < h; y += 16) {
1264  for (x = 0; x < w; x += 16) {
1265  int offset = x + y * stride;
1266  int sad = s->sad_cmp[0](NULL, src + offset, ref + offset,
1267  stride, 16);
1268  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1269  int sae = get_sae(src + offset, mean, stride);
1270 
1271  acc += sae + 500 < sad;
1272  }
1273  }
1274  return acc;
1275 }
1276 
1277 /**
1278  * Allocates new buffers for an AVFrame and copies the properties
1279  * from another AVFrame.
1280  */
1281 static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
1282 {
1283  AVCodecContext *avctx = s->c.avctx;
1284  int ret;
1285 
1286  f->width = avctx->width + 2 * EDGE_WIDTH;
1287  f->height = avctx->height + 2 * EDGE_WIDTH;
1288 
1290  if (ret < 0)
1291  return ret;
1292 
1293  ret = ff_mpv_pic_check_linesize(avctx, f, &s->c.linesize, &s->c.uvlinesize);
1294  if (ret < 0)
1295  return ret;
1296 
1297  for (int i = 0; f->data[i]; i++) {
1298  int offset = (EDGE_WIDTH >> (i ? s->c.chroma_y_shift : 0)) *
1299  f->linesize[i] +
1300  (EDGE_WIDTH >> (i ? s->c.chroma_x_shift : 0));
1301  f->data[i] += offset;
1302  }
1303  f->width = avctx->width;
1304  f->height = avctx->height;
1305 
1306  ret = av_frame_copy_props(f, props_frame);
1307  if (ret < 0)
1308  return ret;
1309 
1310  return 0;
1311 }
1312 
1313 static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
1314 {
1315  MPVEncContext *const s = &m->s;
1316  MPVPicture *pic = NULL;
1317  int64_t pts;
1318  int display_picture_number = 0, ret;
1319  int encoding_delay = m->max_b_frames ? m->max_b_frames
1320  : (s->c.low_delay ? 0 : 1);
1321  int flush_offset = 1;
1322  int direct = 1;
1323 
1324  av_assert1(!m->input_picture[0]);
1325 
1326  if (pic_arg) {
1327  pts = pic_arg->pts;
1328  display_picture_number = m->input_picture_number++;
1329 
1330  if (pts != AV_NOPTS_VALUE) {
1331  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1332  int64_t last = m->user_specified_pts;
1333 
1334  if (pts <= last) {
1335  av_log(s->c.avctx, AV_LOG_ERROR,
1336  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1337  pts, last);
1338  return AVERROR(EINVAL);
1339  }
1340 
1341  if (!s->c.low_delay && display_picture_number == 1)
1342  m->dts_delta = pts - last;
1343  }
1344  m->user_specified_pts = pts;
1345  } else {
1346  if (m->user_specified_pts != AV_NOPTS_VALUE) {
1347  m->user_specified_pts =
1348  pts = m->user_specified_pts + 1;
1349  av_log(s->c.avctx, AV_LOG_INFO,
1350  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1351  pts);
1352  } else {
1353  pts = display_picture_number;
1354  }
1355  }
1356 
1357  if (pic_arg->linesize[0] != s->c.linesize ||
1358  pic_arg->linesize[1] != s->c.uvlinesize ||
1359  pic_arg->linesize[2] != s->c.uvlinesize)
1360  direct = 0;
1361  if ((s->c.width & 15) || (s->c.height & 15))
1362  direct = 0;
1363  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1364  direct = 0;
1365  if (s->c.linesize & (STRIDE_ALIGN-1))
1366  direct = 0;
1367 
1368  ff_dlog(s->c.avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1369  pic_arg->linesize[1], s->c.linesize, s->c.uvlinesize);
1370 
1371  pic = av_refstruct_pool_get(s->c.picture_pool);
1372  if (!pic)
1373  return AVERROR(ENOMEM);
1374 
1375  if (direct) {
1376  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1377  goto fail;
1378  pic->shared = 1;
1379  } else {
1380  ret = prepare_picture(s, pic->f, pic_arg);
1381  if (ret < 0)
1382  goto fail;
1383 
1384  for (int i = 0; i < 3; i++) {
1385  ptrdiff_t src_stride = pic_arg->linesize[i];
1386  ptrdiff_t dst_stride = i ? s->c.uvlinesize : s->c.linesize;
1387  int h_shift = i ? s->c.chroma_x_shift : 0;
1388  int v_shift = i ? s->c.chroma_y_shift : 0;
1389  int w = AV_CEIL_RSHIFT(s->c.width , h_shift);
1390  int h = AV_CEIL_RSHIFT(s->c.height, v_shift);
1391  const uint8_t *src = pic_arg->data[i];
1392  uint8_t *dst = pic->f->data[i];
1393  int vpad = 16;
1394 
1395  if ( s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO
1396  && !s->c.progressive_sequence
1397  && FFALIGN(s->c.height, 32) - s->c.height > 16)
1398  vpad = 32;
1399 
1400  if (!s->c.avctx->rc_buffer_size)
1401  dst += INPLACE_OFFSET;
1402 
1403  if (src_stride == dst_stride)
1404  memcpy(dst, src, src_stride * h - src_stride + w);
1405  else {
1406  int h2 = h;
1407  uint8_t *dst2 = dst;
1408  while (h2--) {
1409  memcpy(dst2, src, w);
1410  dst2 += dst_stride;
1411  src += src_stride;
1412  }
1413  }
1414  if ((s->c.width & 15) || (s->c.height & (vpad-1))) {
1415  s->mpvencdsp.draw_edges(dst, dst_stride,
1416  w, h,
1417  16 >> h_shift,
1418  vpad >> v_shift,
1419  EDGE_BOTTOM);
1420  }
1421  }
1422  emms_c();
1423  }
1424 
1425  pic->display_picture_number = display_picture_number;
1426  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1427  } else if (!m->reordered_input_picture[1]) {
1428  /* Flushing: When the above check is true, the encoder is about to run
1429  * out of frames to encode. Check if there are input_pictures left;
1430  * if so, ensure m->input_picture[0] contains the first picture.
1431  * A flush_offset != 1 will only happen if we did not receive enough
1432  * input frames. */
1433  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1434  if (m->input_picture[flush_offset])
1435  break;
1436 
1437  encoding_delay -= flush_offset - 1;
1438  }
1439 
1440  /* shift buffer entries */
1441  for (int i = flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1442  m->input_picture[i - flush_offset] = m->input_picture[i];
1443  for (int i = MPVENC_MAX_B_FRAMES + 1 - flush_offset; i <= MPVENC_MAX_B_FRAMES; i++)
1444  m->input_picture[i] = NULL;
1445 
1446  m->input_picture[encoding_delay] = pic;
1447 
1448  return 0;
1449 fail:
1450  av_refstruct_unref(&pic);
1451  return ret;
1452 }
1453 
1454 static int skip_check(MPVMainEncContext *const m,
1455  const MPVPicture *p, const MPVPicture *ref)
1456 {
1457  MPVEncContext *const s = &m->s;
1458  int score = 0;
1459  int64_t score64 = 0;
1460 
1461  for (int plane = 0; plane < 3; plane++) {
1462  const int stride = p->f->linesize[plane];
1463  const int bw = plane ? 1 : 2;
1464  for (int y = 0; y < s->c.mb_height * bw; y++) {
1465  for (int x = 0; x < s->c.mb_width * bw; x++) {
1466  int off = p->shared ? 0 : 16;
1467  const uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1468  const uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1469  int v = m->frame_skip_cmp_fn(s, dptr, rptr, stride, 8);
1470 
1471  switch (FFABS(m->frame_skip_exp)) {
1472  case 0: score = FFMAX(score, v); break;
1473  case 1: score += FFABS(v); break;
1474  case 2: score64 += v * (int64_t)v; break;
1475  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1476  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1477  }
1478  }
1479  }
1480  }
1481  emms_c();
1482 
1483  if (score)
1484  score64 = score;
1485  if (m->frame_skip_exp < 0)
1486  score64 = pow(score64 / (double)(s->c.mb_width * s->c.mb_height),
1487  -1.0/m->frame_skip_exp);
1488 
1489  if (score64 < m->frame_skip_threshold)
1490  return 1;
1491  if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
1492  return 1;
1493  return 0;
1494 }
1495 
1497 {
1498  int ret;
1499  int size = 0;
1500 
1502  if (ret < 0)
1503  return ret;
1504 
1505  do {
1507  if (ret >= 0) {
1508  size += pkt->size;
1510  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1511  return ret;
1512  } while (ret >= 0);
1513 
1514  return size;
1515 }
1516 
1518 {
1519  MPVEncContext *const s = &m->s;
1520  AVPacket *pkt;
1521  const int scale = m->brd_scale;
1522  int width = s->c.width >> scale;
1523  int height = s->c.height >> scale;
1524  int out_size, p_lambda, b_lambda, lambda2;
1525  int64_t best_rd = INT64_MAX;
1526  int best_b_count = -1;
1527  int ret = 0;
1528 
1529  av_assert0(scale >= 0 && scale <= 3);
1530 
1531  pkt = av_packet_alloc();
1532  if (!pkt)
1533  return AVERROR(ENOMEM);
1534 
1535  //emms_c();
1536  p_lambda = m->last_lambda_for[AV_PICTURE_TYPE_P];
1537  //p_lambda * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset;
1538  b_lambda = m->last_lambda_for[AV_PICTURE_TYPE_B];
1539  if (!b_lambda) // FIXME we should do this somewhere else
1540  b_lambda = p_lambda;
1541  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1543 
1544  for (int i = 0; i < m->max_b_frames + 2; i++) {
1545  const MPVPicture *pre_input_ptr = i ? m->input_picture[i - 1] :
1546  s->c.next_pic.ptr;
1547 
1548  if (pre_input_ptr) {
1549  const uint8_t *data[4];
1550  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1551 
1552  if (!pre_input_ptr->shared && i) {
1553  data[0] += INPLACE_OFFSET;
1554  data[1] += INPLACE_OFFSET;
1555  data[2] += INPLACE_OFFSET;
1556  }
1557 
1558  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[0],
1559  m->tmp_frames[i]->linesize[0],
1560  data[0],
1561  pre_input_ptr->f->linesize[0],
1562  width, height);
1563  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[1],
1564  m->tmp_frames[i]->linesize[1],
1565  data[1],
1566  pre_input_ptr->f->linesize[1],
1567  width >> 1, height >> 1);
1568  s->mpvencdsp.shrink[scale](m->tmp_frames[i]->data[2],
1569  m->tmp_frames[i]->linesize[2],
1570  data[2],
1571  pre_input_ptr->f->linesize[2],
1572  width >> 1, height >> 1);
1573  }
1574  }
1575 
1576  for (int j = 0; j < m->max_b_frames + 1; j++) {
1577  AVCodecContext *c;
1578  int64_t rd = 0;
1579 
1580  if (!m->input_picture[j])
1581  break;
1582 
1584  if (!c) {
1585  ret = AVERROR(ENOMEM);
1586  goto fail;
1587  }
1588 
1589  c->width = width;
1590  c->height = height;
1592  c->flags |= s->c.avctx->flags & AV_CODEC_FLAG_QPEL;
1593  c->mb_decision = s->c.avctx->mb_decision;
1594  c->me_cmp = s->c.avctx->me_cmp;
1595  c->mb_cmp = s->c.avctx->mb_cmp;
1596  c->me_sub_cmp = s->c.avctx->me_sub_cmp;
1597  c->pix_fmt = AV_PIX_FMT_YUV420P;
1598  c->time_base = s->c.avctx->time_base;
1599  c->max_b_frames = m->max_b_frames;
1600 
1601  ret = avcodec_open2(c, s->c.avctx->codec, NULL);
1602  if (ret < 0)
1603  goto fail;
1604 
1605 
1607  m->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1608 
1609  out_size = encode_frame(c, m->tmp_frames[0], pkt);
1610  if (out_size < 0) {
1611  ret = out_size;
1612  goto fail;
1613  }
1614 
1615  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1616 
1617  for (int i = 0; i < m->max_b_frames + 1; i++) {
1618  int is_p = i % (j + 1) == j || i == m->max_b_frames;
1619 
1620  m->tmp_frames[i + 1]->pict_type = is_p ?
1622  m->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1623 
1624  out_size = encode_frame(c, m->tmp_frames[i + 1], pkt);
1625  if (out_size < 0) {
1626  ret = out_size;
1627  goto fail;
1628  }
1629 
1630  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1631  }
1632 
1633  /* get the delayed frames */
1635  if (out_size < 0) {
1636  ret = out_size;
1637  goto fail;
1638  }
1639  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1640 
1641  rd += c->error[0] + c->error[1] + c->error[2];
1642 
1643  if (rd < best_rd) {
1644  best_rd = rd;
1645  best_b_count = j;
1646  }
1647 
1648 fail:
1651  if (ret < 0) {
1652  best_b_count = ret;
1653  break;
1654  }
1655  }
1656 
1657  av_packet_free(&pkt);
1658 
1659  return best_b_count;
1660 }
1661 
1662 /**
1663  * Determines whether an input picture is discarded or not
1664  * and if not determines the length of the next chain of B frames
1665  * and moves these pictures (including the P frame) into
1666  * reordered_input_picture.
1667  * input_picture[0] is always NULL when exiting this function, even on error;
1668  * reordered_input_picture[0] is always NULL when exiting this function on error.
1669  */
1671 {
1672  MPVEncContext *const s = &m->s;
1673 
1674  /* Either nothing to do or can't do anything */
1675  if (m->reordered_input_picture[0] || !m->input_picture[0])
1676  return 0;
1677 
1678  /* set next picture type & ordering */
1679  if (m->frame_skip_threshold || m->frame_skip_factor) {
1680  if (m->picture_in_gop_number < m->gop_size &&
1681  s->c.next_pic.ptr &&
1682  skip_check(m, m->input_picture[0], s->c.next_pic.ptr)) {
1683  // FIXME check that the gop check above is +-1 correct
1685 
1686  ff_vbv_update(m, 0);
1687 
1688  return 0;
1689  }
1690  }
1691 
1692  if (/* m->picture_in_gop_number >= m->gop_size || */
1693  !s->c.next_pic.ptr || m->intra_only) {
1694  m->reordered_input_picture[0] = m->input_picture[0];
1695  m->input_picture[0] = NULL;
1698  m->coded_picture_number++;
1699  } else {
1700  int b_frames = 0;
1701 
1702  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
1703  for (int i = 0; i < m->max_b_frames + 1; i++) {
1704  int pict_num = m->input_picture[0]->display_picture_number + i;
1705 
1706  if (pict_num >= m->rc_context.num_entries)
1707  break;
1708  if (!m->input_picture[i]) {
1709  m->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1710  break;
1711  }
1712 
1713  m->input_picture[i]->f->pict_type =
1714  m->rc_context.entry[pict_num].new_pict_type;
1715  }
1716  }
1717 
1718  if (m->b_frame_strategy == 0) {
1719  b_frames = m->max_b_frames;
1720  while (b_frames && !m->input_picture[b_frames])
1721  b_frames--;
1722  } else if (m->b_frame_strategy == 1) {
1723  for (int i = 1; i < m->max_b_frames + 1; i++) {
1724  if (m->input_picture[i] &&
1725  m->input_picture[i]->b_frame_score == 0) {
1728  m->input_picture[i ]->f->data[0],
1729  m->input_picture[i - 1]->f->data[0],
1730  s->c.linesize) + 1;
1731  }
1732  }
1733  for (int i = 0;; i++) {
1734  if (i >= m->max_b_frames + 1 ||
1735  !m->input_picture[i] ||
1736  m->input_picture[i]->b_frame_score - 1 >
1737  s->c.mb_num / m->b_sensitivity) {
1738  b_frames = FFMAX(0, i - 1);
1739  break;
1740  }
1741  }
1742 
1743  /* reset scores */
1744  for (int i = 0; i < b_frames + 1; i++)
1745  m->input_picture[i]->b_frame_score = 0;
1746  } else if (m->b_frame_strategy == 2) {
1747  b_frames = estimate_best_b_count(m);
1748  if (b_frames < 0) {
1750  return b_frames;
1751  }
1752  }
1753 
1754  emms_c();
1755 
1756  for (int i = b_frames - 1; i >= 0; i--) {
1757  int type = m->input_picture[i]->f->pict_type;
1758  if (type && type != AV_PICTURE_TYPE_B)
1759  b_frames = i;
1760  }
1761  if (m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1762  b_frames == m->max_b_frames) {
1763  av_log(s->c.avctx, AV_LOG_ERROR,
1764  "warning, too many B-frames in a row\n");
1765  }
1766 
1767  if (m->picture_in_gop_number + b_frames >= m->gop_size) {
1768  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1769  m->gop_size > m->picture_in_gop_number) {
1770  b_frames = m->gop_size - m->picture_in_gop_number - 1;
1771  } else {
1772  if (s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1773  b_frames = 0;
1774  m->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1775  }
1776  }
1777 
1778  if ((s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1779  m->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1780  b_frames--;
1781 
1782  m->reordered_input_picture[0] = m->input_picture[b_frames];
1783  m->input_picture[b_frames] = NULL;
1787  m->coded_picture_number++;
1788  for (int i = 0; i < b_frames; i++) {
1789  m->reordered_input_picture[i + 1] = m->input_picture[i];
1790  m->input_picture[i] = NULL;
1791  m->reordered_input_picture[i + 1]->f->pict_type =
1794  m->coded_picture_number++;
1795  }
1796  }
1797 
1798  return 0;
1799 }
1800 
1802 {
1803  MPVEncContext *const s = &m->s;
1804  int ret;
1805 
1807 
1808  for (int i = 1; i <= MPVENC_MAX_B_FRAMES; i++)
1811 
1813  av_assert1(!m->input_picture[0]);
1814  if (ret < 0)
1815  return ret;
1816 
1817  av_frame_unref(s->new_pic);
1818 
1819  if (m->reordered_input_picture[0]) {
1822 
1823  if (m->reordered_input_picture[0]->shared || s->c.avctx->rc_buffer_size) {
1824  // input is a shared pix, so we can't modify it -> allocate a new
1825  // one & ensure that the shared one is reusable
1826  av_frame_move_ref(s->new_pic, m->reordered_input_picture[0]->f);
1827 
1828  ret = prepare_picture(s, m->reordered_input_picture[0]->f, s->new_pic);
1829  if (ret < 0)
1830  goto fail;
1831  } else {
1832  // input is not a shared pix -> reuse buffer for current_pix
1833  ret = av_frame_ref(s->new_pic, m->reordered_input_picture[0]->f);
1834  if (ret < 0)
1835  goto fail;
1836  for (int i = 0; i < MPV_MAX_PLANES; i++)
1837  s->new_pic->data[i] += INPLACE_OFFSET;
1838  }
1839  s->c.cur_pic.ptr = m->reordered_input_picture[0];
1840  m->reordered_input_picture[0] = NULL;
1841  av_assert1(s->c.mb_width == s->c.buffer_pools.alloc_mb_width);
1842  av_assert1(s->c.mb_height == s->c.buffer_pools.alloc_mb_height);
1843  av_assert1(s->c.mb_stride == s->c.buffer_pools.alloc_mb_stride);
1844  ret = ff_mpv_alloc_pic_accessories(s->c.avctx, &s->c.cur_pic,
1845  &s->c.sc, &s->c.buffer_pools, s->c.mb_height);
1846  if (ret < 0) {
1847  ff_mpv_unref_picture(&s->c.cur_pic);
1848  return ret;
1849  }
1850  s->picture_number = s->c.cur_pic.ptr->display_picture_number;
1851 
1852  }
1853  return 0;
1854 fail:
1856  return ret;
1857 }
1858 
1859 static void frame_end(MPVMainEncContext *const m)
1860 {
1861  MPVEncContext *const s = &m->s;
1862 
1863  if (s->me.unrestricted_mv &&
1864  s->c.cur_pic.reference &&
1865  !m->intra_only) {
1866  int hshift = s->c.chroma_x_shift;
1867  int vshift = s->c.chroma_y_shift;
1868  s->mpvencdsp.draw_edges(s->c.cur_pic.data[0],
1869  s->c.cur_pic.linesize[0],
1870  s->c.h_edge_pos, s->c.v_edge_pos,
1872  EDGE_TOP | EDGE_BOTTOM);
1873  s->mpvencdsp.draw_edges(s->c.cur_pic.data[1],
1874  s->c.cur_pic.linesize[1],
1875  s->c.h_edge_pos >> hshift,
1876  s->c.v_edge_pos >> vshift,
1877  EDGE_WIDTH >> hshift,
1878  EDGE_WIDTH >> vshift,
1879  EDGE_TOP | EDGE_BOTTOM);
1880  s->mpvencdsp.draw_edges(s->c.cur_pic.data[2],
1881  s->c.cur_pic.linesize[2],
1882  s->c.h_edge_pos >> hshift,
1883  s->c.v_edge_pos >> vshift,
1884  EDGE_WIDTH >> hshift,
1885  EDGE_WIDTH >> vshift,
1886  EDGE_TOP | EDGE_BOTTOM);
1887  }
1888 
1889  emms_c();
1890 
1891  m->last_pict_type = s->c.pict_type;
1892  m->last_lambda_for[s->c.pict_type] = s->c.cur_pic.ptr->f->quality;
1893  if (s->c.pict_type != AV_PICTURE_TYPE_B)
1894  m->last_non_b_pict_type = s->c.pict_type;
1895 }
1896 
1898 {
1899  MPVEncContext *const s = &m->s;
1900  int intra, i;
1901 
1902  for (intra = 0; intra < 2; intra++) {
1903  if (s->dct_count[intra] > (1 << 16)) {
1904  for (i = 0; i < 64; i++) {
1905  s->dct_error_sum[intra][i] >>= 1;
1906  }
1907  s->dct_count[intra] >>= 1;
1908  }
1909 
1910  for (i = 0; i < 64; i++) {
1911  s->dct_offset[intra][i] = (m->noise_reduction *
1912  s->dct_count[intra] +
1913  s->dct_error_sum[intra][i] / 2) /
1914  (s->dct_error_sum[intra][i] + 1);
1915  }
1916  }
1917 }
1918 
1919 static void frame_start(MPVMainEncContext *const m)
1920 {
1921  MPVEncContext *const s = &m->s;
1922 
1923  s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;
1924 
1925  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
1926  ff_mpv_replace_picture(&s->c.last_pic, &s->c.next_pic);
1927  ff_mpv_replace_picture(&s->c.next_pic, &s->c.cur_pic);
1928  }
1929 
1930  av_assert2(!!m->noise_reduction == !!s->dct_error_sum);
1931  if (s->dct_error_sum) {
1933  }
1934 }
1935 
/* NOTE(review): the first line of this definition (presumably
 * "int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,") was
 * lost in extraction, as were several interior lines (marked below).
 * This is the top-level per-frame entry point: it queues the input,
 * selects/encodes the next picture, handles VBV-driven re-encoding,
 * stuffing, vbv_delay patching and packet timestamps. */
1937  const AVFrame *pic_arg, int *got_packet)
1938 {
1939  MPVMainEncContext *const m = avctx->priv_data;
1940  MPVEncContext *const s = &m->s;
1941  int stuffing_count, ret;
1942  int context_count = s->c.slice_context_count;
1943 
1944  ff_mpv_unref_picture(&s->c.cur_pic);
1945 
1946  m->vbv_ignore_qmax = 0;
1947 
1948  m->picture_in_gop_number++;
1949 
1950  ret = load_input_picture(m, pic_arg);
1951  if (ret < 0)
1952  return ret;
1953 
/* NOTE(review): dropped line -- presumably "ret = select_input_picture(m);". */
1955  if (ret < 0)
1956  return ret;
1957 
1958  /* output? */
1959  if (s->new_pic->data[0]) {
1960  int growing_buffer = context_count == 1 && !s->data_partitioning;
1961  size_t pkt_size = 10000 + s->c.mb_width * s->c.mb_height *
1962  (growing_buffer ? 64 : (MAX_MB_BYTES + 100));
1963  if (CONFIG_MJPEG_ENCODER && avctx->codec_id == AV_CODEC_ID_MJPEG) {
1964  ret = ff_mjpeg_add_icc_profile_size(avctx, s->new_pic, &pkt_size);
1965  if (ret < 0)
1966  return ret;
1967  }
1968  if ((ret = ff_alloc_packet(avctx, pkt, pkt_size)) < 0)
1969  return ret;
/* NOTE(review): one dropped line here -- confirm against upstream. */
1971  if (s->mb_info) {
1972  s->mb_info_ptr = av_packet_new_side_data(pkt,
/* NOTE(review): dropped line -- likely the side-data type argument
 * (AV_PKT_DATA_H263_MB_INFO). */
1974  s->c.mb_width*s->c.mb_height*12);
1975  if (!s->mb_info_ptr)
1976  return AVERROR(ENOMEM);
1977  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1978  }
1979 
1980  s->c.pict_type = s->new_pic->pict_type;
1981  //emms_c();
1982  frame_start(m);
1983 vbv_retry:
1984  ret = encode_picture(m, pkt);
1985  if (growing_buffer) {
1986  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1987  pkt->data = s->pb.buf;
/* NOTE(review): one dropped line (likely updates pkt->size) -- verify. */
1989  }
1990  if (ret < 0)
1991  return -1;
1992 
1993  frame_end(m);
1994 
1995  if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) && s->c.out_format == FMT_MJPEG)
/* NOTE(review): dropped line -- presumably the MJPEG picture trailer call. */
1997 
1998  if (avctx->rc_buffer_size) {
1999  RateControlContext *rcc = &m->rc_context;
2000  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
2001  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
2002  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
2003 
2004  if (put_bits_count(&s->pb) > max_size &&
2005  s->lambda < m->lmax) {
2006  m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
2007  (s->c.qscale + 1) / s->c.qscale);
2008  if (s->adaptive_quant) {
2009  for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
2010  s->lambda_table[i] =
2011  FFMAX(s->lambda_table[i] + min_step,
2012  s->lambda_table[i] * (s->c.qscale + 1) /
2013  s->c.qscale);
2014  }
2015  s->c.mb_skipped = 0; // done in frame_start()
2016  // done in encode_picture() so we must undo it
2017  if (s->c.pict_type == AV_PICTURE_TYPE_P) {
2018  s->c.no_rounding ^= s->flipflop_rounding;
2019  }
2020  if (s->c.pict_type != AV_PICTURE_TYPE_B) {
2021  s->c.time_base = s->c.last_time_base;
2022  s->c.last_non_b_time = s->c.time - s->c.pp_time;
2023  }
2024  m->vbv_ignore_qmax = 1;
2025  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
2026  goto vbv_retry;
2027  }
2028 
/* NOTE(review): one dropped line -- confirm against upstream. */
2030  }
2031 
/* NOTE(review): two dropped lines -- confirm against upstream. */
2034 
2035  for (int i = 0; i < MPV_MAX_PLANES; i++)
2036  avctx->error[i] += s->encoding_error[i];
2037  ff_encode_add_stats_side_data(pkt, s->c.cur_pic.ptr->f->quality,
2038  s->encoding_error,
/* NOTE(review): dropped line -- likely the error-count argument gated on
 * AV_CODEC_FLAG_PSNR. */
2040  s->c.pict_type);
2041 
/* NOTE(review): dropped line -- likely a condition guarding the assert. */
2043  assert(put_bits_count(&s->pb) == m->header_bits + s->mv_bits +
2044  s->misc_bits + s->i_tex_bits +
2045  s->p_tex_bits);
2046  flush_put_bits(&s->pb);
2047  m->frame_bits = put_bits_count(&s->pb);
2048 
2049  stuffing_count = ff_vbv_update(m, m->frame_bits);
2050  m->stuffing_bits = 8*stuffing_count;
2051  if (stuffing_count) {
2052  if (put_bytes_left(&s->pb, 0) < stuffing_count + 50) {
2053  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2054  return -1;
2055  }
2056 
2057  switch (s->c.codec_id) {
/* NOTE(review): two dropped lines -- presumably the MPEG-1/MPEG-2 case
 * labels for zero-byte stuffing. */
2060  while (stuffing_count--) {
2061  put_bits(&s->pb, 8, 0);
2062  }
2063  break;
2064  case AV_CODEC_ID_MPEG4:
2065  put_bits(&s->pb, 16, 0);
2066  put_bits(&s->pb, 16, 0x1C3);
2067  stuffing_count -= 4;
2068  while (stuffing_count--) {
2069  put_bits(&s->pb, 8, 0xFF);
2070  }
2071  break;
2072  default:
2073  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2074  m->stuffing_bits = 0;
2075  }
2076  flush_put_bits(&s->pb);
2077  m->frame_bits = put_bits_count(&s->pb);
2078  }
2079 
2080  /* update MPEG-1/2 vbv_delay for CBR */
2081  if (avctx->rc_max_rate &&
/* NOTE(review): dropped condition line -- likely the CBR check
 * (rc_min_rate == rc_max_rate). */
2083  s->c.out_format == FMT_MPEG1 &&
2084  90000LL * (avctx->rc_buffer_size - 1) <=
2085  avctx->rc_max_rate * 0xFFFFLL) {
2086  AVCPBProperties *props;
2087  size_t props_size;
2088 
2089  int vbv_delay, min_delay;
2090  double inbits = avctx->rc_max_rate *
/* NOTE(review): dropped line -- likely av_q2d(avctx->time_base). */
2092  int minbits = m->frame_bits - 8 *
2093  (m->vbv_delay_pos - 1);
2094  double bits = m->rc_context.buffer_index + minbits - inbits;
2095  uint8_t *const vbv_delay_ptr = s->pb.buf + m->vbv_delay_pos;
2096 
2097  if (bits < 0)
/* NOTE(review): dropped line -- the av_log(...) call that this message
 * string belongs to. */
2099  "Internal error, negative bits\n");
2100 
2101  av_assert1(s->c.repeat_first_field == 0);
2102 
2103  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2104  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2105  avctx->rc_max_rate;
2106 
2107  vbv_delay = FFMAX(vbv_delay, min_delay);
2108 
2109  av_assert0(vbv_delay < 0xFFFF);
2110 
2111  vbv_delay_ptr[0] &= 0xF8;
2112  vbv_delay_ptr[0] |= vbv_delay >> 13;
2113  vbv_delay_ptr[1] = vbv_delay >> 5;
2114  vbv_delay_ptr[2] &= 0x07;
2115  vbv_delay_ptr[2] |= vbv_delay << 3;
2116 
2117  props = av_cpb_properties_alloc(&props_size);
2118  if (!props)
2119  return AVERROR(ENOMEM);
2120  props->vbv_delay = vbv_delay * 300;
2121 
/* NOTE(review): dropped line -- likely the av_packet_add_side_data call
 * whose trailing arguments follow. */
2123  (uint8_t*)props, props_size);
2124  if (ret < 0) {
2125  av_freep(&props);
2126  return ret;
2127  }
2128  }
2129  m->total_bits += m->frame_bits;
2130 
2131  pkt->pts = s->c.cur_pic.ptr->f->pts;
2132  pkt->duration = s->c.cur_pic.ptr->f->duration;
2133  if (!s->c.low_delay && s->c.pict_type != AV_PICTURE_TYPE_B) {
2134  if (!s->c.cur_pic.ptr->coded_picture_number)
2135  pkt->dts = pkt->pts - m->dts_delta;
2136  else
2137  pkt->dts = m->reordered_pts;
2138  m->reordered_pts = pkt->pts;
2139  } else
2140  pkt->dts = pkt->pts;
2141 
2142  // the no-delay case is handled in generic code
/* NOTE(review): dropped line -- likely the AV_CODEC_CAP_DELAY condition
 * opening this braced section. */
2144  ret = ff_encode_reordered_opaque(avctx, pkt, s->c.cur_pic.ptr->f);
2145  if (ret < 0)
2146  return ret;
2147  }
2148 
2149  if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
/* NOTE(review): dropped line -- presumably sets AV_PKT_FLAG_KEY. */
2151  if (s->mb_info)
/* NOTE(review): dropped line -- presumably shrinks the MB-info side data. */
2153  } else {
2154  m->frame_bits = 0;
2155  }
2156 
2157  ff_mpv_unref_picture(&s->c.cur_pic);
2158 
2159  av_assert1((m->frame_bits & 7) == 0);
2160 
2161  pkt->size = m->frame_bits / 8;
2162  *got_packet = !!pkt->size;
2163  return 0;
2164 }
2165 
/* Single-coefficient elimination: if a block contains only isolated +-1
 * coefficients whose total "usefulness" score stays below the threshold,
 * zero the whole block (cheaper to code, visually negligible).
 * NOTE(review): the extraction dropped this function's opening signature
 * line (original line 2166); parameters visible below are the block
 * index n and the elimination threshold. */
2167  int n, int threshold)
2168 {
/* Score per zero-run length preceding a +-1 coefficient: +-1s after short
 * runs are considered more valuable than lone ones after long runs. */
2169  static const char tab[64] = {
2170  3, 2, 2, 1, 1, 1, 1, 1,
2171  1, 1, 1, 1, 1, 1, 1, 1,
2172  1, 1, 1, 1, 1, 1, 1, 1,
2173  0, 0, 0, 0, 0, 0, 0, 0,
2174  0, 0, 0, 0, 0, 0, 0, 0,
2175  0, 0, 0, 0, 0, 0, 0, 0,
2176  0, 0, 0, 0, 0, 0, 0, 0,
2177  0, 0, 0, 0, 0, 0, 0, 0
2178  };
2179  int score = 0;
2180  int run = 0;
2181  int i;
2182  int16_t *block = s->block[n];
2183  const int last_index = s->c.block_last_index[n];
2184  int skip_dc;
2185 
/* Negative threshold means: also consider eliminating the DC coefficient. */
2186  if (threshold < 0) {
2187  skip_dc = 0;
2188  threshold = -threshold;
2189  } else
2190  skip_dc = 1;
2191 
2192  /* Are all we could set to zero already zero? */
2193  if (last_index <= skip_dc - 1)
2194  return;
2195 
/* Accumulate the score; any coefficient with |level| > 1 makes the block
 * ineligible for elimination, so bail out immediately. */
2196  for (i = 0; i <= last_index; i++) {
2197  const int j = s->c.intra_scantable.permutated[i];
2198  const int level = FFABS(block[j]);
2199  if (level == 1) {
2200  if (skip_dc && i == 0)
2201  continue;
2202  score += tab[run];
2203  run = 0;
2204  } else if (level > 1) {
2205  return;
2206  } else {
2207  run++;
2208  }
2209  }
2210  if (score >= threshold)
2211  return;
/* Below threshold: zero every eliminable coefficient in scan order. */
2212  for (i = skip_dc; i <= last_index; i++) {
2213  const int j = s->c.intra_scantable.permutated[i];
2214  block[j] = 0;
2215  }
/* Update last_index: 0 if a DC coefficient survived, -1 for an all-zero block. */
2216  if (block[0])
2217  s->c.block_last_index[n] = 0;
2218  else
2219  s->c.block_last_index[n] = -1;
2220 }
2221 
2222 static inline void clip_coeffs(const MPVEncContext *const s, int16_t block[],
2223  int last_index)
2224 {
2225  int i;
2226  const int maxlevel = s->max_qcoeff;
2227  const int minlevel = s->min_qcoeff;
2228  int overflow = 0;
2229 
2230  if (s->c.mb_intra) {
2231  i = 1; // skip clipping of intra dc
2232  } else
2233  i = 0;
2234 
2235  for (; i <= last_index; i++) {
2236  const int j = s->c.intra_scantable.permutated[i];
2237  int level = block[j];
2238 
2239  if (level > maxlevel) {
2240  level = maxlevel;
2241  overflow++;
2242  } else if (level < minlevel) {
2243  level = minlevel;
2244  overflow++;
2245  }
2246 
2247  block[j] = level;
2248  }
2249 
2250  if (overflow && s->c.avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2251  av_log(s->c.avctx, AV_LOG_INFO,
2252  "warning, clipping %d dct coefficients to %d..%d\n",
2253  overflow, minlevel, maxlevel);
2254 }
2255 
2256 static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
2257 {
2258  int x, y;
2259  // FIXME optimize
2260  for (y = 0; y < 8; y++) {
2261  for (x = 0; x < 8; x++) {
2262  int x2, y2;
2263  int sum = 0;
2264  int sqr = 0;
2265  int count = 0;
2266 
2267  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2268  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2269  int v = ptr[x2 + y2 * stride];
2270  sum += v;
2271  sqr += v * v;
2272  count++;
2273  }
2274  }
2275  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2276  }
2277  }
2278 }
2279 
/* Core per-macroblock encode step: fetch source pixels (with edge
 * emulation at picture borders), apply adaptive quantization, perform
 * intra pixel fetch or inter motion compensation + residual computation,
 * choose progressive vs interlaced DCT, quantize all blocks and hand them
 * to the codec-specific bitstream writer via s->encode_mb().
 * NOTE(review): the extraction dropped this function's opening signature
 * line (encode_mb_internal, original line 2280) and a few interior lines
 * (e.g. 2313, 2391); the code below is kept byte-identical, only
 * comments were added. */
2281  int motion_x, int motion_y,
2282  int mb_block_height,
2283  int mb_block_width,
2284  int mb_block_count,
2285  int chroma_x_shift,
2286  int chroma_y_shift,
2287  int chroma_format)
2288 {
2289 /* Interlaced DCT is only possible with MPEG-2 and MPEG-4
2290  * and neither of these encoders currently supports 444. */
2291 #define INTERLACED_DCT(s) ((chroma_format == CHROMA_420 || chroma_format == CHROMA_422) && \
2292  (s)->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT)
2293  DECLARE_ALIGNED(16, int16_t, weight)[12][64];
2294  int16_t orig[12][64];
2295  const int mb_x = s->c.mb_x;
2296  const int mb_y = s->c.mb_y;
2297  int i;
2298  int skip_dct[12];
2299  int dct_offset = s->c.linesize * 8; // default for progressive frames
2300  int uv_dct_offset = s->c.uvlinesize * 8;
2301  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2302  ptrdiff_t wrap_y, wrap_c;
2303 
2304  for (i = 0; i < mb_block_count; i++)
2305  skip_dct[i] = s->skipdct;
2306 
/* Adaptive quantization: pick this MB's lambda/qscale from the per-MB
 * tables; H.263-family syntax restricts the allowed dquant values. */
2307  if (s->adaptive_quant) {
2308  const int last_qp = s->c.qscale;
2309  const int mb_xy = mb_x + mb_y * s->c.mb_stride;
2310 
2311  s->lambda = s->lambda_table[mb_xy];
2312  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
2314 
2315  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2316  s->dquant = s->c.cur_pic.qscale_table[mb_xy] - last_qp;
2317 
2318  if (s->c.out_format == FMT_H263) {
2319  s->dquant = av_clip(s->dquant, -2, 2);
2320 
2321  if (s->c.codec_id == AV_CODEC_ID_MPEG4) {
2322  if (!s->c.mb_intra) {
2323  if (s->c.pict_type == AV_PICTURE_TYPE_B) {
2324  if (s->dquant & 1 || s->c.mv_dir & MV_DIRECT)
2325  s->dquant = 0;
2326  }
2327  if (s->c.mv_type == MV_TYPE_8X8)
2328  s->dquant = 0;
2329  }
2330  }
2331  }
2332  }
2333  ff_set_qscale(&s->c, last_qp + s->dquant);
2334  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2335  ff_set_qscale(&s->c, s->c.qscale + s->dquant);
2336 
2337  wrap_y = s->c.linesize;
2338  wrap_c = s->c.uvlinesize;
2339  ptr_y = s->new_pic->data[0] +
2340  (mb_y * 16 * wrap_y) + mb_x * 16;
2341  ptr_cb = s->new_pic->data[1] +
2342  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2343  ptr_cr = s->new_pic->data[2] +
2344  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2345 
/* Right/bottom border MBs: replicate edge pixels into the scratch buffer
 * so the DCT always reads a full 16x16 / chroma-sized area. */
2346  if ((mb_x * 16 + 16 > s->c.width || mb_y * 16 + 16 > s->c.height) &&
2347  s->c.codec_id != AV_CODEC_ID_AMV) {
2348  uint8_t *ebuf = s->c.sc.edge_emu_buffer + 38 * wrap_y;
2349  int cw = (s->c.width + chroma_x_shift) >> chroma_x_shift;
2350  int ch = (s->c.height + chroma_y_shift) >> chroma_y_shift;
2351  s->c.vdsp.emulated_edge_mc(ebuf, ptr_y,
2352  wrap_y, wrap_y,
2353  16, 16, mb_x * 16, mb_y * 16,
2354  s->c.width, s->c.height);
2355  ptr_y = ebuf;
2356  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2357  wrap_c, wrap_c,
2358  mb_block_width, mb_block_height,
2359  mb_x * mb_block_width, mb_y * mb_block_height,
2360  cw, ch);
2361  ptr_cb = ebuf + 16 * wrap_y;
2362  s->c.vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2363  wrap_c, wrap_c,
2364  mb_block_width, mb_block_height,
2365  mb_x * mb_block_width, mb_y * mb_block_height,
2366  cw, ch);
2367  ptr_cr = ebuf + 16 * wrap_y + 16;
2368  }
2369 
/* Intra path: copy source pixels into the DCT blocks, after deciding
 * between progressive and interlaced DCT by comparing ildct scores. */
2370  if (s->c.mb_intra) {
2371  if (INTERLACED_DCT(s)) {
2372  int progressive_score, interlaced_score;
2373 
2374  s->c.interlaced_dct = 0;
2375  progressive_score = s->ildct_cmp[1](s, ptr_y, NULL, wrap_y, 8) +
2376  s->ildct_cmp[1](s, ptr_y + wrap_y * 8,
2377  NULL, wrap_y, 8) - 400;
2378 
2379  if (progressive_score > 0) {
2380  interlaced_score = s->ildct_cmp[1](s, ptr_y,
2381  NULL, wrap_y * 2, 8) +
2382  s->ildct_cmp[1](s, ptr_y + wrap_y,
2383  NULL, wrap_y * 2, 8);
2384  if (progressive_score > interlaced_score) {
2385  s->c.interlaced_dct = 1;
2386 
2387  dct_offset = wrap_y;
2388  uv_dct_offset = wrap_c;
2389  wrap_y <<= 1;
2390  if (chroma_format == CHROMA_422 ||
2392  wrap_c <<= 1;
2393  }
2394  }
2395  }
2396 
2397  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2398  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2399  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2400  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2401 
2402  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2403  skip_dct[4] = 1;
2404  skip_dct[5] = 1;
2405  } else {
2406  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2407  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2408  if (chroma_format == CHROMA_422) {
2409  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2410  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2411  } else if (chroma_format == CHROMA_444) {
2412  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2413  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2414  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2415  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2416  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2417  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2418  }
2419  }
2420  } else {
/* Inter path: motion-compensate into s->c.dest, then store the residual
 * (source minus prediction) into the DCT blocks. */
2421  op_pixels_func (*op_pix)[4];
2422  qpel_mc_func (*op_qpix)[16];
2423  uint8_t *dest_y, *dest_cb, *dest_cr;
2424 
2425  dest_y = s->c.dest[0];
2426  dest_cb = s->c.dest[1];
2427  dest_cr = s->c.dest[2];
2428 
2429  if ((!s->c.no_rounding) || s->c.pict_type == AV_PICTURE_TYPE_B) {
2430  op_pix = s->c.hdsp.put_pixels_tab;
2431  op_qpix = s->c.qdsp.put_qpel_pixels_tab;
2432  } else {
2433  op_pix = s->c.hdsp.put_no_rnd_pixels_tab;
2434  op_qpix = s->c.qdsp.put_no_rnd_qpel_pixels_tab;
2435  }
2436 
2437  if (s->c.mv_dir & MV_DIR_FORWARD) {
2438  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 0,
2439  s->c.last_pic.data,
2440  op_pix, op_qpix);
2441  op_pix = s->c.hdsp.avg_pixels_tab;
2442  op_qpix = s->c.qdsp.avg_qpel_pixels_tab;
2443  }
2444  if (s->c.mv_dir & MV_DIR_BACKWARD) {
2445  ff_mpv_motion(&s->c, dest_y, dest_cb, dest_cr, 1,
2446  s->c.next_pic.data,
2447  op_pix, op_qpix);
2448  }
2449 
2450  if (INTERLACED_DCT(s)) {
2451  int progressive_score, interlaced_score;
2452 
2453  s->c.interlaced_dct = 0;
2454  progressive_score = s->ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2455  s->ildct_cmp[0](s, dest_y + wrap_y * 8,
2456  ptr_y + wrap_y * 8,
2457  wrap_y, 8) - 400;
2458 
2459  if (s->c.avctx->ildct_cmp == FF_CMP_VSSE)
2460  progressive_score -= 400;
2461 
2462  if (progressive_score > 0) {
2463  interlaced_score = s->ildct_cmp[0](s, dest_y, ptr_y,
2464  wrap_y * 2, 8) +
2465  s->ildct_cmp[0](s, dest_y + wrap_y,
2466  ptr_y + wrap_y,
2467  wrap_y * 2, 8);
2468 
2469  if (progressive_score > interlaced_score) {
2470  s->c.interlaced_dct = 1;
2471 
2472  dct_offset = wrap_y;
2473  uv_dct_offset = wrap_c;
2474  wrap_y <<= 1;
2475  if (chroma_format == CHROMA_422)
2476  wrap_c <<= 1;
2477  }
2478  }
2479  }
2480 
2481  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2482  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2483  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2484  dest_y + dct_offset, wrap_y);
2485  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2486  dest_y + dct_offset + 8, wrap_y);
2487 
2488  if (s->c.avctx->flags & AV_CODEC_FLAG_GRAY) {
2489  skip_dct[4] = 1;
2490  skip_dct[5] = 1;
2491  } else {
2492  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2493  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2494  if (!chroma_y_shift) { /* 422 */
2495  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2496  dest_cb + uv_dct_offset, wrap_c);
2497  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2498  dest_cr + uv_dct_offset, wrap_c);
2499  }
2500  }
2501  /* pre quantization */
2502  if (s->mc_mb_var[s->c.mb_stride * mb_y + mb_x] < 2 * s->c.qscale * s->c.qscale) {
2503  // FIXME optimize
2504  if (s->sad_cmp[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->c.qscale)
2505  skip_dct[0] = 1;
2506  if (s->sad_cmp[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->c.qscale)
2507  skip_dct[1] = 1;
2508  if (s->sad_cmp[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2509  wrap_y, 8) < 20 * s->c.qscale)
2510  skip_dct[2] = 1;
2511  if (s->sad_cmp[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2512  wrap_y, 8) < 20 * s->c.qscale)
2513  skip_dct[3] = 1;
2514  if (s->sad_cmp[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->c.qscale)
2515  skip_dct[4] = 1;
2516  if (s->sad_cmp[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->c.qscale)
2517  skip_dct[5] = 1;
2518  if (!chroma_y_shift) { /* 422 */
2519  if (s->sad_cmp[1](NULL, ptr_cb + uv_dct_offset,
2520  dest_cb + uv_dct_offset,
2521  wrap_c, 8) < 20 * s->c.qscale)
2522  skip_dct[6] = 1;
2523  if (s->sad_cmp[1](NULL, ptr_cr + uv_dct_offset,
2524  dest_cr + uv_dct_offset,
2525  wrap_c, 8) < 20 * s->c.qscale)
2526  skip_dct[7] = 1;
2527  }
2528  }
2529  }
2530 
/* Quantizer noise shaping needs the visual weights and a copy of the
 * unquantized coefficients for the refinement pass below. */
2531  if (s->quantizer_noise_shaping) {
2532  if (!skip_dct[0])
2533  get_visual_weight(weight[0], ptr_y , wrap_y);
2534  if (!skip_dct[1])
2535  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2536  if (!skip_dct[2])
2537  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2538  if (!skip_dct[3])
2539  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2540  if (!skip_dct[4])
2541  get_visual_weight(weight[4], ptr_cb , wrap_c);
2542  if (!skip_dct[5])
2543  get_visual_weight(weight[5], ptr_cr , wrap_c);
2544  if (!chroma_y_shift) { /* 422 */
2545  if (!skip_dct[6])
2546  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2547  wrap_c);
2548  if (!skip_dct[7])
2549  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2550  wrap_c);
2551  }
2552  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2553  }
2554 
2555  /* DCT & quantize */
2556  av_assert2(s->c.out_format != FMT_MJPEG || s->c.qscale == 8);
2557  {
2558  for (i = 0; i < mb_block_count; i++) {
2559  if (!skip_dct[i]) {
2560  int overflow;
2561  s->c.block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->c.qscale, &overflow);
2562  // FIXME we could decide to change to quantizer instead of
2563  // clipping
2564  // JS: I don't think that would be a good idea it could lower
2565  // quality instead of improve it. Just INTRADC clipping
2566  // deserves changes in quantizer
2567  if (overflow)
2568  clip_coeffs(s, s->block[i], s->c.block_last_index[i]);
2569  } else
2570  s->c.block_last_index[i] = -1;
2571  }
2572  if (s->quantizer_noise_shaping) {
2573  for (i = 0; i < mb_block_count; i++) {
2574  if (!skip_dct[i]) {
2575  s->c.block_last_index[i] =
2576  dct_quantize_refine(s, s->block[i], weight[i],
2577  orig[i], i, s->c.qscale);
2578  }
2579  }
2580  }
2581 
2582  if (s->luma_elim_threshold && !s->c.mb_intra)
2583  for (i = 0; i < 4; i++)
2584  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2585  if (s->chroma_elim_threshold && !s->c.mb_intra)
2586  for (i = 4; i < mb_block_count; i++)
2587  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2588 
2589  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2590  for (i = 0; i < mb_block_count; i++) {
2591  if (s->c.block_last_index[i] == -1)
2592  s->coded_score[i] = INT_MAX / 256;
2593  }
2594  }
2595  }
2596 
/* Gray-only encoding of an intra MB: force chroma DC to mid-gray. */
2597  if ((s->c.avctx->flags & AV_CODEC_FLAG_GRAY) && s->c.mb_intra) {
2598  s->c.block_last_index[4] =
2599  s->c.block_last_index[5] = 0;
2600  s->block[4][0] =
2601  s->block[5][0] = (1024 + s->c.c_dc_scale / 2) / s->c.c_dc_scale;
2602  if (!chroma_y_shift) { /* 422 / 444 */
2603  for (i=6; i<12; i++) {
2604  s->c.block_last_index[i] = 0;
2605  s->block[i][0] = s->block[4][0];
2606  }
2607  }
2608  }
2609 
2610  // non c quantize code returns incorrect block_last_index FIXME
2611  if (s->c.alternate_scan && s->dct_quantize != dct_quantize_c) {
2612  for (i = 0; i < mb_block_count; i++) {
2613  int j;
2614  if (s->c.block_last_index[i] > 0) {
2615  for (j = 63; j > 0; j--) {
2616  if (s->block[i][s->c.intra_scantable.permutated[j]])
2617  break;
2618  }
2619  s->c.block_last_index[i] = j;
2620  }
2621  }
2622  }
2623 
2624  s->encode_mb(s, s->block, motion_x, motion_y);
2625 }
2626 
2627 static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
2628 {
2629  if (s->c.chroma_format == CHROMA_420)
2630  encode_mb_internal(s, motion_x, motion_y, 8, 8, 6, 1, 1, CHROMA_420);
2631  else if (s->c.chroma_format == CHROMA_422)
2632  encode_mb_internal(s, motion_x, motion_y, 16, 8, 8, 1, 0, CHROMA_422);
2633  else
2634  encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
2635 }
2636 
/* Snapshot of the per-macroblock encoder state used by the RD mode
 * search (encode_mb_hq()) to save and restore context between encoding
 * trials; filled/applied by the COPY_CONTEXT-generated helpers below.
 * NOTE(review): the extraction dropped several member lines here
 * (original lines 2641-2653 are only partially present), so the struct
 * as shown is incomplete. */
2637 typedef struct MBBackup {
2638  struct {
2639  int mv[2][4][2];
2640  int last_mv[2][2][2];
2643  int qscale;
2646  } c;
2648  int last_dc[3];
2650  int dquant;
2652  int16_t (*block)[64];
2654 } MBBackup;
2655 
/* Generates a pair of helpers that copy encoder state between an
 * MPVEncContext and an MBBackup (in either direction, selected by
 * DST_TYPE/SRC_TYPE):
 *   BEFORE##_context_before_encode() - state needed to (re)start encoding
 *     a macroblock: last MVs, MPEG-1 skip run, DC predictors, bit-count
 *     statistics, qscale/dquant;
 *   AFTER##_context_after_encode()  - additionally the results of an
 *     encode trial: chosen MVs, mb type flags, PutBitContexts, block
 *     pointer and last indexes.
 * Kept as a macro (not functions) so the same body works for both copy
 * directions with full type checking. */
2656 #define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE) \
2657 static inline void BEFORE ##_context_before_encode(DST_TYPE *const d, \
2658  const SRC_TYPE *const s) \
2659 { \
2660  /* FIXME is memcpy faster than a loop? */ \
2661  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2662  \
2663  /* MPEG-1 */ \
2664  d->mb_skip_run = s->mb_skip_run; \
2665  for (int i = 0; i < 3; i++) \
2666  d->last_dc[i] = s->last_dc[i]; \
2667  \
2668  /* statistics */ \
2669  d->mv_bits = s->mv_bits; \
2670  d->i_tex_bits = s->i_tex_bits; \
2671  d->p_tex_bits = s->p_tex_bits; \
2672  d->i_count = s->i_count; \
2673  d->misc_bits = s->misc_bits; \
2674  d->last_bits = 0; \
2675  \
2676  d->c.mb_skipped = 0; \
2677  d->c.qscale = s->c.qscale; \
2678  d->dquant = s->dquant; \
2679  \
2680  d->esc3_level_length = s->esc3_level_length; \
2681 } \
2682  \
2683 static inline void AFTER ## _context_after_encode(DST_TYPE *const d, \
2684  const SRC_TYPE *const s, \
2685  int data_partitioning) \
2686 { \
2687  /* FIXME is memcpy faster than a loop? */ \
2688  memcpy(d->c.mv, s->c.mv, 2*4*2*sizeof(int)); \
2689  memcpy(d->c.last_mv, s->c.last_mv, 2*2*2*sizeof(int)); \
2690  \
2691  /* MPEG-1 */ \
2692  d->mb_skip_run = s->mb_skip_run; \
2693  for (int i = 0; i < 3; i++) \
2694  d->last_dc[i] = s->last_dc[i]; \
2695  \
2696  /* statistics */ \
2697  d->mv_bits = s->mv_bits; \
2698  d->i_tex_bits = s->i_tex_bits; \
2699  d->p_tex_bits = s->p_tex_bits; \
2700  d->i_count = s->i_count; \
2701  d->misc_bits = s->misc_bits; \
2702  \
2703  d->c.mb_intra = s->c.mb_intra; \
2704  d->c.mb_skipped = s->c.mb_skipped; \
2705  d->c.mv_type = s->c.mv_type; \
2706  d->c.mv_dir = s->c.mv_dir; \
2707  d->pb = s->pb; \
2708  if (data_partitioning) { \
2709  d->pb2 = s->pb2; \
2710  d->tex_pb = s->tex_pb; \
2711  } \
2712  d->block = s->block; \
2713  for (int i = 0; i < 8; i++) \
2714  d->c.block_last_index[i] = s->c.block_last_index[i]; \
2715  d->c.interlaced_dct = s->c.interlaced_dct; \
2716  d->c.qscale = s->c.qscale; \
2717  \
2718  d->esc3_level_length = s->esc3_level_length; \
2719 }
2720 
/* backup/save: context -> MBBackup; reset/store: MBBackup -> context. */
2721 COPY_CONTEXT(backup, save, MBBackup, MPVEncContext)
2722 COPY_CONTEXT(reset, store, MPVEncContext, MBBackup)
2723 
2724 static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best,
2725  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2726  int *dmin, int *next_block, int motion_x, int motion_y)
2727 {
2728  int score;
2729  uint8_t *dest_backup[3];
2730 
2731  reset_context_before_encode(s, backup);
2732 
2733  s->block = s->blocks[*next_block];
2734  s->pb = pb[*next_block];
2735  if (s->data_partitioning) {
2736  s->pb2 = pb2 [*next_block];
2737  s->tex_pb= tex_pb[*next_block];
2738  }
2739 
2740  if(*next_block){
2741  memcpy(dest_backup, s->c.dest, sizeof(s->c.dest));
2742  s->c.dest[0] = s->c.sc.rd_scratchpad;
2743  s->c.dest[1] = s->c.sc.rd_scratchpad + 16*s->c.linesize;
2744  s->c.dest[2] = s->c.sc.rd_scratchpad + 16*s->c.linesize + 8;
2745  av_assert0(s->c.linesize >= 32); //FIXME
2746  }
2747 
2748  encode_mb(s, motion_x, motion_y);
2749 
2750  score= put_bits_count(&s->pb);
2751  if (s->data_partitioning) {
2752  score+= put_bits_count(&s->pb2);
2753  score+= put_bits_count(&s->tex_pb);
2754  }
2755 
2756  if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
2757  mpv_reconstruct_mb(s, s->block);
2758 
2759  score *= s->lambda2;
2760  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2761  }
2762 
2763  if(*next_block){
2764  memcpy(s->c.dest, dest_backup, sizeof(s->c.dest));
2765  }
2766 
2767  if(score<*dmin){
2768  *dmin= score;
2769  *next_block^=1;
2770 
2771  save_context_after_encode(best, s, s->data_partitioning);
2772  }
2773 }
2774 
2775 static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
2776 {
2777  const uint32_t *sq = ff_square_tab + 256;
2778  int acc=0;
2779  int x,y;
2780 
2781  if(w==16 && h==16)
2782  return s->sse_cmp[0](NULL, src1, src2, stride, 16);
2783  else if(w==8 && h==8)
2784  return s->sse_cmp[1](NULL, src1, src2, stride, 8);
2785 
2786  for(y=0; y<h; y++){
2787  for(x=0; x<w; x++){
2788  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2789  }
2790  }
2791 
2792  av_assert2(acc>=0);
2793 
2794  return acc;
2795 }
2796 
2797 static int sse_mb(MPVEncContext *const s)
2798 {
2799  int w= 16;
2800  int h= 16;
2801  int chroma_mb_w = w >> s->c.chroma_x_shift;
2802  int chroma_mb_h = h >> s->c.chroma_y_shift;
2803 
2804  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
2805  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
2806 
2807  if(w==16 && h==16)
2808  return s->n_sse_cmp[0](s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2809  s->c.dest[0], s->c.linesize, 16) +
2810  s->n_sse_cmp[1](s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2811  s->c.dest[1], s->c.uvlinesize, chroma_mb_h) +
2812  s->n_sse_cmp[1](s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2813  s->c.dest[2], s->c.uvlinesize, chroma_mb_h);
2814  else
2815  return sse(s, s->new_pic->data[0] + s->c.mb_x * 16 + s->c.mb_y * s->c.linesize * 16,
2816  s->c.dest[0], w, h, s->c.linesize) +
2817  sse(s, s->new_pic->data[1] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2818  s->c.dest[1], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize) +
2819  sse(s, s->new_pic->data[2] + s->c.mb_x * chroma_mb_w + s->c.mb_y * s->c.uvlinesize * chroma_mb_h,
2820  s->c.dest[2], w >> s->c.chroma_x_shift, h >> s->c.chroma_y_shift, s->c.uvlinesize);
2821 }
2822 
/* Slice-thread worker: motion-estimation pre-pass over this slice's MB
 * rows, scanned bottom-right to top-left so the main pass can use the
 * pre-pass results as predictors.
 * NOTE(review): the extraction dropped this function's signature line
 * (original line 2823); presumably a standard execute() thread callback
 * like the others below - verify against the full source. */
2824  MPVEncContext *const s = *(void**)arg;
2825 
2826 
2827  s->me.pre_pass = 1;
2828  s->me.dia_size = s->c.avctx->pre_dia_size;
2829  s->c.first_slice_line = 1;
/* Reverse scan: bottom row to top, right to left within each row. */
2830  for (s->c.mb_y = s->c.end_mb_y - 1; s->c.mb_y >= s->c.start_mb_y; s->c.mb_y--) {
2831  for (s->c.mb_x = s->c.mb_width - 1; s->c.mb_x >=0 ; s->c.mb_x--)
2832  ff_pre_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2833  s->c.first_slice_line = 0;
2834  }
2835 
2836  s->me.pre_pass = 0;
2837 
2838  return 0;
2839 }
2840 
/* Slice-thread worker: full motion estimation for this slice, storing
 * the chosen motion vectors and MB types in the context tables.
 * NOTE(review): the extraction dropped this function's signature line
 * (original line 2841); presumably a standard execute() thread callback
 * like mb_var_thread() below - verify against the full source. */
2842  MPVEncContext *const s = *(void**)arg;
2843 
2844  s->me.dia_size = s->c.avctx->dia_size;
2845  s->c.first_slice_line = 1;
2846  for (s->c.mb_y = s->c.start_mb_y; s->c.mb_y < s->c.end_mb_y; s->c.mb_y++) {
2847  s->c.mb_x = 0; //for block init below
2848  ff_init_block_index(&s->c);
2849  for (s->c.mb_x = 0; s->c.mb_x < s->c.mb_width; s->c.mb_x++) {
/* Advance the block indexes by one MB (2 luma blocks horizontally). */
2850  s->c.block_index[0] += 2;
2851  s->c.block_index[1] += 2;
2852  s->c.block_index[2] += 2;
2853  s->c.block_index[3] += 2;
2854 
2855  /* compute motion vector & mb_type and store in context */
2856  if (s->c.pict_type == AV_PICTURE_TYPE_B)
2857  ff_estimate_b_frame_motion(s, s->c.mb_x, s->c.mb_y);
2858  else
2859  ff_estimate_p_frame_motion(s, s->c.mb_x, s->c.mb_y);
2860  }
2861  s->c.first_slice_line = 0;
2862  }
2863  return 0;
2864 }
2865 
2866 static int mb_var_thread(AVCodecContext *c, void *arg){
2867  MPVEncContext *const s = *(void**)arg;
2868 
2869  for (int mb_y = s->c.start_mb_y; mb_y < s->c.end_mb_y; mb_y++) {
2870  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
2871  int xx = mb_x * 16;
2872  int yy = mb_y * 16;
2873  const uint8_t *pix = s->new_pic->data[0] + (yy * s->c.linesize) + xx;
2874  int varc;
2875  int sum = s->mpvencdsp.pix_sum(pix, s->c.linesize);
2876 
2877  varc = (s->mpvencdsp.pix_norm1(pix, s->c.linesize) -
2878  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2879 
2880  s->mb_var [s->c.mb_stride * mb_y + mb_x] = varc;
2881  s->mb_mean[s->c.mb_stride * mb_y + mb_x] = (sum+128)>>8;
2882  s->me.mb_var_sum_temp += varc;
2883  }
2884  }
2885  return 0;
2886 }
2887 
/* Finish the current slice/partition: let the codec-specific code flush
 * its trailing data, byte-align the bitstream, and account the padding
 * bits as misc_bits for pass-1 statistics.
 * NOTE(review): the extraction dropped the codec-specific call lines
 * (original lines 2892, 2897, 2899 - e.g. the MPEG-4 partition merge and
 * the MJPEG/SpeedHQ slice-end calls); code below is otherwise kept
 * byte-identical. */
2888 static void write_slice_end(MPVEncContext *const s)
2889 {
2890  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4) {
2891  if (s->partitioned_frame)
2893 
2894  ff_mpeg4_stuffing(&s->pb);
2895  } else if ((CONFIG_MJPEG_ENCODER || CONFIG_AMV_ENCODER) &&
2896  s->c.out_format == FMT_MJPEG) {
2898  } else if (CONFIG_SPEEDHQ_ENCODER && s->c.out_format == FMT_SPEEDHQ) {
2900  }
2901 
/* Byte-align the slice. */
2902  flush_put_bits(&s->pb);
2903 
2904  if ((s->c.avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2905  s->misc_bits+= get_bits_diff(s);
2906 }
2907 
2908 static void write_mb_info(MPVEncContext *const s)
2909 {
2910  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2911  int offset = put_bits_count(&s->pb);
2912  int mba = s->c.mb_x + s->c.mb_width * (s->c.mb_y % s->gob_index);
2913  int gobn = s->c.mb_y / s->gob_index;
2914  int pred_x, pred_y;
2915  if (CONFIG_H263_ENCODER)
2916  ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
2917  bytestream_put_le32(&ptr, offset);
2918  bytestream_put_byte(&ptr, s->c.qscale);
2919  bytestream_put_byte(&ptr, gobn);
2920  bytestream_put_le16(&ptr, mba);
2921  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2922  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2923  /* 4MV not implemented */
2924  bytestream_put_byte(&ptr, 0); /* hmv2 */
2925  bytestream_put_byte(&ptr, 0); /* vmv2 */
2926 }
2927 
2928 static void update_mb_info(MPVEncContext *const s, int startcode)
2929 {
2930  if (!s->mb_info)
2931  return;
2932  if (put_bytes_count(&s->pb, 0) - s->prev_mb_info >= s->mb_info) {
2933  s->mb_info_size += 12;
2934  s->prev_mb_info = s->last_mb_info;
2935  }
2936  if (startcode) {
2937  s->prev_mb_info = put_bytes_count(&s->pb, 0);
2938  /* This might have incremented mb_info_size above, and we return without
2939  * actually writing any info into that slot yet. But in that case,
2940  * this will be called again at the start of the after writing the
2941  * start code, actually writing the mb info. */
2942  return;
2943  }
2944 
2945  s->last_mb_info = put_bytes_count(&s->pb, 0);
2946  if (!s->mb_info_size)
2947  s->mb_info_size += 12;
2948  write_mb_info(s);
2949 }
2950 
2951 int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
2952 {
2953  if (put_bytes_left(&s->pb, 0) < threshold
2954  && s->c.slice_context_count == 1
2955  && s->pb.buf == s->c.avctx->internal->byte_buffer) {
2956  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2957 
2958  uint8_t *new_buffer = NULL;
2959  int new_buffer_size = 0;
2960 
2961  if ((s->c.avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2962  av_log(s->c.avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2963  return AVERROR(ENOMEM);
2964  }
2965 
2966  emms_c();
2967 
2968  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2969  s->c.avctx->internal->byte_buffer_size + size_increase);
2970  if (!new_buffer)
2971  return AVERROR(ENOMEM);
2972 
2973  memcpy(new_buffer, s->c.avctx->internal->byte_buffer, s->c.avctx->internal->byte_buffer_size);
2974  av_free(s->c.avctx->internal->byte_buffer);
2975  s->c.avctx->internal->byte_buffer = new_buffer;
2976  s->c.avctx->internal->byte_buffer_size = new_buffer_size;
2977  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2978  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2979  }
2980  if (put_bytes_left(&s->pb, 0) < threshold)
2981  return AVERROR(EINVAL);
2982  return 0;
2983 }
2984 
2985 static int encode_thread(AVCodecContext *c, void *arg){
2986  MPVEncContext *const s = *(void**)arg;
2987  int chr_h = 16 >> s->c.chroma_y_shift;
2988  int i;
2989  MBBackup best_s = { 0 }, backup_s;
2990  uint8_t bit_buf[2][MAX_MB_BYTES];
2991  // + 2 because ff_copy_bits() overreads
2992  uint8_t bit_buf2[2][MAX_PB2_MB_SIZE + 2];
2993  uint8_t bit_buf_tex[2][MAX_AC_TEX_MB_SIZE + 2];
2994  PutBitContext pb[2], pb2[2], tex_pb[2];
2995 
2996  for(i=0; i<2; i++){
2997  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2998  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_PB2_MB_SIZE);
2999  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_AC_TEX_MB_SIZE);
3000  }
3001 
3002  s->last_bits= put_bits_count(&s->pb);
3003  s->mv_bits=0;
3004  s->misc_bits=0;
3005  s->i_tex_bits=0;
3006  s->p_tex_bits=0;
3007  s->i_count=0;
3008 
3009  for(i=0; i<3; i++){
3010  /* init last dc values */
3011  /* note: quant matrix value (8) is implied here */
3012  s->last_dc[i] = 128 << s->c.intra_dc_precision;
3013 
3014  s->encoding_error[i] = 0;
3015  }
3016  if (s->c.codec_id == AV_CODEC_ID_AMV) {
3017  s->last_dc[0] = 128 * 8 / 13;
3018  s->last_dc[1] = 128 * 8 / 14;
3019  s->last_dc[2] = 128 * 8 / 14;
3020 #if CONFIG_MPEG4_ENCODER
3021  } else if (s->partitioned_frame) {
3022  av_assert1(s->c.codec_id == AV_CODEC_ID_MPEG4);
3024 #endif
3025  }
3026  s->mb_skip_run = 0;
3027  memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
3028 
3029  s->last_mv_dir = 0;
3030 
3031  s->c.resync_mb_x = 0;
3032  s->c.resync_mb_y = 0;
3033  s->c.first_slice_line = 1;
3034  s->ptr_lastgob = s->pb.buf;
3035  for (int mb_y_order = s->c.start_mb_y; mb_y_order < s->c.end_mb_y; mb_y_order++) {
3036  int mb_y;
3037  if (CONFIG_SPEEDHQ_ENCODER && s->c.codec_id == AV_CODEC_ID_SPEEDHQ) {
3038  int first_in_slice;
3039  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->c.mb_height, &first_in_slice);
3040  if (first_in_slice && mb_y_order != s->c.start_mb_y)
3042  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->c.intra_dc_precision;
3043  } else {
3044  mb_y = mb_y_order;
3045  }
3046  s->c.mb_x = 0;
3047  s->c.mb_y = mb_y;
3048 
3049  ff_set_qscale(&s->c, s->c.qscale);
3050  ff_init_block_index(&s->c);
3051 
3052  for (int mb_x = 0; mb_x < s->c.mb_width; mb_x++) {
3053  int mb_type, xy;
3054 // int d;
3055  int dmin= INT_MAX;
3056  int dir;
3057  int size_increase = s->c.avctx->internal->byte_buffer_size/4
3058  + s->c.mb_width*MAX_MB_BYTES;
3059 
3061  if (put_bytes_left(&s->pb, 0) < MAX_MB_BYTES){
3062  av_log(s->c.avctx, AV_LOG_ERROR, "encoded frame too large\n");
3063  return -1;
3064  }
3065  if (s->data_partitioning) {
3066  if (put_bytes_left(&s->pb2, 0) < MAX_MB_BYTES ||
3067  put_bytes_left(&s->tex_pb, 0) < MAX_MB_BYTES) {
3068  av_log(s->c.avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3069  return -1;
3070  }
3071  }
3072 
3073  s->c.mb_x = mb_x;
3074  s->c.mb_y = mb_y; // moved into loop, can get changed by H.261
3075  ff_update_block_index(&s->c, 8, 0, s->c.chroma_x_shift);
3076 
3077  if (CONFIG_H261_ENCODER && s->c.codec_id == AV_CODEC_ID_H261)
3079  xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
3080  mb_type = s->mb_type[xy];
3081 
3082  /* write gob / video packet header */
3083  if(s->rtp_mode){
3084  int current_packet_size, is_gob_start;
3085 
3086  current_packet_size = put_bytes_count(&s->pb, 1)
3087  - (s->ptr_lastgob - s->pb.buf);
3088 
3089  is_gob_start = s->rtp_payload_size &&
3090  current_packet_size >= s->rtp_payload_size &&
3091  mb_y + mb_x > 0;
3092 
3093  if (s->c.start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
3094 
3095  switch (s->c.codec_id) {
3096  case AV_CODEC_ID_H263:
3097  case AV_CODEC_ID_H263P:
3098  if (!s->h263_slice_structured)
3099  if (s->c.mb_x || s->c.mb_y % s->gob_index) is_gob_start = 0;
3100  break;
3102  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3104  if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO && s->c.mb_y >= 175 ||
3105  s->mb_skip_run)
3106  is_gob_start=0;
3107  break;
3108  case AV_CODEC_ID_MJPEG:
3109  if (s->c.mb_x == 0 && s->c.mb_y != 0) is_gob_start = 1;
3110  break;
3111  }
3112 
3113  if(is_gob_start){
3114  if (s->c.start_mb_y != mb_y || mb_x != 0) {
3115  write_slice_end(s);
3116 
3117  if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4 && s->partitioned_frame)
3119  }
3120 
3121  av_assert2((put_bits_count(&s->pb)&7) == 0);
3122  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3123 
3124  if (s->error_rate && s->c.resync_mb_x + s->c.resync_mb_y > 0) {
3125  int r = put_bytes_count(&s->pb, 0) + s->picture_number + 16 + s->c.mb_x + s->c.mb_y;
3126  int d = 100 / s->error_rate;
3127  if(r % d == 0){
3128  current_packet_size=0;
3129  s->pb.buf_ptr= s->ptr_lastgob;
3130  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3131  }
3132  }
3133 
3134  switch (s->c.codec_id) {
3135  case AV_CODEC_ID_MPEG4:
3136  if (CONFIG_MPEG4_ENCODER) {
3140  }
3141  break;
3144  if (CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER) {
3147  }
3148  break;
3149 #if CONFIG_H263P_ENCODER
3150  case AV_CODEC_ID_H263P:
3151  if (s->c.dc_val)
3153  // fallthrough
3154 #endif
3155  case AV_CODEC_ID_H263:
3156  if (CONFIG_H263_ENCODER) {
3157  update_mb_info(s, 1);
3159  }
3160  break;
3161  }
3162 
3163  if (s->c.avctx->flags & AV_CODEC_FLAG_PASS1) {
3164  int bits= put_bits_count(&s->pb);
3165  s->misc_bits+= bits - s->last_bits;
3166  s->last_bits= bits;
3167  }
3168 
3169  s->ptr_lastgob += current_packet_size;
3170  s->c.first_slice_line = 1;
3171  s->c.resync_mb_x = mb_x;
3172  s->c.resync_mb_y = mb_y;
3173  }
3174  }
3175 
3176  if (s->c.resync_mb_x == s->c.mb_x &&
3177  s->c.resync_mb_y+1 == s->c.mb_y)
3178  s->c.first_slice_line = 0;
3179 
3180  s->c.mb_skipped = 0;
3181  s->dquant=0; //only for QP_RD
3182 
3183  update_mb_info(s, 0);
3184 
3185  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3186  int next_block=0;
3187  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3188 
3189  backup_context_before_encode(&backup_s, s);
3190  backup_s.pb= s->pb;
3191  if (s->data_partitioning) {
3192  backup_s.pb2= s->pb2;
3193  backup_s.tex_pb= s->tex_pb;
3194  }
3195 
3196  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3197  s->c.mv_dir = MV_DIR_FORWARD;
3198  s->c.mv_type = MV_TYPE_16X16;
3199  s->c.mb_intra = 0;
3200  s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3201  s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3202  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3203  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3204  }
3205  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3206  s->c.mv_dir = MV_DIR_FORWARD;
3207  s->c.mv_type = MV_TYPE_FIELD;
3208  s->c.mb_intra = 0;
3209  for(i=0; i<2; i++){
3210  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3211  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3212  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3213  }
3214  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3215  &dmin, &next_block, 0, 0);
3216  }
3217  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3218  s->c.mv_dir = MV_DIR_FORWARD;
3219  s->c.mv_type = MV_TYPE_16X16;
3220  s->c.mb_intra = 0;
3221  s->c.mv[0][0][0] = 0;
3222  s->c.mv[0][0][1] = 0;
3223  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3224  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3225  }
3226  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3227  s->c.mv_dir = MV_DIR_FORWARD;
3228  s->c.mv_type = MV_TYPE_8X8;
3229  s->c.mb_intra = 0;
3230  for(i=0; i<4; i++){
3231  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3232  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3233  }
3234  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3235  &dmin, &next_block, 0, 0);
3236  }
3237  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3238  s->c.mv_dir = MV_DIR_FORWARD;
3239  s->c.mv_type = MV_TYPE_16X16;
3240  s->c.mb_intra = 0;
3241  s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3242  s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3243  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3244  &dmin, &next_block, s->c.mv[0][0][0], s->c.mv[0][0][1]);
3245  }
3246  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3247  s->c.mv_dir = MV_DIR_BACKWARD;
3248  s->c.mv_type = MV_TYPE_16X16;
3249  s->c.mb_intra = 0;
3250  s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3251  s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3252  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3253  &dmin, &next_block, s->c.mv[1][0][0], s->c.mv[1][0][1]);
3254  }
3255  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3256  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3257  s->c.mv_type = MV_TYPE_16X16;
3258  s->c.mb_intra = 0;
3259  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3260  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3261  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3262  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3263  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3264  &dmin, &next_block, 0, 0);
3265  }
3266  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3267  s->c.mv_dir = MV_DIR_FORWARD;
3268  s->c.mv_type = MV_TYPE_FIELD;
3269  s->c.mb_intra = 0;
3270  for(i=0; i<2; i++){
3271  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3272  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3273  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3274  }
3275  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3276  &dmin, &next_block, 0, 0);
3277  }
3278  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3279  s->c.mv_dir = MV_DIR_BACKWARD;
3280  s->c.mv_type = MV_TYPE_FIELD;
3281  s->c.mb_intra = 0;
3282  for(i=0; i<2; i++){
3283  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3284  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3285  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3286  }
3287  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3288  &dmin, &next_block, 0, 0);
3289  }
3290  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3291  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3292  s->c.mv_type = MV_TYPE_FIELD;
3293  s->c.mb_intra = 0;
3294  for(dir=0; dir<2; dir++){
3295  for(i=0; i<2; i++){
3296  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3297  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3298  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3299  }
3300  }
3301  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3302  &dmin, &next_block, 0, 0);
3303  }
3304  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3305  s->c.mv_dir = 0;
3306  s->c.mv_type = MV_TYPE_16X16;
3307  s->c.mb_intra = 1;
3308  s->c.mv[0][0][0] = 0;
3309  s->c.mv[0][0][1] = 0;
3310  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3311  &dmin, &next_block, 0, 0);
3312  s->c.mbintra_table[xy] = 1;
3313  }
3314 
3315  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3316  if (best_s.c.mv_type == MV_TYPE_16X16) { //FIXME move 4mv after QPRD
3317  const int last_qp = backup_s.c.qscale;
3318  int qpi, qp, dc[6];
3319  int16_t ac[6][16];
3320  const int mvdir = (best_s.c.mv_dir & MV_DIR_BACKWARD) ? 1 : 0;
3321  static const int dquant_tab[4]={-1,1,-2,2};
3322  int storecoefs = s->c.mb_intra && s->c.dc_val;
3323 
3324  av_assert2(backup_s.dquant == 0);
3325 
3326  //FIXME intra
3327  s->c.mv_dir = best_s.c.mv_dir;
3328  s->c.mv_type = MV_TYPE_16X16;
3329  s->c.mb_intra = best_s.c.mb_intra;
3330  s->c.mv[0][0][0] = best_s.c.mv[0][0][0];
3331  s->c.mv[0][0][1] = best_s.c.mv[0][0][1];
3332  s->c.mv[1][0][0] = best_s.c.mv[1][0][0];
3333  s->c.mv[1][0][1] = best_s.c.mv[1][0][1];
3334 
3335  qpi = s->c.pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3336  for(; qpi<4; qpi++){
3337  int dquant= dquant_tab[qpi];
3338  qp= last_qp + dquant;
3339  if (qp < s->c.avctx->qmin || qp > s->c.avctx->qmax)
3340  continue;
3341  backup_s.dquant= dquant;
3342  if(storecoefs){
3343  for(i=0; i<6; i++){
3344  dc[i] = s->c.dc_val[s->c.block_index[i]];
3345  memcpy(ac[i], s->c.ac_val[s->c.block_index[i]], sizeof(*s->c.ac_val));
3346  }
3347  }
3348 
3349  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3350  &dmin, &next_block, s->c.mv[mvdir][0][0], s->c.mv[mvdir][0][1]);
3351  if (best_s.c.qscale != qp) {
3352  if(storecoefs){
3353  for(i=0; i<6; i++){
3354  s->c.dc_val[s->c.block_index[i]] = dc[i];
3355  memcpy(s->c.ac_val[s->c.block_index[i]], ac[i], sizeof(*s->c.ac_val));
3356  }
3357  }
3358  }
3359  }
3360  }
3361  }
3362  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT){
3363  int mx= s->b_direct_mv_table[xy][0];
3364  int my= s->b_direct_mv_table[xy][1];
3365 
3366  backup_s.dquant = 0;
3367  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3368  s->c.mb_intra = 0;
3369  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3370  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3371  &dmin, &next_block, mx, my);
3372  }
3373  if(CONFIG_MPEG4_ENCODER && mb_type&CANDIDATE_MB_TYPE_DIRECT0){
3374  backup_s.dquant = 0;
3375  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3376  s->c.mb_intra = 0;
3377  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3378  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3379  &dmin, &next_block, 0, 0);
3380  }
3381  if (!best_s.c.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3382  int coded=0;
3383  for(i=0; i<6; i++)
3384  coded |= s->c.block_last_index[i];
3385  if(coded){
3386  int mx,my;
3387  memcpy(s->c.mv, best_s.c.mv, sizeof(s->c.mv));
3388  if (CONFIG_MPEG4_ENCODER && best_s.c.mv_dir & MV_DIRECT) {
3389  mx=my=0; //FIXME find the one we actually used
3390  ff_mpeg4_set_direct_mv(&s->c, mx, my);
3391  } else if (best_s.c.mv_dir & MV_DIR_BACKWARD) {
3392  mx = s->c.mv[1][0][0];
3393  my = s->c.mv[1][0][1];
3394  }else{
3395  mx = s->c.mv[0][0][0];
3396  my = s->c.mv[0][0][1];
3397  }
3398 
3399  s->c.mv_dir = best_s.c.mv_dir;
3400  s->c.mv_type = best_s.c.mv_type;
3401  s->c.mb_intra = 0;
3402 /* s->c.mv[0][0][0] = best_s.mv[0][0][0];
3403  s->c.mv[0][0][1] = best_s.mv[0][0][1];
3404  s->c.mv[1][0][0] = best_s.mv[1][0][0];
3405  s->c.mv[1][0][1] = best_s.mv[1][0][1];*/
3406  backup_s.dquant= 0;
3407  s->skipdct=1;
3408  encode_mb_hq(s, &backup_s, &best_s, pb, pb2, tex_pb,
3409  &dmin, &next_block, mx, my);
3410  s->skipdct=0;
3411  }
3412  }
3413 
3414  store_context_after_encode(s, &best_s, s->data_partitioning);
3415 
3416  pb_bits_count= put_bits_count(&s->pb);
3417  flush_put_bits(&s->pb);
3418  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3419  s->pb= backup_s.pb;
3420 
3421  if (s->data_partitioning) {
3422  pb2_bits_count= put_bits_count(&s->pb2);
3423  flush_put_bits(&s->pb2);
3424  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3425  s->pb2= backup_s.pb2;
3426 
3427  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3428  flush_put_bits(&s->tex_pb);
3429  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3430  s->tex_pb= backup_s.tex_pb;
3431  }
3432  s->last_bits= put_bits_count(&s->pb);
3433 
3434  if (CONFIG_H263_ENCODER &&
3435  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3437 
3438  if(next_block==0){ //FIXME 16 vs linesize16
3439  s->c.hdsp.put_pixels_tab[0][0](s->c.dest[0], s->c.sc.rd_scratchpad , s->c.linesize ,16);
3440  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[1], s->c.sc.rd_scratchpad + 16*s->c.linesize , s->c.uvlinesize, 8);
3441  s->c.hdsp.put_pixels_tab[1][0](s->c.dest[2], s->c.sc.rd_scratchpad + 16*s->c.linesize + 8, s->c.uvlinesize, 8);
3442  }
3443 
3444  if (s->c.avctx->mb_decision == FF_MB_DECISION_BITS)
3445  mpv_reconstruct_mb(s, s->block);
3446  } else {
3447  int motion_x = 0, motion_y = 0;
3448  s->c.mv_type = MV_TYPE_16X16;
3449  // only one MB-Type possible
3450 
3451  switch(mb_type){
3453  s->c.mv_dir = 0;
3454  s->c.mb_intra = 1;
3455  motion_x= s->c.mv[0][0][0] = 0;
3456  motion_y= s->c.mv[0][0][1] = 0;
3457  s->c.mbintra_table[xy] = 1;
3458  break;
3460  s->c.mv_dir = MV_DIR_FORWARD;
3461  s->c.mb_intra = 0;
3462  motion_x= s->c.mv[0][0][0] = s->p_mv_table[xy][0];
3463  motion_y= s->c.mv[0][0][1] = s->p_mv_table[xy][1];
3464  break;
3466  s->c.mv_dir = MV_DIR_FORWARD;
3467  s->c.mv_type = MV_TYPE_FIELD;
3468  s->c.mb_intra = 0;
3469  for(i=0; i<2; i++){
3470  int j = s->c.field_select[0][i] = s->p_field_select_table[i][xy];
3471  s->c.mv[0][i][0] = s->c.p_field_mv_table[i][j][xy][0];
3472  s->c.mv[0][i][1] = s->c.p_field_mv_table[i][j][xy][1];
3473  }
3474  break;
3476  s->c.mv_dir = MV_DIR_FORWARD;
3477  s->c.mv_type = MV_TYPE_8X8;
3478  s->c.mb_intra = 0;
3479  for(i=0; i<4; i++){
3480  s->c.mv[0][i][0] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
3481  s->c.mv[0][i][1] = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
3482  }
3483  break;
3485  if (CONFIG_MPEG4_ENCODER) {
3487  s->c.mb_intra = 0;
3488  motion_x=s->b_direct_mv_table[xy][0];
3489  motion_y=s->b_direct_mv_table[xy][1];
3490  ff_mpeg4_set_direct_mv(&s->c, motion_x, motion_y);
3491  }
3492  break;
3494  if (CONFIG_MPEG4_ENCODER) {
3496  s->c.mb_intra = 0;
3497  ff_mpeg4_set_direct_mv(&s->c, 0, 0);
3498  }
3499  break;
3501  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3502  s->c.mb_intra = 0;
3503  s->c.mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3504  s->c.mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3505  s->c.mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3506  s->c.mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3507  break;
3509  s->c.mv_dir = MV_DIR_BACKWARD;
3510  s->c.mb_intra = 0;
3511  motion_x= s->c.mv[1][0][0] = s->b_back_mv_table[xy][0];
3512  motion_y= s->c.mv[1][0][1] = s->b_back_mv_table[xy][1];
3513  break;
3515  s->c.mv_dir = MV_DIR_FORWARD;
3516  s->c.mb_intra = 0;
3517  motion_x= s->c.mv[0][0][0] = s->b_forw_mv_table[xy][0];
3518  motion_y= s->c.mv[0][0][1] = s->b_forw_mv_table[xy][1];
3519  break;
3521  s->c.mv_dir = MV_DIR_FORWARD;
3522  s->c.mv_type = MV_TYPE_FIELD;
3523  s->c.mb_intra = 0;
3524  for(i=0; i<2; i++){
3525  int j = s->c.field_select[0][i] = s->b_field_select_table[0][i][xy];
3526  s->c.mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3527  s->c.mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3528  }
3529  break;
3531  s->c.mv_dir = MV_DIR_BACKWARD;
3532  s->c.mv_type = MV_TYPE_FIELD;
3533  s->c.mb_intra = 0;
3534  for(i=0; i<2; i++){
3535  int j = s->c.field_select[1][i] = s->b_field_select_table[1][i][xy];
3536  s->c.mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3537  s->c.mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3538  }
3539  break;
3541  s->c.mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3542  s->c.mv_type = MV_TYPE_FIELD;
3543  s->c.mb_intra = 0;
3544  for(dir=0; dir<2; dir++){
3545  for(i=0; i<2; i++){
3546  int j = s->c.field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3547  s->c.mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3548  s->c.mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3549  }
3550  }
3551  break;
3552  default:
3553  av_unreachable("There is a case for every CANDIDATE_MB_TYPE_* "
3554  "except CANDIDATE_MB_TYPE_SKIPPED which is never "
3555  "the only candidate (always coupled with INTER) "
3556  "so that it never reaches this switch");
3557  }
3558 
3559  encode_mb(s, motion_x, motion_y);
3560 
3561  // RAL: Update last macroblock type
3562  s->last_mv_dir = s->c.mv_dir;
3563 
3564  if (CONFIG_H263_ENCODER &&
3565  s->c.out_format == FMT_H263 && s->c.pict_type != AV_PICTURE_TYPE_B)
3567 
3568  mpv_reconstruct_mb(s, s->block);
3569  }
3570 
3571  s->c.cur_pic.qscale_table[xy] = s->c.qscale;
3572 
3573  /* clean the MV table in IPS frames for direct mode in B-frames */
3574  if (s->c.mb_intra /* && I,P,S_TYPE */) {
3575  s->p_mv_table[xy][0]=0;
3576  s->p_mv_table[xy][1]=0;
3577 #if CONFIG_H263_ENCODER
3578  } else if (s->c.h263_pred || s->c.h263_aic) {
3580 #endif
3581  }
3582 
3583  if (s->c.avctx->flags & AV_CODEC_FLAG_PSNR) {
3584  int w= 16;
3585  int h= 16;
3586 
3587  if (s->c.mb_x*16 + 16 > s->c.width ) w = s->c.width - s->c.mb_x*16;
3588  if (s->c.mb_y*16 + 16 > s->c.height) h = s->c.height- s->c.mb_y*16;
3589 
3590  s->encoding_error[0] += sse(
3591  s, s->new_pic->data[0] + s->c.mb_x*16 + s->c.mb_y*s->c.linesize*16,
3592  s->c.dest[0], w, h, s->c.linesize);
3593  s->encoding_error[1] += sse(
3594  s, s->new_pic->data[1] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3595  s->c.dest[1], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3596  s->encoding_error[2] += sse(
3597  s, s->new_pic->data[2] + s->c.mb_x*8 + s->c.mb_y*s->c.uvlinesize*chr_h,
3598  s->c.dest[2], w>>1, h>>s->c.chroma_y_shift, s->c.uvlinesize);
3599  }
3600  if (s->loop_filter) {
3601  if (CONFIG_H263_ENCODER && s->c.out_format == FMT_H263)
3602  ff_h263_loop_filter(&s->c);
3603  }
3604  ff_dlog(s->c.avctx, "MB %d %d bits\n",
3605  s->c.mb_x + s->c.mb_y * s->c.mb_stride, put_bits_count(&s->pb));
3606  }
3607  }
3608 
3609 #if CONFIG_MSMPEG4ENC
3610  //not beautiful here but we must write it before flushing so it has to be here
3611  if (s->c.msmpeg4_version != MSMP4_UNUSED && s->c.msmpeg4_version < MSMP4_WMV1 &&
3612  s->c.pict_type == AV_PICTURE_TYPE_I)
3614 #endif
3615 
3616  write_slice_end(s);
3617 
3618  return 0;
3619 }
3620 
/* Accumulate one field of a slice context (src) into the main context (dst).
 * MERGE additionally zeroes the source field so a repeated merge cannot
 * double-count it; ADD leaves the source untouched. */
#define ADD(field) dst->field += src->field;
#define MERGE(field) dst->field += src->field; src->field=0
/* NOTE(review): the function signature is not visible in this excerpt
 * (presumably static void merge_context_after_me(MPVEncContext *dst,
 * MPVEncContext *src) — verify against the full file).  The body folds the
 * per-slice motion-estimation statistics into the main encoder context. */
{
    ADD(me.scene_change_score);  /* scene-change heuristic accumulator */
    ADD(me.mc_mb_var_sum_temp);  /* motion-compensated MB variance sum */
    ADD(me.mb_var_sum_temp);     /* raw MB variance sum */
}
3629 
/* NOTE(review): the function signature is not visible in this excerpt
 * (presumably static void merge_context_after_encode(MPVEncContext *dst,
 * MPVEncContext *src) — verify against the full file).  The body merges a
 * slice context's encoding statistics into the main context and appends the
 * slice's byte-aligned bitstream onto the main bitstream. */
{
    int i;

    MERGE(dct_count[0]); //note, the other dct vars are not part of the context
    MERGE(dct_count[1]);
    /* bit-accounting counters: summed, source left intact */
    ADD(mv_bits);
    ADD(i_tex_bits);
    ADD(p_tex_bits);
    ADD(i_count);
    ADD(misc_bits);
    /* per-plane SSE used for PSNR reporting */
    ADD(encoding_error[0]);
    ADD(encoding_error[1]);
    ADD(encoding_error[2]);

    if (dst->dct_error_sum) {
        /* denoise statistics: merged and zeroed per coefficient position */
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* both bitstreams must be byte-aligned before the raw copy below */
    av_assert1(put_bits_count(&src->pb) % 8 ==0);
    av_assert1(put_bits_count(&dst->pb) % 8 ==0);
    ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
3657 
/**
 * Choose the quantizer (frame quality / lambda) for the current picture.
 *
 * Priority order: an explicitly scheduled lambda (m->next_lambda) wins,
 * otherwise rate control estimates a qscale unless the qscale is fixed.
 * With adaptive quantization the per-MB qscale table is initialized and
 * codec-specific qscale cleanup is applied.
 *
 * @param m       main encoder context
 * @param dry_run if nonzero, m->next_lambda is not consumed
 * @return 0 on success, -1 if rate control produced a negative quality
 */
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
{
    MPVEncContext *const s = &m->s;

    if (m->next_lambda){
        /* use the lambda scheduled by an earlier decision */
        s->c.cur_pic.ptr->f->quality = m->next_lambda;
        if(!dry_run) m->next_lambda= 0;
    } else if (!m->fixed_qscale) {
        int quality = ff_rate_estimate_qscale(m, dry_run);
        s->c.cur_pic.ptr->f->quality = quality;
        if (s->c.cur_pic.ptr->f->quality < 0)
            return -1;
    }

    if(s->adaptive_quant){
        init_qscale_tab(s);

        switch (s->c.codec_id) {
        case AV_CODEC_ID_MPEG4:
            if (CONFIG_MPEG4_ENCODER)
            /* NOTE(review): the cleanup call is missing from this copy of
             * the file; presumably ff_clean_mpeg4_qscales(s) — verify. */
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
            /* NOTE(review): presumably ff_clean_h263_qscales(s) — the call
             * line is missing from this copy; verify. */
            break;
        }

        s->lambda = s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->c.cur_pic.ptr->f->quality;
    update_qscale(m);
    return 0;
}
3695 
/* must be called before writing the header */
/* NOTE(review): the function signature is not visible in this excerpt
 * (presumably static void set_frame_distances(MPVEncContext *const s) —
 * verify).  The body derives the temporal distances (pp_time, pb_time)
 * from the current picture's pts. */
{
    av_assert1(s->c.cur_pic.ptr->f->pts != AV_NOPTS_VALUE);
    /* picture time expressed in time_base.num units */
    s->c.time = s->c.cur_pic.ptr->f->pts * s->c.avctx->time_base.num;

    if (s->c.pict_type == AV_PICTURE_TYPE_B) {
        /* distance from the previous non-B picture to this B picture;
         * must lie strictly inside the surrounding non-B interval */
        s->c.pb_time = s->c.pp_time - (s->c.last_non_b_time - s->c.time);
        av_assert1(s->c.pb_time > 0 && s->c.pb_time < s->c.pp_time);
    }else{
        /* distance between the two most recent non-B pictures */
        s->c.pp_time = s->c.time - s->c.last_non_b_time;
        s->c.last_non_b_time = s->c.time;
        av_assert1(s->picture_number == 0 || s->c.pp_time > 0);
    }
}
3711 
/**
 * Encode one picture into pkt's buffer.
 *
 * Pipeline: per-slice bit buffers are carved out of the packet, motion is
 * estimated (or intra complexity measured for I-frames), f_code/b_code and
 * long MVs are fixed up, the quantizer is chosen, the picture header is
 * written, all slice contexts encode in parallel and are finally merged
 * back into the main context.
 *
 * NOTE(review): this copy of the file is missing several single lines
 * (see the inline notes below); the surrounding structure is documented
 * as-is and each gap is flagged rather than guessed at.
 *
 * @return 0 on success, a negative error code otherwise
 */
static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
{
    MPVEncContext *const s = &m->s;
    int i, ret;
    int bits;
    int context_count = s->c.slice_context_count;

    /* we need to initialize some time vars before we can encode B-frames */
    // RAL: Condition added for MPEG1VIDEO
    if (s->c.out_format == FMT_MPEG1 || (s->c.h263_pred && s->c.msmpeg4_version == MSMP4_UNUSED))
    /* NOTE(review): the statement body is missing in this copy; presumably
     * set_frame_distances(s) — verify against the full file. */
    if (CONFIG_MPEG4_ENCODER && s->c.codec_id == AV_CODEC_ID_MPEG4)
    /* NOTE(review): body missing here too; presumably ff_set_mpeg4_time(s). */

// s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion

    if (s->c.pict_type == AV_PICTURE_TYPE_I) {
        s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
    } else if (s->c.pict_type != AV_PICTURE_TYPE_B) {
        /* alternate rounding on successive non-B pictures */
        s->c.no_rounding ^= s->flipflop_rounding;
    }

    if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
        ret = estimate_qp(m, 1);
        if (ret < 0)
            return ret;
        ff_get_2pass_fcode(m);
    } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
        /* provisional lambda for motion estimation, refined later */
        if (s->c.pict_type == AV_PICTURE_TYPE_B)
            s->lambda = m->last_lambda_for[s->c.pict_type];
        else
            s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
        update_qscale(m);
    }

    s->c.mb_intra = 0; //for the rate distortion & bit compare functions
    /* carve the packet buffer into one PutBitContext per slice context,
     * proportional to the rows each slice covers */
    for (int i = 0; i < context_count; i++) {
        MPVEncContext *const slice = s->c.enc_contexts[i];
        int h = s->c.mb_height;
        uint8_t *start = pkt->data + (int64_t)pkt->size * slice->c.start_mb_y / h;
        uint8_t *end   = pkt->data + (int64_t)pkt->size * slice->c.  end_mb_y / h;

        init_put_bits(&slice->pb, start, end - start);

        if (i) {
            ret = ff_update_duplicate_context(&slice->c, &s->c);
            if (ret < 0)
                return ret;
            slice->lambda  = s->lambda;
            slice->lambda2 = s->lambda2;
        }
        slice->me.temp = slice->me.scratchpad = slice->c.sc.scratchpad_buf;
        ff_me_init_pic(slice);
    }

    /* Estimate motion for every MB */
    if (s->c.pict_type != AV_PICTURE_TYPE_I) {
        s->lambda  = (s->lambda  * m->me_penalty_compensation + 128) >> 8;
        s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
        if (s->c.pict_type != AV_PICTURE_TYPE_B) {
            if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
                m->me_pre == 2) {
                s->c.avctx->execute(s->c.avctx, pre_estimate_motion_thread,
                                    &s->c.enc_contexts[0], NULL,
                                    context_count, sizeof(void*));
            }
        }

        s->c.avctx->execute(s->c.avctx, estimate_motion_thread, &s->c.enc_contexts[0],
                            NULL, context_count, sizeof(void*));
    }else /* if (s->c.pict_type == AV_PICTURE_TYPE_I) */{
        /* I-Frame */
        for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
            s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;

        if (!m->fixed_qscale) {
            /* finding spatial complexity for I-frame rate control */
            s->c.avctx->execute(s->c.avctx, mb_var_thread, &s->c.enc_contexts[0],
                                NULL, context_count, sizeof(void*));
        }
    }
    for(i=1; i<context_count; i++){
        merge_context_after_me(s, s->c.enc_contexts[i]);
    }
    m->mc_mb_var_sum = s->me.mc_mb_var_sum_temp;
    m->mb_var_sum    = s->me. mb_var_sum_temp;
    emms_c();

    /* promote a P-frame to I if motion estimation saw a scene change */
    if (s->me.scene_change_score > m->scenechange_threshold &&
        s->c.pict_type == AV_PICTURE_TYPE_P) {
        s->c.pict_type = AV_PICTURE_TYPE_I;
        for (int i = 0; i < s->c.mb_stride * s->c.mb_height; i++)
            s->mb_type[i] = CANDIDATE_MB_TYPE_INTRA;
        if (s->c.msmpeg4_version >= MSMP4_V3)
            s->c.no_rounding = 1;
        ff_dlog(s->c.avctx, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
                m->mb_var_sum, m->mc_mb_var_sum);
    }

    if (!s->umvplus) {
        if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
            /* pick the smallest f_code covering the P-frame MV range */
            s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int a,b;
                a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
                b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
                s->f_code = FFMAX3(s->f_code, a, b);
            }

            /* NOTE(review): a line is missing here in this copy (presumably
             * the ff_fix_long_p_mvs() call) — verify against the full file. */
            ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int j;
                for(i=0; i<2; i++){
                    for(j=0; j<2; j++)
                        ff_fix_long_mvs(s, s->p_field_select_table[i], j,
                                        s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
                }
            }
        } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
            int a, b;

            /* f_code from forward MVs, b_code from backward MVs; the BIDIR
             * tables constrain both */
            a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
            b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->f_code = FFMAX(a, b);

            a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
            b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
            s->b_code = FFMAX(a, b);

            ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
            if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
                int dir, j;
                for(dir=0; dir<2; dir++){
                    for(i=0; i<2; i++){
                        for(j=0; j<2; j++){
                            /* NOTE(review): the declaration of 'type' is
                             * missing from this copy of the file — verify. */
                            ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
                                            s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
                        }
                    }
                }
            }
        }
    }

    ret = estimate_qp(m, 0);
    if (ret < 0)
        return ret;

    if (s->c.qscale < 3 && s->max_qcoeff <= 128 &&
        s->c.pict_type == AV_PICTURE_TYPE_I &&
        !(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE))
        s->c.qscale = 3; //reduce clipping problems

    if (s->c.out_format == FMT_MJPEG) {
        /* NOTE(review): the first line of this call is missing in this copy
         * (only the trailing arguments survive) — verify. */
        (7 + s->c.qscale) / s->c.qscale, 65535);
        if (ret < 0)
            return ret;

        if (s->c.codec_id != AV_CODEC_ID_AMV) {
            const uint16_t *  luma_matrix = ff_mpeg1_default_intra_matrix;
            const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;

            if (s->c.avctx->intra_matrix) {
                chroma_matrix =
                luma_matrix = s->c.avctx->intra_matrix;
            }
            if (s->c.avctx->chroma_intra_matrix)
                chroma_matrix = s->c.avctx->chroma_intra_matrix;

            /* for mjpeg, we do include qscale in the matrix */
            for (int i = 1; i < 64; i++) {
                int j = s->c.idsp.idct_permutation[i];

                s->c.chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->c.qscale) >> 3);
                s->c.       intra_matrix[j] = av_clip_uint8((  luma_matrix[i] * s->c.qscale) >> 3);
            }
            s->c.y_dc_scale_table =
            s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
            s->c.chroma_intra_matrix[0] =
            s->c.intra_matrix[0] = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision][8];
        } else {
            /* AMV: fixed DC scales and the SP5X quant tables */
            static const uint8_t y[32] = {13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
            static const uint8_t c[32] = {14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
            for (int i = 1; i < 64; i++) {
                int j = s->c.idsp.idct_permutation[ff_zigzag_direct[i]];

                s->c.intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
                s->c.chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
            }
            s->c.y_dc_scale_table = y;
            s->c.c_dc_scale_table = c;
            s->c.intra_matrix[0] = 13;
            s->c.chroma_intra_matrix[0] = 14;
        }
        ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
                          s->c.intra_matrix, s->intra_quant_bias, 8, 8, 1);
        ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
                          s->c.chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
        s->c.qscale = 8;
    }

    /* mark keyframe status on the output frame */
    if (s->c.pict_type == AV_PICTURE_TYPE_I) {
        s->c.cur_pic.ptr->f->flags |= AV_FRAME_FLAG_KEY;
    } else {
        s->c.cur_pic.ptr->f->flags &= ~AV_FRAME_FLAG_KEY;
    }
    s->c.cur_pic.ptr->f->pict_type = s->c.pict_type;

    if (s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)
        m->picture_in_gop_number = 0;

    s->c.mb_x = s->c.mb_y = 0;
    s->last_bits= put_bits_count(&s->pb);
    ret = m->encode_picture_header(m);
    if (ret < 0)
        return ret;
    bits= put_bits_count(&s->pb);
    m->header_bits = bits - s->last_bits;

    /* encode all slices in parallel, then merge their stats/bitstreams */
    for(i=1; i<context_count; i++){
        update_duplicate_context_after_me(s->c.enc_contexts[i], s);
    }
    s->c.avctx->execute(s->c.avctx, encode_thread, &s->c.enc_contexts[0],
                        NULL, context_count, sizeof(void*));
    for(i=1; i<context_count; i++){
        if (s->pb.buf_end == s->c.enc_contexts[i]->pb.buf)
            set_put_bits_buffer_size(&s->pb, FFMIN(s->c.enc_contexts[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
        merge_context_after_encode(s, s->c.enc_contexts[i]);
    }
    emms_c();
    return 0;
}
3952 
3953 static inline void denoise_dct(MPVEncContext *const s, int16_t block[])
3954 {
3955  if (!s->dct_error_sum)
3956  return;
3957 
3958  const int intra = s->c.mb_intra;
3959  s->dct_count[intra]++;
3960  s->mpvencdsp.denoise_dct(block, s->dct_error_sum[intra], s->dct_offset[intra]);
3961 }
3962 
3964  int16_t *block, int n,
3965  int qscale, int *overflow){
3966  const int *qmat;
3967  const uint16_t *matrix;
3968  const uint8_t *scantable;
3969  const uint8_t *perm_scantable;
3970  int max=0;
3971  unsigned int threshold1, threshold2;
3972  int bias=0;
3973  int run_tab[65];
3974  int level_tab[65];
3975  int score_tab[65];
3976  int survivor[65];
3977  int survivor_count;
3978  int last_run=0;
3979  int last_level=0;
3980  int last_score= 0;
3981  int last_i;
3982  int coeff[2][64];
3983  int coeff_count[64];
3984  int qmul, qadd, start_i, last_non_zero, i, dc;
3985  const int esc_length= s->ac_esc_length;
3986  const uint8_t *length, *last_length;
3987  const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
3988  int mpeg2_qscale;
3989 
3990  s->fdsp.fdct(block);
3991 
3992  denoise_dct(s, block);
3993 
3994  qmul= qscale*16;
3995  qadd= ((qscale-1)|1)*8;
3996 
3997  if (s->c.q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
3998  else mpeg2_qscale = qscale << 1;
3999 
4000  if (s->c.mb_intra) {
4001  int q;
4002  scantable = s->c.intra_scantable.scantable;
4003  perm_scantable = s->c.intra_scantable.permutated;
4004  if (!s->c.h263_aic) {
4005  if (n < 4)
4006  q = s->c.y_dc_scale;
4007  else
4008  q = s->c.c_dc_scale;
4009  q = q << 3;
4010  } else{
4011  /* For AIC we skip quant/dequant of INTRADC */
4012  q = 1 << 3;
4013  qadd=0;
4014  }
4015 
4016  /* note: block[0] is assumed to be positive */
4017  block[0] = (block[0] + (q >> 1)) / q;
4018  start_i = 1;
4019  last_non_zero = 0;
4020  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4021  matrix = n < 4 ? s->c.intra_matrix : s->c.chroma_intra_matrix;
4022  if (s->mpeg_quant || s->c.out_format == FMT_MPEG1 || s->c.out_format == FMT_MJPEG)
4023  bias= 1<<(QMAT_SHIFT-1);
4024 
4025  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4026  length = s->intra_chroma_ac_vlc_length;
4027  last_length= s->intra_chroma_ac_vlc_last_length;
4028  } else {
4029  length = s->intra_ac_vlc_length;
4030  last_length= s->intra_ac_vlc_last_length;
4031  }
4032  } else {
4033  scantable = s->c.inter_scantable.scantable;
4034  perm_scantable = s->c.inter_scantable.permutated;
4035  start_i = 0;
4036  last_non_zero = -1;
4037  qmat = s->q_inter_matrix[qscale];
4038  matrix = s->c.inter_matrix;
4039  length = s->inter_ac_vlc_length;
4040  last_length= s->inter_ac_vlc_last_length;
4041  }
4042  last_i= start_i;
4043 
4044  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4045  threshold2= (threshold1<<1);
4046 
4047  for(i=63; i>=start_i; i--) {
4048  const int j = scantable[i];
4049  int64_t level = (int64_t)block[j] * qmat[j];
4050 
4051  if(((uint64_t)(level+threshold1))>threshold2){
4052  last_non_zero = i;
4053  break;
4054  }
4055  }
4056 
4057  for(i=start_i; i<=last_non_zero; i++) {
4058  const int j = scantable[i];
4059  int64_t level = (int64_t)block[j] * qmat[j];
4060 
4061 // if( bias+level >= (1<<(QMAT_SHIFT - 3))
4062 // || bias-level >= (1<<(QMAT_SHIFT - 3))){
4063  if(((uint64_t)(level+threshold1))>threshold2){
4064  if(level>0){
4065  level= (bias + level)>>QMAT_SHIFT;
4066  coeff[0][i]= level;
4067  coeff[1][i]= level-1;
4068 // coeff[2][k]= level-2;
4069  }else{
4070  level= (bias - level)>>QMAT_SHIFT;
4071  coeff[0][i]= -level;
4072  coeff[1][i]= -level+1;
4073 // coeff[2][k]= -level+2;
4074  }
4075  coeff_count[i]= FFMIN(level, 2);
4076  av_assert2(coeff_count[i]);
4077  max |=level;
4078  }else{
4079  coeff[0][i]= (level>>31)|1;
4080  coeff_count[i]= 1;
4081  }
4082  }
4083 
4084  *overflow= s->max_qcoeff < max; //overflow might have happened
4085 
4086  if(last_non_zero < start_i){
4087  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4088  return last_non_zero;
4089  }
4090 
4091  score_tab[start_i]= 0;
4092  survivor[0]= start_i;
4093  survivor_count= 1;
4094 
4095  for(i=start_i; i<=last_non_zero; i++){
4096  int level_index, j, zero_distortion;
4097  int dct_coeff= FFABS(block[ scantable[i] ]);
4098  int best_score=256*256*256*120;
4099 
4100  if (s->fdsp.fdct == ff_fdct_ifast)
4101  dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
4102  zero_distortion= dct_coeff*dct_coeff;
4103 
4104  for(level_index=0; level_index < coeff_count[i]; level_index++){
4105  int distortion;
4106  int level= coeff[level_index][i];
4107  const int alevel= FFABS(level);
4108  int unquant_coeff;
4109 
4110  av_assert2(level);
4111 
4112  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4113  unquant_coeff= alevel*qmul + qadd;
4114  } else if (s->c.out_format == FMT_MJPEG) {
4115  j = s->c.idsp.idct_permutation[scantable[i]];
4116  unquant_coeff = alevel * matrix[j] * 8;
4117  }else{ // MPEG-1
4118  j = s->c.idsp.idct_permutation[scantable[i]]; // FIXME: optimize
4119  if (s->c.mb_intra) {
4120  unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
4121  unquant_coeff = (unquant_coeff - 1) | 1;
4122  }else{
4123  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
4124  unquant_coeff = (unquant_coeff - 1) | 1;
4125  }
4126  unquant_coeff<<= 3;
4127  }
4128 
4129  distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
4130  level+=64;
4131  if((level&(~127)) == 0){
4132  for(j=survivor_count-1; j>=0; j--){
4133  int run= i - survivor[j];
4134  int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4135  score += score_tab[i-run];
4136 
4137  if(score < best_score){
4138  best_score= score;
4139  run_tab[i+1]= run;
4140  level_tab[i+1]= level-64;
4141  }
4142  }
4143 
4144  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4145  for(j=survivor_count-1; j>=0; j--){
4146  int run= i - survivor[j];
4147  int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
4148  score += score_tab[i-run];
4149  if(score < last_score){
4150  last_score= score;
4151  last_run= run;
4152  last_level= level-64;
4153  last_i= i+1;
4154  }
4155  }
4156  }
4157  }else{
4158  distortion += esc_length*lambda;
4159  for(j=survivor_count-1; j>=0; j--){
4160  int run= i - survivor[j];
4161  int score= distortion + score_tab[i-run];
4162 
4163  if(score < best_score){
4164  best_score= score;
4165  run_tab[i+1]= run;
4166  level_tab[i+1]= level-64;
4167  }
4168  }
4169 
4170  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4171  for(j=survivor_count-1; j>=0; j--){
4172  int run= i - survivor[j];
4173  int score= distortion + score_tab[i-run];
4174  if(score < last_score){
4175  last_score= score;
4176  last_run= run;
4177  last_level= level-64;
4178  last_i= i+1;
4179  }
4180  }
4181  }
4182  }
4183  }
4184 
4185  score_tab[i+1]= best_score;
4186 
4187  // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
4188  if(last_non_zero <= 27){
4189  for(; survivor_count; survivor_count--){
4190  if(score_tab[ survivor[survivor_count-1] ] <= best_score)
4191  break;
4192  }
4193  }else{
4194  for(; survivor_count; survivor_count--){
4195  if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
4196  break;
4197  }
4198  }
4199 
4200  survivor[ survivor_count++ ]= i+1;
4201  }
4202 
4203  if (s->c.out_format != FMT_H263 && s->c.out_format != FMT_H261) {
4204  last_score= 256*256*256*120;
4205  for(i= survivor[0]; i<=last_non_zero + 1; i++){
4206  int score= score_tab[i];
4207  if (i)
4208  score += lambda * 2; // FIXME more exact?
4209 
4210  if(score < last_score){
4211  last_score= score;
4212  last_i= i;
4213  last_level= level_tab[i];
4214  last_run= run_tab[i];
4215  }
4216  }
4217  }
4218 
4219  s->coded_score[n] = last_score;
4220 
4221  dc= FFABS(block[0]);
4222  last_non_zero= last_i - 1;
4223  memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4224 
4225  if(last_non_zero < start_i)
4226  return last_non_zero;
4227 
4228  if(last_non_zero == 0 && start_i == 0){
4229  int best_level= 0;
4230  int best_score= dc * dc;
4231 
4232  for(i=0; i<coeff_count[0]; i++){
4233  int level= coeff[i][0];
4234  int alevel= FFABS(level);
4235  int unquant_coeff, score, distortion;
4236 
4237  if (s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) {
4238  unquant_coeff= (alevel*qmul + qadd)>>3;
4239  } else{ // MPEG-1
4240  unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
4241  unquant_coeff = (unquant_coeff - 1) | 1;
4242  }
4243  unquant_coeff = (unquant_coeff + 4) >> 3;
4244  unquant_coeff<<= 3 + 3;
4245 
4246  distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4247  level+=64;
4248  if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
4249  else score= distortion + esc_length*lambda;
4250 
4251  if(score < best_score){
4252  best_score= score;
4253  best_level= level - 64;
4254  }
4255  }
4256  block[0]= best_level;
4257  s->coded_score[n] = best_score - dc*dc;
4258  if(best_level == 0) return -1;
4259  else return last_non_zero;
4260  }
4261 
4262  i= last_i;
4263  av_assert2(last_level);
4264 
4265  block[ perm_scantable[last_non_zero] ]= last_level;
4266  i -= last_run + 1;
4267 
4268  for(; i>start_i; i -= run_tab[i] + 1){
4269  block[ perm_scantable[i-1] ]= level_tab[i];
4270  }
4271 
4272  return last_non_zero;
4273 }
4274 
/* 8x8 DCT basis functions in idct_permutation order, scaled by
 * 2^BASIS_SHIFT/4; filled lazily by build_basis() on first use
 * (dct_quantize_refine() checks basis[0][0] == 0). */
static DECLARE_ALIGNED(16, int16_t, basis)[64][64];
4276 
4277 static void build_basis(uint8_t *perm){
4278  int i, j, x, y;
4279  emms_c();
4280  for(i=0; i<8; i++){
4281  for(j=0; j<8; j++){
4282  for(y=0; y<8; y++){
4283  for(x=0; x<8; x++){
4284  double s= 0.25*(1<<BASIS_SHIFT);
4285  int index= 8*i + j;
4286  int perm_index= perm[index];
4287  if(i==0) s*= sqrt(0.5);
4288  if(j==0) s*= sqrt(0.5);
4289  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4290  }
4291  }
4292  }
4293  }
4294 }
4295 
/**
 * Rate-distortion refinement of an already quantized 8x8 block, used for
 * quantizer noise shaping.  The scaled spatial-domain reconstruction
 * error is tracked in rem[]; in every iteration each coefficient is
 * tentatively changed by +-1, the change is scored as weighted distortion
 * (try_8x8basis) plus the VLC bit-cost change times lambda, and the single
 * best-scoring change is applied.  Iteration stops when no change improves
 * the score.
 *
 * @param s      encoder context
 * @param block  quantized DCT coefficients, refined in place
 * @param weight per-coefficient perceptual weights; rescaled in place to
 *               the 16..63 range used by try_8x8basis()
 * @param orig   spatial-domain reference values the reconstruction error
 *               is measured against
 * @param n      block index (n < 4 luma; n > 3 may use chroma VLC tables)
 * @param qscale quantizer scale
 * @return index of the (possibly new) last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
                               int16_t *block, int16_t *weight, int16_t *orig,
                               int n, int qscale){
    DECLARE_ALIGNED(16, int16_t, rem)[64];
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const uint8_t *length;
    const uint8_t *last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->c.mb_intra is true

    /* Lazily build the DCT basis tables on first use. */
    if(basis[0][0] == 0)
        build_basis(s->c.idsp.idct_permutation);

    /* H.263-style dequantization constants: dequant = qmul*level +- qadd. */
    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->c.mb_intra) {
        scantable = s->c.intra_scantable.scantable;
        perm_scantable = s->c.intra_scantable.permutated;
        if (!s->c.h263_aic) {
            if (n < 4)
                q = s->c.y_dc_scale;
            else
                q = s->c.c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if (s->mpeg_quant || s->c.out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        /* Chroma blocks may use dedicated AC VLC length tables. */
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable = s->c.inter_scantable.scantable;
        perm_scantable = s->c.inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->c.block_last_index[n];

    /* rem[] = (reconstruction - orig) scaled by 2^RECON_SHIFT, starting
     * from the DC contribution plus a rounding offset; AC contributions
     * are folded in below via add_8x8basis(). */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
    }

    /* Rescale the perceptual weights into the 16..63 range expected by
     * try_8x8basis(); lambda is derived from their total energy. */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* Build the initial run-length table and fold each dequantized
     * coefficient's basis function into the reconstruction error. */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* Greedy refinement: per iteration, find the single +-1 coefficient
     * change with the best distortion + lambda*bits score and apply it. */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* DCT of the weighted error: used below to only try creating a
             * new +-1 coefficient whose sign opposes the error gradient. */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* Intra: also try changing the separately-coded DC coefficient. */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->c.mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        /* Try +-1 on every AC coefficient position. */
        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* Changing an existing coefficient: bit-cost delta
                         * of recoding the same run with the new level. */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* Creating a new +-1 coefficient: it splits the run
                         * containing this position in two. */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* Removing a +-1 coefficient: the two adjacent runs merge. */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                /* Total score = bit-cost delta * lambda + distortion delta. */
                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* Apply the winning change, update last_non_zero, rebuild the
             * run table and fold the coefficient delta into the error. */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            /* No improving change found -> converged. */
            break;
        }
    }

    return last_non_zero;
}
4611 
/**
 * Reorder the coefficients of an 8x8 block according to a permutation.
 *
 * Only the coefficients covered by the scan up to the last nonzero one are
 * moved; the scantable is used purely to skip trailing zeros quickly, the
 * block itself is NOT (inverse) permutated to scantable order.
 *
 * @param block       coefficients to permute, modified in place
 * @param permutation maps each natural-order index to its new position
 * @param scantable   scan order used to locate the nonzero coefficients
 * @param last        index (in scantable order) of the last nonzero
 *                    coefficient; nothing is done for last <= 0
 */
void ff_block_permute(int16_t *block, const uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;

    /* First pass: pull out every coefficient covered by the scan and
     * clear its original slot. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* Second pass: write each saved coefficient back at its permuted
     * position. */
    for (int idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
4647 
4648 static int dct_quantize_c(MPVEncContext *const s,
4649  int16_t *block, int n,
4650  int qscale, int *overflow)
4651 {
4652  int i, last_non_zero, q, start_i;
4653  const int *qmat;
4654  const uint8_t *scantable;
4655  int bias;
4656  int max=0;
4657  unsigned int threshold1, threshold2;
4658 
4659  s->fdsp.fdct(block);
4660 
4661  denoise_dct(s, block);
4662 
4663  if (s->c.mb_intra) {
4664  scantable = s->c.intra_scantable.scantable;
4665  if (!s->c.h263_aic) {
4666  if (n < 4)
4667  q = s->c.y_dc_scale;
4668  else
4669  q = s->c.c_dc_scale;
4670  q = q << 3;
4671  } else
4672  /* For AIC we skip quant/dequant of INTRADC */
4673  q = 1 << 3;
4674 
4675  /* note: block[0] is assumed to be positive */
4676  block[0] = (block[0] + (q >> 1)) / q;
4677  start_i = 1;
4678  last_non_zero = 0;
4679  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4680  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4681  } else {
4682  scantable = s->c.inter_scantable.scantable;
4683  start_i = 0;
4684  last_non_zero = -1;
4685  qmat = s->q_inter_matrix[qscale];
4686  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4687  }
4688  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4689  threshold2= (threshold1<<1);
4690  for(i=63;i>=start_i;i--) {
4691  const int j = scantable[i];
4692  int64_t level = (int64_t)block[j] * qmat[j];
4693 
4694  if(((uint64_t)(level+threshold1))>threshold2){
4695  last_non_zero = i;
4696  break;
4697  }else{
4698  block[j]=0;
4699  }
4700  }
4701  for(i=start_i; i<=last_non_zero; i++) {
4702  const int j = scantable[i];
4703  int64_t level = (int64_t)block[j] * qmat[j];
4704 
4705 // if( bias+level >= (1<<QMAT_SHIFT)
4706 // || bias-level >= (1<<QMAT_SHIFT)){
4707  if(((uint64_t)(level+threshold1))>threshold2){
4708  if(level>0){
4709  level= (bias + level)>>QMAT_SHIFT;
4710  block[j]= level;
4711  }else{
4712  level= (bias - level)>>QMAT_SHIFT;
4713  block[j]= -level;
4714  }
4715  max |=level;
4716  }else{
4717  block[j]=0;
4718  }
4719  }
4720  *overflow= s->max_qcoeff < max; //overflow might have happened
4721 
4722  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4723  if (s->c.idsp.perm_type != FF_IDCT_PERM_NONE)
4724  ff_block_permute(block, s->c.idsp.idct_permutation,
4725  scantable, last_non_zero);
4726 
4727  return last_non_zero;
4728 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:78
encode_frame
static int encode_frame(AVCodecContext *c, const AVFrame *frame, AVPacket *pkt)
Definition: mpegvideo_enc.c:1496
dct_quantize_trellis_c
static int dct_quantize_trellis_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:3963
put_dct
static void put_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1152
MPV_MAX_PLANES
#define MPV_MAX_PLANES
Definition: mpegpicture.h:31
ff_fix_long_p_mvs
void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
Definition: motion_est.c:1661
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:378
FF_MATRIX_TYPE_INTRA
#define FF_MATRIX_TYPE_INTRA
Check if the elements of codec context matrices (intra_matrix, inter_matrix or chroma_intra_matrix) a...
Definition: encode.h:106
QMAT_SHIFT_MMX
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:82
ff_encode_reordered_opaque
int ff_encode_reordered_opaque(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame)
Propagate user opaque values from the frame to avctx/pkt as needed.
Definition: encode.c:219
mpegvideo_unquantize.h
MPVMainEncContext::me_pre
int me_pre
prepass for motion estimation
Definition: mpegvideoenc.h:264
ff_fix_long_mvs
void ff_fix_long_mvs(MPVEncContext *const s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1710
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:432
MPVMainEncContext::fcode_tab
const uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideoenc.h:239
MPVMainEncContext::fixed_qscale
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideoenc.h:257
CANDIDATE_MB_TYPE_BIDIR
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegvideoenc.h:298
encode_mb_hq
static void encode_mb_hq(MPVEncContext *const s, MBBackup *const backup, MBBackup *const best, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2724
MPVMainEncContext::frame_skip_cmp_fn
me_cmp_func frame_skip_cmp_fn
Definition: mpegvideoenc.h:246
MPVMainEncContext::bit_rate
int64_t bit_rate
Definition: mpegvideoenc.h:251
dct_single_coeff_elimination
static void dct_single_coeff_elimination(MPVEncContext *const s, int n, int threshold)
Definition: mpegvideo_enc.c:2166
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:174
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:216
h263data.h
init_unquantize
static av_cold void init_unquantize(MPVEncContext *const s2, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:310
ff_mpv_enc_class
const AVClass ff_mpv_enc_class
Definition: mpegvideo_enc.c:101
encode_mb
static void encode_mb(MPVEncContext *const s, int motion_x, int motion_y)
Definition: mpegvideo_enc.c:2627
level
uint8_t level
Definition: svq3.c:208
ff_estimate_b_frame_motion
void ff_estimate_b_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1493
av_clip
#define av_clip
Definition: common.h:100
MPVEncContext
Definition: mpegvideoenc.h:46
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:524
FF_LAMBDA_SCALE
#define FF_LAMBDA_SCALE
Definition: avutil.h:225
ALIGN
#define ALIGN
Definition: hashtable.c:32
r
const char * r
Definition: vf_curves.c:127
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const op_pixels_func(*pix_op)[4], const qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:821
init_qscale_tab
static void init_qscale_tab(MPVEncContext *const s)
init s->c.cur_pic.qscale_table from s->lambda_table
Definition: mpegvideo_enc.c:241
ff_mpv_init_duplicate_contexts
av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:118
update_noise_reduction
static void update_noise_reduction(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1897
MBBackup::mv_bits
int mv_bits
Definition: mpegvideo_enc.c:2649
mem_internal.h
MPVMainEncContext::dct_error_sum_base
char * dct_error_sum_base
backs dct_error_sum
Definition: mpegvideoenc.h:270
ff_me_init
av_cold int ff_me_init(MotionEstContext *c, AVCodecContext *avctx, const MECmpContext *mecc, int mpvenc)
Definition: motion_est.c:309
MBBackup::misc_bits
int misc_bits
Definition: mpegvideo_enc.c:2649
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:206
EDGE_BOTTOM
#define EDGE_BOTTOM
Definition: mpegvideoencdsp.h:30
mjpegenc_common.h
BUF_BITS
#define BUF_BITS
Definition: put_bits.h:47
AVCodecContext::rc_min_rate
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1277
set_frame_distances
static void set_frame_distances(MPVEncContext *const s)
Definition: mpegvideo_enc.c:3697
thread.h
frame_start
static void frame_start(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1919
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
ff_speedhq_end_slice
void ff_speedhq_end_slice(MPVEncContext *const s)
Definition: speedhqenc.c:118
MBBackup::block_last_index
int block_last_index[8]
Definition: mpegvideo_enc.c:2644
estimate_qp
static int estimate_qp(MPVMainEncContext *const m, int dry_run)
Definition: mpegvideo_enc.c:3658
ff_msmpeg4_encode_init
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
Definition: msmpeg4enc.c:673
matrix
Definition: vc1dsp.c:43
src1
const pixel * src1
Definition: h264pred_template.c:420
MPVEncContext::c
MpegEncContext c
the common base context
Definition: mpegvideoenc.h:47
AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:213
MBBackup::last_mv
int last_mv[2][2][2]
Definition: mpegvideo_enc.c:2640
MPVMainEncContext::total_bits
int64_t total_bits
Definition: mpegvideoenc.h:252
mpegvideoenc.h
int64_t
long long int64_t
Definition: coverity.c:34
init_put_bits
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:62
ff_dct_encode_init
av_cold void ff_dct_encode_init(MPVEncContext *const s)
Definition: mpegvideo_enc.c:298
MPVMainEncContext::noise_reduction
int noise_reduction
Definition: mpegvideoenc.h:230
COPY
#define COPY(a)
ff_me_init_pic
void ff_me_init_pic(MPVEncContext *const s)
Definition: motion_est.c:371
h263enc.h
basis
static int16_t basis[64][64]
Definition: mpegvideo_enc.c:4275
AVCodecContext::intra_matrix
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:948
estimate_best_b_count
static int estimate_best_b_count(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1517
MPVMainEncContext::last_lambda_for
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideoenc.h:258
mv_bits
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:165
estimate_motion_thread
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2841
ff_clean_h263_qscales
void ff_clean_h263_qscales(MPVEncContext *s)
AVCodecContext::lumi_masking
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:808
out_size
int out_size
Definition: movenc.c:56
MV_DIRECT
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:172
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:64
sse
static int sse(const MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2, int w, int h, int stride)
Definition: mpegvideo_enc.c:2775
CANDIDATE_MB_TYPE_INTER
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegvideoenc.h:291
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:158
MPVMainEncContext::frame_skip_threshold
int frame_skip_threshold
Definition: mpegvideoenc.h:242
MPVUnquantDSPContext::dct_unquantize_mpeg1_intra
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:35
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:427
put_bits
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:154
INTERLACED_DCT
#define INTERLACED_DCT(s)
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:529
AVFrame::width
int width
Definition: frame.h:499
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:191
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
MBBackup::last_bits
int last_bits
Definition: mpegvideo_enc.c:2649
AVPacket::data
uint8_t * data
Definition: packet.h:588
av_packet_shrink_side_data
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Shrink the already allocated side data buffer.
Definition: packet.c:379
AVOption
AVOption.
Definition: opt.h:429
encode.h
b
#define b
Definition: input.c:42
put_bytes_count
static int put_bytes_count(const PutBitContext *s, int round_up)
Definition: put_bits.h:110
MPVEncContext::lambda
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideoenc.h:52
data
const char data[16]
Definition: mxf.c:149
MPVMainEncContext::dts_delta
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideoenc.h:216
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
MpegEncContext::MSMP4_UNUSED
@ MSMP4_UNUSED
Definition: mpegvideo.h:235
write_slice_end
static void write_slice_end(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2888
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:226
MpegEncContext::dest
uint8_t * dest[3]
Definition: mpegvideo.h:198
speedhqenc.h
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:491
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:606
FF_MPV_FLAG_SKIP_RD
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideoenc.h:308
max
#define max(a, b)
Definition: cuda_runtime.h:33
ff_mpeg12_dc_scale_table
const uint8_t ff_mpeg12_dc_scale_table[4][32]
Definition: mpegvideodata.c:33
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:81
mathematics.h
FF_COMPLIANCE_EXPERIMENTAL
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: defs.h:62
sqr
static double sqr(double in)
Definition: af_afwtdn.c:872
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:306
pre_estimate_motion_thread
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2823
get_visual_weight
static void get_visual_weight(int16_t *weight, const uint8_t *ptr, int stride)
Definition: mpegvideo_enc.c:2256
FF_LAMBDA_SHIFT
#define FF_LAMBDA_SHIFT
Definition: avutil.h:224
COPY_CONTEXT
#define COPY_CONTEXT(BEFORE, AFTER, DST_TYPE, SRC_TYPE)
Definition: mpegvideo_enc.c:2656
AVCodecContext::mb_decision
int mb_decision
macroblock decision mode
Definition: avcodec.h:936
FMT_H261
@ FMT_H261
Definition: mpegvideo.h:54
MPVMainEncContext::gop_size
int gop_size
Definition: mpegvideoenc.h:203
AVCodecContext::qmax
int qmax
maximum quantizer
Definition: avcodec.h:1241
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:331
MPVMainEncContext::mb_var_sum
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegvideoenc.h:266
mpegutils.h
pix
enum AVPixelFormat pix
Definition: ohcodec.c:55
AV_CODEC_FLAG_4MV
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:217
AVCodecContext::delay
int delay
Codec delay.
Definition: avcodec.h:575
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:643
AVCodecContext::mb_cmp
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:862
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
MPVMainEncContext::encode_picture_header
int(* encode_picture_header)(struct MPVMainEncContext *m)
Definition: mpegvideoenc.h:248
quality
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about quality
Definition: rate_distortion.txt:12
CANDIDATE_MB_TYPE_BACKWARD_I
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegvideoenc.h:302
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:448
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:171
MECmpContext::sum_abs_dctelem
int(* sum_abs_dctelem)(const int16_t *block)
Definition: me_cmp.h:51
MBBackup::c
struct MBBackup::@212 c
AV_CODEC_ID_H261
@ AV_CODEC_ID_H261
Definition: codec_id.h:55
update_mb_info
static void update_mb_info(MPVEncContext *const s, int startcode)
Definition: mpegvideo_enc.c:2928
MBBackup::i_tex_bits
int i_tex_bits
Definition: mpegvideo_enc.c:2649
MPVMainEncContext::coded_picture_number
int coded_picture_number
used to set pic->coded_picture_number
Definition: mpegvideoenc.h:207
av_gcd
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
set_bframe_chain_length
static int set_bframe_chain_length(MPVMainEncContext *const m)
Determines whether an input picture is discarded or not and if not determines the length of the next ...
Definition: mpegvideo_enc.c:1670
FF_MPV_COMMON_MOTION_EST_OPTS
#define FF_MPV_COMMON_MOTION_EST_OPTS
Definition: mpegvideoenc.h:377
mpv_reconstruct_mb
static void mpv_reconstruct_mb(MPVEncContext *const s, int16_t block[12][64])
Performs dequantization and IDCT (if necessary)
Definition: mpegvideo_enc.c:1172
MBBackup::tex_pb
PutBitContext tex_pb
Definition: mpegvideo_enc.c:2653
mpeg4videoenc.h
FF_CMP_VSSE
#define FF_CMP_VSSE
Definition: avcodec.h:878
ff_mpv_encode_picture
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
Definition: mpegvideo_enc.c:1936
FF_MPV_COMMON_OPTS
#define FF_MPV_COMMON_OPTS
Definition: mpegvideoenc.h:336
sp5x.h
MBBackup::mb_skip_run
int mb_skip_run
Definition: mpegvideo_enc.c:2647
ff_copy_bits
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:49
FMT_MJPEG
@ FMT_MJPEG
Definition: mpegvideo.h:56
init_slice_buffers
static av_cold int init_slice_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:499
mx
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t mx
Definition: dsp.h:57
FDCTDSPContext
Definition: fdctdsp.h:28
MPVMainEncContext::b_sensitivity
int b_sensitivity
Definition: mpegvideoenc.h:225
faandct.h
Floating point AAN DCT.
av_packet_add_side_data
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: packet.c:197
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegvideo.h:53
ff_match_2uint16
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:843
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:440
mpeg12enc.h
ff_h263_pred_motion
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:182
MBBackup::interlaced_dct
int interlaced_dct
Definition: mpegvideo_enc.c:2645
STRIDE_ALIGN
#define STRIDE_ALIGN
Definition: internal.h:46
ff_vbv_update
int ff_vbv_update(MPVMainEncContext *m, int frame_size)
Definition: ratecontrol.c:722
MpegEncContext::chroma_y_shift
int chroma_y_shift
Definition: mpegvideo.h:267
fail
#define fail()
Definition: checkasm.h:208
FMT_SPEEDHQ
@ FMT_SPEEDHQ
Definition: mpegvideo.h:57
tab
static const struct twinvq_data tab
Definition: twinvq_data.h:10345
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:103
ff_h263_encode_init
void ff_h263_encode_init(MPVMainEncContext *m)
ff_me_cmp_init
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:961
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:488
CANDIDATE_MB_TYPE_SKIPPED
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegvideoenc.h:293
MPVUnquantDSPContext::dct_unquantize_h263_intra
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:43
perm
perm
Definition: f_perms.c:75
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:37
weight
const h264_weight_func weight
Definition: h264dsp_init.c:33
MPVMainEncContext::input_picture
MPVPicture * input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in display order
Definition: mpegvideoenc.h:209
MpegEncContext::MSMP4_WMV2
@ MSMP4_WMV2
Definition: mpegvideo.h:240
AVCodecContext::bit_rate_tolerance
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:1209
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:314
pts
static int64_t pts
Definition: transcode_aac.c:644
FF_MPV_FLAG_CBP_RD
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideoenc.h:311
get_intra_count
static int get_intra_count(MPVEncContext *const s, const uint8_t *src, const uint8_t *ref, int stride)
Definition: mpegvideo_enc.c:1254
ff_mpeg4_init_partitions
void ff_mpeg4_init_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1287
sse_mb
static int sse_mb(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2797
ff_encode_add_stats_side_data
int ff_encode_add_stats_side_data(AVPacket *pkt, int quality, const int64_t error[], int error_count, enum AVPictureType pict_type)
Definition: encode.c:919
AV_CODEC_ID_MSMPEG4V2
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:67
AV_CODEC_FLAG_LOOP_FILTER
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:298
ff_sqrt
#define ff_sqrt
Definition: mathops.h:217
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
ff_mpeg1_encode_init
static void ff_mpeg1_encode_init(MPVEncContext *s)
Definition: mpeg12enc.h:33
init_matrices
static av_cold int init_matrices(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:373
AVRational::num
int num
Numerator.
Definition: rational.h:59
put_bytes_left
static int put_bytes_left(const PutBitContext *s, int round_up)
Definition: put_bits.h:145
refstruct.h
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:310
CANDIDATE_MB_TYPE_DIRECT
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegvideoenc.h:295
CANDIDATE_MB_TYPE_INTER_I
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegvideoenc.h:300
MPVMainEncContext::frame_skip_factor
int frame_skip_factor
Definition: mpegvideoenc.h:243
skip_check
static int skip_check(MPVMainEncContext *const m, const MPVPicture *p, const MPVPicture *ref)
Definition: mpegvideo_enc.c:1454
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:52
MPVMainEncContext::stuffing_bits
int stuffing_bits
bits used for stuffing
Definition: mpegvideoenc.h:255
MPVMainEncContext::picture_in_gop_number
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideoenc.h:205
RateControlContext
rate control context.
Definition: ratecontrol.h:60
RateControlContext::num_entries
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:61
ff_thread_once
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:205
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:210
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:106
ff_h263_encode_gob_header
void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
MAX_MV
#define MAX_MV
Definition: motion_est.h:37
MPVPicture::shared
int shared
Definition: mpegpicture.h:87
MPVPicture::coded_picture_number
int coded_picture_number
Definition: mpegpicture.h:90
me_cmp_func
int(* me_cmp_func)(MPVEncContext *c, const uint8_t *blk1, const uint8_t *blk2, ptrdiff_t stride, int h)
Definition: me_cmp.h:45
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:642
default_fcode_tab
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:93
MpegEncContext::ac_val
int16_t(* ac_val)[16]
used for H.263 AIC, MPEG-4 AC prediction
Definition: mpegvideo.h:144
ff_mpeg4_set_direct_mv
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:119
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
emms_c
#define emms_c()
Definition: emms.h:63
build_basis
static void build_basis(uint8_t *perm)
Definition: mpegvideo_enc.c:4277
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:697
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
MPVMainEncContext::tmp_frames
AVFrame * tmp_frames[MPVENC_MAX_B_FRAMES+2]
temporary frames used by b_frame_strategy = 2
Definition: mpegvideoenc.h:223
MAX_MB_BYTES
#define MAX_MB_BYTES
Definition: mpegutils.h:35
get_sae
static int get_sae(const uint8_t *src, int ref, int stride)
Definition: mpegvideo_enc.c:1240
ff_rv10_encode_picture_header
int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
Definition: rv10enc.c:34
s
#define s(width, name)
Definition: cbs_vp9.c:198
rebase_put_bits
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:122
CHROMA_422
#define CHROMA_422
Definition: mpegvideo.h:264
BASIS_SHIFT
#define BASIS_SHIFT
Definition: mpegvideoencdsp.h:26
MPVMainEncContext::brd_scale
int brd_scale
Definition: mpegvideoenc.h:226
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:60
MBBackup::esc3_level_length
int esc3_level_length
Definition: mpegvideo_enc.c:2651
MPVMainEncContext::reordered_input_picture
MPVPicture * reordered_input_picture[MPVENC_MAX_B_FRAMES+1]
next pictures in coded order
Definition: mpegvideoenc.h:210
MPVMainEncContext::intra_only
int intra_only
if true, only intra pictures are generated
Definition: mpegvideoenc.h:202
MPVMainEncContext::mc_mb_var_sum
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegvideoenc.h:267
merge_context_after_me
static void merge_context_after_me(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3623
g
const char * g
Definition: vf_curves.c:128
ff_mpeg4_stuffing
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
Definition: mpeg4videoenc.c:835
MPVMainEncContext::rc_context
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideoenc.h:261
MPVUnquantDSPContext::dct_unquantize_mpeg2_intra
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:39
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
ff_mpeg1_dc_scale_table
static const uint8_t *const ff_mpeg1_dc_scale_table
Definition: mpegvideodata.h:32
bits
uint8_t bits
Definition: vp3data.h:128
LOCAL_ALIGNED_16
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
MPVEncContext::pb
PutBitContext pb
bit output
Definition: mpegvideoenc.h:50
MPVMainEncContext::header_bits
int header_bits
Definition: mpegvideoenc.h:254
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:41
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1553
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:231
RateControlEntry::new_pict_type
int new_pict_type
Definition: ratecontrol.h:51
ff_write_quant_matrix
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
Definition: mpegvideo_enc.c:225
limits.h
AV_CODEC_ID_MSMPEG4V1
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:66
MPVMainEncContext::max_b_frames
int max_b_frames
max number of B-frames
Definition: mpegvideoenc.h:204
MAX_AC_TEX_MB_SIZE
@ MAX_AC_TEX_MB_SIZE
Definition: mpeg4videoenc.h:39
ff_pre_estimate_p_frame_motion
int ff_pre_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:1067
ff_clean_mpeg4_qscales
void ff_clean_mpeg4_qscales(MPVEncContext *const s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
Definition: mpeg4videoenc.c:270
rv10enc.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1270
ff_block_permute
void ff_block_permute(int16_t *block, const uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
Definition: mpegvideo_enc.c:4623
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1505
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:282
PutBitContext
Definition: put_bits.h:50
ff_speedhq_mb_y_order_to_mb
static int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.h:41
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
CANDIDATE_MB_TYPE_FORWARD
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegvideoenc.h:296
MBBackup::mv_dir
int mv_dir
Definition: mpegvideo_enc.c:2641
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:441
my
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t my
Definition: dsp.h:57
AVCodecContext::p_masking
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:829
mb_var_thread
static int mb_var_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2866
FMT_H263
@ FMT_H263
Definition: mpegvideo.h:55
arg
const char * arg
Definition: jacosubdec.c:65
mpv_encode_init_static
static av_cold void mpv_encode_init_static(void)
Definition: mpegvideo_enc.c:269
ff_mpv_common_end
av_cold void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:447
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:74
if
if(ret)
Definition: filter_design.txt:179
ff_mpv_unref_picture
void ff_mpv_unref_picture(MPVWorkPicture *pic)
Definition: mpegpicture.c:98
AVCodecContext::rc_buffer_size
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1255
MECmpContext
Definition: me_cmp.h:50
MpegEncContext::field_select
int field_select[2][2]
Definition: mpegvideo.h:185
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AV_ONCE_INIT
#define AV_ONCE_INIT
Definition: thread.h:203
CANDIDATE_MB_TYPE_FORWARD_I
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegvideoenc.h:301
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:76
MPVEncContext::block
int16_t(* block)[64]
points into blocks below
Definition: mpegvideoenc.h:114
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:118
NULL
#define NULL
Definition: coverity.c:32
MPVEncContext::dct_error_sum
int(* dct_error_sum)[64]
Definition: mpegvideoenc.h:126
MPVMainEncContext::lmin
int lmin
Definition: mpegvideoenc.h:233
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:599
run
uint8_t run
Definition: svq3.c:207
MpegEncContext::mb_y
int mb_y
Definition: mpegvideo.h:193
bias
static int bias(int x, int c)
Definition: vqcdec.c:115
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:96
me
#define me
Definition: vf_colormatrix.c:102
aandcttab.h
ff_mpv_common_defaults
av_cold void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:190
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
av_unreachable
#define av_unreachable(msg)
Asserts that are used as compiler optimization hints depending upon ASSERT_LEVEL and NBDEBUG.
Definition: avassert.h:108
ff_rate_estimate_qscale
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
Definition: ratecontrol.c:912
CANDIDATE_MB_TYPE_BACKWARD
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegvideoenc.h:297
AVCodecContext::internal
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:466
MECmpContext::sad
me_cmp_func sad[6]
Definition: me_cmp.h:53
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:481
MPVPicture::display_picture_number
int display_picture_number
Definition: mpegpicture.h:89
EDGE_WIDTH
#define EDGE_WIDTH
Definition: diracdec.c:47
MpegEncContext::MSMP4_WMV1
@ MSMP4_WMV1
Definition: mpegvideo.h:239
ROUNDED_DIV
#define ROUNDED_DIV(a, b)
Definition: common.h:58
ff_faandct
void ff_faandct(int16_t *data)
Definition: faandct.c:115
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:205
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:241
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:278
MPVEncContext::lambda2
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideoenc.h:53
me_cmp_init
static av_cold int me_cmp_init(MPVMainEncContext *const m, AVCodecContext *avctx)
Definition: mpegvideo_enc.c:330
select_input_picture
static int select_input_picture(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1801
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:524
AV_CODEC_ID_SPEEDHQ
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:279
mathops.h
dct_error
static int dct_error(const struct algo *dct, int test, int is_idct, int speed, const int bits)
Definition: dct.c:188
AV_CODEC_FLAG_AC_PRED
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:327
MERGE
#define MERGE(field)
Definition: mpegvideo_enc.c:3622
AVCodecContext::ildct_cmp
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:868
av_refstruct_pool_get
void * av_refstruct_pool_get(AVRefStructPool *pool)
Get an object from the pool, reusing an old one from the pool when available.
Definition: refstruct.c:297
ff_mpv_encode_end
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:1116
MBBackup::qscale
int qscale
Definition: mpegvideo_enc.c:2643
FF_MB_DECISION_SIMPLE
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:937
qpeldsp.h
ff_mpv_reallocate_putbitbuffer
int ff_mpv_reallocate_putbitbuffer(MPVEncContext *const s, size_t threshold, size_t size_increase)
Definition: mpegvideo_enc.c:2951
ff_h261_reorder_mb_index
void ff_h261_reorder_mb_index(MPVEncContext *const s)
Definition: h261enc.c:120
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
ff_mpv_unquantize_init
#define ff_mpv_unquantize_init(s, bitexact, q_scale_type)
Definition: mpegvideo_unquantize.h:50
add_dequant_dct
static void add_dequant_dct(MPVEncContext *const s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo_enc.c:1159
AVCodecContext::trellis
int trellis
trellis RD quantization
Definition: avcodec.h:1305
AV_CODEC_ID_WMV1
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:69
ff_mpeg4_encode_video_packet_header
void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1326
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Average and put pixel Widths can be 16, 8, 4 or 2.
Definition: hpeldsp.h:39
MBBackup::block
int16_t(* block)[64]
Definition: mpegvideo_enc.c:2652
update_duplicate_context_after_me
static void update_duplicate_context_after_me(MPVEncContext *const dst, const MPVEncContext *const src)
Definition: mpegvideo_enc.c:253
MPVMainEncContext
Definition: mpegvideoenc.h:199
AVOnce
#define AVOnce
Definition: thread.h:202
index
int index
Definition: gxfenc.c:90
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
MPVPicture::reference
int reference
Definition: mpegpicture.h:86
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:175
AVCodecContext::temporal_cplx_masking
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:815
load_input_picture
static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg)
Definition: mpegvideo_enc.c:1313
set_put_bits_buffer_size
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:436
ff_set_mpeg4_time
void ff_set_mpeg4_time(MPVEncContext *const s)
Definition: mpeg4videoenc.c:843
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:535
ff_encode_alloc_frame
int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
Allocate buffers for a frame.
Definition: encode.c:838
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1380
ff_h263_clean_intra_table_entries
static void ff_h263_clean_intra_table_entries(MpegEncContext *s, int xy)
Definition: h263.h:47
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1312
MPVMainEncContext::last_pict_type
int last_pict_type
Definition: mpegvideoenc.h:259
AV_CODEC_FLAG_QPEL
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:225
f
f
Definition: af_crystalizer.c:122
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:519
QUANT_BIAS_SHIFT
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:80
MotionEstContext::temp
uint8_t * temp
Definition: motion_est.h:57
clip_coeffs
static void clip_coeffs(const MPVEncContext *const s, int16_t block[], int last_index)
Definition: mpegvideo_enc.c:2222
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:302
AVPacket::size
int size
Definition: packet.h:589
dc
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled top and top right vectors is used as motion vector prediction the used motion vector is the sum of the predictor and(mvx_diff, mvy_diff) *mv_scale Intra DC Prediction block[y][x] dc[1]
Definition: snow.txt:400
AVCodecContext::gop_size
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1005
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:154
ff_mpeg4_clean_buffers
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:44
height
#define height
Definition: dsp.h:89
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem_internal.h:104
MPVMainEncContext::vbv_delay_pos
int vbv_delay_pos
offset of vbv_delay in the bitstream
Definition: mpegvideoenc.h:237
MECmpContext::sse
me_cmp_func sse[6]
Definition: me_cmp.h:54
shift
static int shift(int a, int b)
Definition: bonk.c:261
dst
uint8_t ptrdiff_t const uint8_t ptrdiff_t int intptr_t intptr_t int int16_t * dst
Definition: dsp.h:87
MBBackup::mv_type
int mv_type
Definition: mpegvideo_enc.c:2641
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:203
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:549
ff_update_block_index
static void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample, int lowres, int chroma_x_shift)
Definition: mpegvideo.h:336
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:424
FF_IDCT_PERM_NONE
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:28
CANDIDATE_MB_TYPE_DIRECT0
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegvideoenc.h:305
ff_mpeg4_default_intra_matrix
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:334
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:56
size
int size
Definition: twinvq_data.h:10344
CANDIDATE_MB_TYPE_INTRA
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegvideoenc.h:290
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:247
mpv_generic_options
static const AVOption mpv_generic_options[]
Definition: mpegvideo_enc.c:95
RECON_SHIFT
#define RECON_SHIFT
Definition: mpegvideoencdsp.h:27
MPVMainEncContext::frame_bits
int frame_bits
bits used for the current frame
Definition: mpegvideoenc.h:253
AVCodecInternal::byte_buffer
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:95
FF_MPV_FLAG_QP_RD
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideoenc.h:310
encode_picture
static int encode_picture(MPVMainEncContext *const s, const AVPacket *pkt)
Definition: mpegvideo_enc.c:3712
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:514
AVCPBProperties::min_bitrate
int64_t min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: defs.h:292
MECmpContext::nsse
me_cmp_func nsse[6]
Definition: me_cmp.h:62
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:31
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:166
ff_set_cmp
av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mpvenc)
Fill the function pointer array cmp[6] with me_cmp_funcs from c based upon type.
Definition: me_cmp.c:443
MPVEncContext::me
MotionEstContext me
Definition: mpegvideoenc.h:78
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:587
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:294
FF_COMPLIANCE_NORMAL
#define FF_COMPLIANCE_NORMAL
Definition: defs.h:60
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
ff_mpeg4_default_non_intra_matrix
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:345
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
Definition: mpegvideo_enc.c:372
MPVMainEncContext::input_picture_number
int input_picture_number
used to set pic->display_picture_number
Definition: mpegvideoenc.h:206
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:99
ff_mpeg1_encode_slice_header
void ff_mpeg1_encode_slice_header(MPVEncContext *s)
MPVUnquantDSPContext::dct_unquantize_mpeg2_inter
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:41
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:177
MAX_PB2_MB_SIZE
@ MAX_PB2_MB_SIZE
Definition: mpeg4videoenc.h:38
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:594
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
AVCPBProperties::avg_bitrate
int64_t avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: defs.h:297
AVCodecInternal::byte_buffer_size
unsigned int byte_buffer_size
Definition: internal.h:96
ScratchpadContext::scratchpad_buf
uint8_t * scratchpad_buf
the other *_scratchpad point into this buffer
Definition: mpegpicture.h:38
MPVMainEncContext::me_penalty_compensation
int me_penalty_compensation
Definition: mpegvideoenc.h:263
UNI_AC_ENC_INDEX
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideoenc.h:286
M_PI
#define M_PI
Definition: mathematics.h:67
CANDIDATE_MB_TYPE_BIDIR_I
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegvideoenc.h:303
MBBackup
Definition: mpegvideo_enc.c:2637
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:221
CANDIDATE_MB_TYPE_INTER4V
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegvideoenc.h:292
AVCodec::id
enum AVCodecID id
Definition: codec.h:186
av_refstruct_unref
void av_refstruct_unref(void *objp)
Decrement the reference count of the underlying object and automatically free the object if there are...
Definition: refstruct.c:120
ff_mjpeg_add_icc_profile_size
int ff_mjpeg_add_icc_profile_size(AVCodecContext *avctx, const AVFrame *frame, size_t *max_pkt_size)
Definition: mjpegenc_common.c:137
CHROMA_444
#define CHROMA_444
Definition: mpegvideo.h:265
AVCPBProperties::vbv_delay
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: defs.h:312
emms.h
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:59
get_bits_diff
static int get_bits_diff(MPVEncContext *s)
Definition: mpegvideoenc.h:408
MBBackup::last_dc
int last_dc[3]
Definition: mpegvideo_enc.c:2648
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:68
MpegEncContext::uvlinesize
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:104
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:142
AV_PKT_DATA_H263_MB_INFO
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:90
AV_CODEC_ID_RV10
@ AV_CODEC_ID_RV10
Definition: codec_id.h:57
CHROMA_420
#define CHROMA_420
Definition: mpegvideo.h:263
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
MBBackup::mv
int mv[2][4][2]
Definition: mpegvideo_enc.c:2639
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:581
MPVUnquantDSPContext::dct_unquantize_h263_inter
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:45
put_bits_count
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:90
ff_rv20_encode_picture_header
int ff_rv20_encode_picture_header(MPVMainEncContext *m)
Definition: rv20enc.c:37
encode_thread
static int encode_thread(AVCodecContext *c, void *arg)
Definition: mpegvideo_enc.c:2985
MPVMainEncContext::mv_table_base
int16_t(* mv_table_base)[2]
Definition: mpegvideoenc.h:271
MBBackup::pb2
PutBitContext pb2
Definition: mpegvideo_enc.c:2653
ff_jpeg_fdct_islow_8
void ff_jpeg_fdct_islow_8(int16_t *data)
ff_fdctdsp_init
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:25
internal.h
FF_MATRIX_TYPE_CHROMA_INTRA
#define FF_MATRIX_TYPE_CHROMA_INTRA
Definition: encode.h:108
ff_h263_update_mb
void ff_h263_update_mb(MPVEncContext *s)
AVCodecContext::intra_dc_precision
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:971
src2
const pixel * src2
Definition: h264pred_template.c:421
MPVEncContext::dct_offset
uint16_t(* dct_offset)[64]
Definition: mpegvideoenc.h:128
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:57
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:287
MpegEncContext::mb_x
int mb_x
Definition: mpegvideo.h:193
ff_rate_control_init
av_cold int ff_rate_control_init(MPVMainEncContext *const m)
Definition: ratecontrol.c:497
av_fast_padded_malloc
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:53
AV_CODEC_ID_RV20
@ AV_CODEC_ID_RV20
Definition: codec_id.h:58
av_always_inline
#define av_always_inline
Definition: attributes.h:63
MPVENC_MAX_B_FRAMES
#define MPVENC_MAX_B_FRAMES
Definition: mpegvideoenc.h:44
ff_jpeg_fdct_islow_10
void ff_jpeg_fdct_islow_10(int16_t *data)
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
mpv_encode_defaults
static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)
Set the given MPVEncContext to defaults for encoding.
Definition: mpegvideo_enc.c:278
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:523
MPVMainEncContext::next_lambda
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideoenc.h:256
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:152
AV_STRINGIFY
#define AV_STRINGIFY(s)
Definition: macros.h:66
ff_h263_format
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
FF_CMP_NSSE
#define FF_CMP_NSSE
Definition: avcodec.h:879
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:496
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
MPVMainEncContext::border_masking
float border_masking
Definition: mpegvideoenc.h:232
ff_write_pass1_stats
void ff_write_pass1_stats(MPVMainEncContext *const m)
Definition: ratecontrol.c:38
ff_msmpeg4_encode_ext_header
void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
Definition: msmpeg4enc.c:285
ff_square_tab
const EXTERN uint32_t ff_square_tab[512]
Definition: mathops.h:35
MPVMainEncContext::last_non_b_pict_type
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideoenc.h:260
MpegEncContext::MSMP4_V3
@ MSMP4_V3
Definition: mpegvideo.h:238
AVCodecContext::height
int height
Definition: avcodec.h:592
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:491
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:631
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
prepare_picture
static int prepare_picture(MPVEncContext *const s, AVFrame *f, const AVFrame *props_frame)
Allocates new buffers for an AVFrame and copies the properties from another AVFrame.
Definition: mpegvideo_enc.c:1281
RateControlContext::buffer_index
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:63
ff_get_2pass_fcode
void ff_get_2pass_fcode(MPVMainEncContext *const m)
Definition: ratecontrol.c:900
frame_end
static void frame_end(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:1859
idctdsp.h
MPVPicture::b_frame_score
int b_frame_score
Definition: mpegpicture.h:84
encode_mb_internal
static av_always_inline void encode_mb_internal(MPVEncContext *const s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count, int chroma_x_shift, int chroma_y_shift, int chroma_format)
Definition: mpegvideo_enc.c:2280
avcodec.h
init_buffers
static av_cold int init_buffers(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:441
ff_pixblockdsp_init
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, int bits_per_raw_sample)
Definition: pixblockdsp.c:87
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:137
AV_CODEC_FLAG_CLOSED_GOP
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:332
ret
ret
Definition: filter_design.txt:187
ff_h263_mpeg4_reset_dc
void ff_h263_mpeg4_reset_dc(MPVEncContext *s)
MPVMainEncContext::vbv_ignore_qmax
int vbv_ignore_qmax
Definition: mpegvideoenc.h:234
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:81
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:265
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:42
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:303
AVCodecContext::strict_std_compliance
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1357
ff_fdct_ifast
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:207
ff_inv_aanscales
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
ff_h263_loop_filter
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:97
ff_convert_matrix
void ff_convert_matrix(MPVEncContext *const s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:108
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
MPVMainEncContext::reordered_pts
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
Definition: mpegvideoenc.h:220
MPVPicture::f
struct AVFrame * f
Definition: mpegpicture.h:59
MotionEstContext::scratchpad
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
Definition: motion_est.h:55
mpeg12data.h
AV_CODEC_ID_AMV
@ AV_CODEC_ID_AMV
Definition: codec_id.h:159
MpegEncContext::chroma_x_shift
int chroma_x_shift
Definition: mpegvideo.h:266
AVCodecContext::dark_masking
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:836
MPVMainEncContext::frame_skip_cmp
int frame_skip_cmp
Definition: mpegvideoenc.h:245
MBBackup::dquant
int dquant
Definition: mpegvideo_enc.c:2650
AVCodecContext
main external API structure.
Definition: avcodec.h:431
AVFrame::height
int height
Definition: frame.h:499
MBBackup::mb_skipped
int mb_skipped
Definition: mpegvideo_enc.c:2642
AV_CODEC_ID_H263P
@ AV_CODEC_ID_H263P
Definition: codec_id.h:71
h261enc.h
EDGE_TOP
#define EDGE_TOP
Definition: mpegvideoencdsp.h:29
put_bits_ptr
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:402
MPVMainEncContext::lmax
int lmax
Definition: mpegvideoenc.h:233
ADD
#define ADD(field)
Definition: mpegvideo_enc.c:3621
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:280
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: packet.c:231
mpeg4video.h
AVCodecContext::qmin
int qmin
minimum quantizer
Definition: avcodec.h:1234
AVRational::den
int den
Denominator.
Definition: rational.h:60
MPVUnquantDSPContext::dct_unquantize_mpeg1_inter
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo_unquantize.h:37
ff_mjpeg_encode_stuffing
int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:238
MBBackup::i_count
int i_count
Definition: mpegvideo_enc.c:2649
AVCodecContext::spatial_cplx_masking
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:822
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:117
temp
else temp
Definition: vf_mcdeint.c:271
ff_mpv_pic_check_linesize
int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f, ptrdiff_t *linesizep, ptrdiff_t *uvlinesizep)
Definition: mpegpicture.c:181
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:76
mean
static float mean(const float *input, int size)
Definition: vf_nnedi.c:861
Windows::Graphics::DirectX::Direct3D11::p
IDirect3DDxgiInterfaceAccess _COM_Outptr_ void ** p
Definition: vsrc_gfxcapture_winrt.hpp:53
av_clip_uint8
#define av_clip_uint8
Definition: common.h:106
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
MPVMainEncContext::frame_skip_exp
int frame_skip_exp
Definition: mpegvideoenc.h:244
QMAT_SHIFT
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:83
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:939
ff_mpv_replace_picture
void ff_mpv_replace_picture(MPVWorkPicture *dst, const MPVWorkPicture *src)
Definition: mpegpicture.c:121
ff_estimate_p_frame_motion
void ff_estimate_p_frame_motion(MPVEncContext *const s, int mb_x, int mb_y)
Definition: motion_est.c:892
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:279
ff_mpeg1_clean_buffers
static void ff_mpeg1_clean_buffers(MPVEncContext *s)
Definition: mpeg12enc.h:39
AVERROR_ENCODER_NOT_FOUND
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:56
INPLACE_OFFSET
#define INPLACE_OFFSET
Definition: mpegvideoenc.h:287
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
msmpeg4enc.h
mem.h
AVCodecContext::max_b_frames
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:769
overflow
Undefined Behavior In the C some operations are like signed integer overflow
Definition: undefined.txt:3
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:322
denoise_dct
static void denoise_dct(MPVEncContext *const s, int16_t block[])
Definition: mpegvideo_enc.c:3953
dct_quantize_refine
static int dct_quantize_refine(MPVEncContext *const s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
Definition: mpegvideo_enc.c:4296
FDCTDSPContext::fdct
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:29
ff_mpv_encode_init
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
Definition: mpegvideo_enc.c:553
AVCodecContext::rc_max_available_vbv_use
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1284
flush_put_bits
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:153
ff_mpeg4_merge_partitions
void ff_mpeg4_merge_partitions(MPVEncContext *const s)
Definition: mpeg4videoenc.c:1300
merge_context_after_encode
static void merge_context_after_encode(MPVEncContext *const dst, MPVEncContext *const src)
Definition: mpegvideo_enc.c:3630
MPVMainEncContext::b_frame_strategy
int b_frame_strategy
Definition: mpegvideoenc.h:224
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
av_refstruct_pool_uninit
static void av_refstruct_pool_uninit(AVRefStructPool **poolp)
Mark the pool as being available for freeing.
Definition: refstruct.h:292
scale
static void scale(int *out, const int *in, const int w, const int h, const int shift)
Definition: intra.c:273
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:170
AVCodecContext::slices
int slices
Number of slices.
Definition: avcodec.h:1021
FF_MB_DECISION_BITS
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:938
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:458
AVPacket
This structure stores compressed data.
Definition: packet.h:565
mpeg4videodata.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
AVCodecContext::inter_matrix
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:957
ff_mpegvideoencdsp_init
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
Definition: mpegvideoencdsp.c:276
MPVMainEncContext::scenechange_threshold
int scenechange_threshold
Definition: mpegvideoenc.h:228
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
ff_dct_encode_init_x86
void ff_dct_encode_init_x86(MPVEncContext *s)
Definition: mpegvideoenc.c:56
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:592
bytestream.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:472
coeff
static const double coeff[2][5]
Definition: vf_owdenoise.c:80
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
ff_mjpeg_encode_picture_trailer
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Definition: mjpegenc_common.c:461
MBBackup::mb_intra
int mb_intra
Definition: mpegvideo_enc.c:2642
AV_CODEC_ID_MSMPEG4V3
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:68
MPVUnquantDSPContext
Definition: mpegvideo_unquantize.h:34
h
h
Definition: vp9dsp_template.c:2070
MPVMainEncContext::user_specified_pts
int64_t user_specified_pts
last non-zero pts from user-supplied AVFrame
Definition: mpegvideoenc.h:212
ff_encode_add_cpb_side_data
AVCPBProperties * ff_encode_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: encode.c:888
dct_quantize_c
static int dct_quantize_c(MPVEncContext *const s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo_enc.c:4648
stride
#define stride
Definition: h264pred_template.c:536
MBBackup::pb
PutBitContext pb
Definition: mpegvideo_enc.c:2653
MPVPicture
MPVPicture.
Definition: mpegpicture.h:58
width
#define width
Definition: dsp.h:89
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:226
FF_MPV_FLAG_STRICT_GOP
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideoenc.h:309
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:109
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:73
sp5x_qscale_five_quant_table
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
mjpegenc.h
AV_PICTURE_TYPE_S
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:281
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_alloc_pic_accessories
int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVWorkPicture *wpic, ScratchpadContext *sc, BufferPoolContext *pools, int mb_height)
Allocate an MPVPicture's accessories (but not the AVFrame's buffer itself) and set the MPVWorkPicture...
Definition: mpegpicture.c:237
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:63
update_qscale
static void update_qscale(MPVMainEncContext *const m)
Definition: mpegvideo_enc.c:195
RateControlContext::entry
RateControlEntry * entry
Definition: ratecontrol.h:62
ff_alloc_packet
int ff_alloc_packet(AVCodecContext *avctx, AVPacket *avpkt, int64_t size)
Check AVPacket size and allocate data.
Definition: encode.c:61
MPVMainEncContext::s
MPVEncContext s
The main slicecontext.
Definition: mpegvideoenc.h:200
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:616
write_mb_info
static void write_mb_info(MPVEncContext *const s)
Definition: mpegvideo_enc.c:2908
MpegEncContext::dc_val
int16_t * dc_val
used for H.263 AIC/MPEG-4 DC prediction and ER
Definition: mpegvideo.h:143
ff_mpv_alloc_pic_pool
av_cold AVRefStructPool * ff_mpv_alloc_pic_pool(int init_progress)
Allocate a pool of MPVPictures.
Definition: mpegpicture.c:90
src
#define src
Definition: vp8dsp.c:248
MBBackup::p_tex_bits
int p_tex_bits
Definition: mpegvideo_enc.c:2649
pixblockdsp.h
MpegEncContext::MSMP4_V2
@ MSMP4_V2
Definition: mpegvideo.h:237
ff_aanscales
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
av_cpb_properties_alloc
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:968
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:290
ff_check_codec_matrices
int ff_check_codec_matrices(AVCodecContext *avctx, unsigned types, uint16_t min, uint16_t max)
Definition: encode.c:944
MpegEncContext::chroma_format
int chroma_format
Definition: mpegvideo.h:262
FF_MATRIX_TYPE_INTER
#define FF_MATRIX_TYPE_INTER
Definition: encode.h:107
h263.h
ff_rate_control_uninit
av_cold void ff_rate_control_uninit(RateControlContext *rcc)
Definition: ratecontrol.c:711
ff_get_best_fcode
int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1605
intmath.h
MPVEncContext::mpeg_quant
int mpeg_quant
Definition: mpegvideoenc.h:167