FFmpeg
mpegvideo.c
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mem.h"
35 
36 #include "avcodec.h"
37 #include "blockdsp.h"
38 #include "idctdsp.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 
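/* MPEG-1 intra dequantization: the DC coefficient is scaled by the
 * luma/chroma DC scaler, each nonzero AC coefficient is reconstructed as
 * (level * qscale * quant_matrix[j]) >> 3, and the result is forced to be
 * odd via (level - 1) | 1 (MPEG-1 oddification / mismatch avoidance). */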
45 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
46  int16_t *block, int n, int qscale)
47 {
48  int i, level, nCoeffs;
49  const uint16_t *quant_matrix;
50 
51  nCoeffs= s->block_last_index[n];
52 
53  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
54  /* XXX: only MPEG-1 */
55  quant_matrix = s->intra_matrix;
56  for(i=1;i<=nCoeffs;i++) {
57  int j= s->intra_scantable.permutated[i];
58  level = block[j];
59  if (level) {
60  if (level < 0) {
61  level = -level;
62  level = (int)(level * qscale * quant_matrix[j]) >> 3;
63  level = (level - 1) | 1;
64  level = -level;
65  } else {
66  level = (int)(level * qscale * quant_matrix[j]) >> 3;
67  level = (level - 1) | 1;
68  }
69  block[j] = level;
70  }
71  }
72 }
73 
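/* MPEG-1 inter dequantization: nonzero coefficients are reconstructed as
 * ((2 * level + 1) * qscale * quant_matrix[j]) >> 4 and again forced odd;
 * the loop starts at coefficient 0 since inter blocks have no DC scaler. */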
74 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
75  int16_t *block, int n, int qscale)
76 {
77  int i, level, nCoeffs;
78  const uint16_t *quant_matrix;
79 
80  nCoeffs= s->block_last_index[n];
81 
82  quant_matrix = s->inter_matrix;
83  for(i=0; i<=nCoeffs; i++) {
84  int j= s->intra_scantable.permutated[i];
85  level = block[j];
86  if (level) {
87  if (level < 0) {
88  level = -level;
89  level = (((level << 1) + 1) * qscale *
90  ((int) (quant_matrix[j]))) >> 4;
91  level = (level - 1) | 1;
92  level = -level;
93  } else {
94  level = (((level << 1) + 1) * qscale *
95  ((int) (quant_matrix[j]))) >> 4;
96  level = (level - 1) | 1;
97  }
98  block[j] = level;
99  }
100  }
101 }
102 
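/* MPEG-2 intra dequantization: q_scale_type selects the non-linear qscale
 * table, otherwise qscale is doubled (hence >> 4 instead of MPEG-1's >> 3).
 * With alternate_scan all 63 AC coefficients are processed regardless of
 * block_last_index. This variant applies no mismatch control; see the
 * bitexact version below. */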
103 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
104  int16_t *block, int n, int qscale)
105 {
106  int i, level, nCoeffs;
107  const uint16_t *quant_matrix;
108 
109  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
110  else qscale <<= 1;
111 
112  if(s->alternate_scan) nCoeffs= 63;
113  else nCoeffs= s->block_last_index[n];
114 
115  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
116  quant_matrix = s->intra_matrix;
117  for(i=1;i<=nCoeffs;i++) {
118  int j= s->intra_scantable.permutated[i];
119  level = block[j];
120  if (level) {
121  if (level < 0) {
122  level = -level;
123  level = (int)(level * qscale * quant_matrix[j]) >> 4;
124  level = -level;
125  } else {
126  level = (int)(level * qscale * quant_matrix[j]) >> 4;
127  }
128  block[j] = level;
129  }
130  }
131 }
132 
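/* Same as dct_unquantize_mpeg2_intra_c above, plus MPEG-2 mismatch control:
 * the coefficient sum is tracked (starting at -1) and the LSB of block[63]
 * is toggled whenever that sum would otherwise be even, so the final sum of
 * all coefficients is always odd. */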
133 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
134  int16_t *block, int n, int qscale)
135 {
136  int i, level, nCoeffs;
137  const uint16_t *quant_matrix;
138  int sum=-1;
139 
140  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
141  else qscale <<= 1;
142 
143  if(s->alternate_scan) nCoeffs= 63;
144  else nCoeffs= s->block_last_index[n];
145 
146  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
147  sum += block[0];
148  quant_matrix = s->intra_matrix;
149  for(i=1;i<=nCoeffs;i++) {
150  int j= s->intra_scantable.permutated[i];
151  level = block[j];
152  if (level) {
153  if (level < 0) {
154  level = -level;
155  level = (int)(level * qscale * quant_matrix[j]) >> 4;
156  level = -level;
157  } else {
158  level = (int)(level * qscale * quant_matrix[j]) >> 4;
159  }
160  block[j] = level;
161  sum+=level;
162  }
163  }
164  block[63]^=sum&1;
165 }
166 
167 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
168  int16_t *block, int n, int qscale)
169 {
170  int i, level, nCoeffs;
171  const uint16_t *quant_matrix;
172  int sum=-1;
173 
174  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
175  else qscale <<= 1;
176 
177  if(s->alternate_scan) nCoeffs= 63;
178  else nCoeffs= s->block_last_index[n];
179 
180  quant_matrix = s->inter_matrix;
181  for(i=0; i<=nCoeffs; i++) {
182  int j= s->intra_scantable.permutated[i];
183  level = block[j];
184  if (level) {
185  if (level < 0) {
186  level = -level;
187  level = (((level << 1) + 1) * qscale *
188  ((int) (quant_matrix[j]))) >> 5;
189  level = -level;
190  } else {
191  level = (((level << 1) + 1) * qscale *
192  ((int) (quant_matrix[j]))) >> 5;
193  }
194  block[j] = level;
195  sum+=level;
196  }
197  }
198  block[63]^=sum&1;
199 }
200 
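/* H.263 / MPEG-4 dequantization: each nonzero level becomes
 * level * 2 * qscale +/- qadd with qadd = (qscale - 1) | 1; in AC-prediction
 * (h263_aic) mode qadd is 0 and the intra DC scaler is skipped. The loop
 * walks the block in raster order, bounded by raster_end[]. */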
201 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
202  int16_t *block, int n, int qscale)
203 {
204  int i, level, qmul, qadd;
205  int nCoeffs;
206 
207  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
208 
209  qmul = qscale << 1;
210 
211  if (!s->h263_aic) {
212  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
213  qadd = (qscale - 1) | 1;
214  }else{
215  qadd = 0;
216  }
217  if(s->ac_pred)
218  nCoeffs=63;
219  else
220  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
221 
222  for(i=1; i<=nCoeffs; i++) {
223  level = block[i];
224  if (level) {
225  if (level < 0) {
226  level = level * qmul - qadd;
227  } else {
228  level = level * qmul + qadd;
229  }
230  block[i] = level;
231  }
232  }
233 }
234 
235 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
236  int16_t *block, int n, int qscale)
237 {
238  int i, level, qmul, qadd;
239  int nCoeffs;
240 
241  av_assert2(s->block_last_index[n]>=0);
242 
243  qadd = (qscale - 1) | 1;
244  qmul = qscale << 1;
245 
246  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
247 
248  for(i=0; i<=nCoeffs; i++) {
249  level = block[i];
250  if (level) {
251  if (level < 0) {
252  level = level * qmul - qadd;
253  } else {
254  level = level * qmul + qadd;
255  }
256  block[i] = level;
257  }
258  }
259 }
260 
261 
262 static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
263 {
264  while(h--)
265  memset(dst + h*linesize, 128, 16);
266 }
267 
268 static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
269 {
270  while(h--)
271  memset(dst + h*linesize, 128, 8);
272 }
273 
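/* When the FF_DEBUG_NOMC flag is set, dct_init() below installs gray16() /
 * gray8() in place of the half-pel put/avg functions, so every
 * motion-compensated prediction is replaced by flat mid-gray (128). */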
274 /* init common dct for both encoder and decoder */
275 static av_cold int dct_init(MpegEncContext *s)
276 {
277  ff_blockdsp_init(&s->bdsp);
278  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
279  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
280 
281  if (s->avctx->debug & FF_DEBUG_NOMC) {
282  int i;
283  for (i=0; i<4; i++) {
284  s->hdsp.avg_pixels_tab[0][i] = gray16;
285  s->hdsp.put_pixels_tab[0][i] = gray16;
286  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
287 
288  s->hdsp.avg_pixels_tab[1][i] = gray8;
289  s->hdsp.put_pixels_tab[1][i] = gray8;
290  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
291  }
292  }
293 
294  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
295  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
296  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
297  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
298  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
299  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
300  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
301  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
302 
303 #if HAVE_INTRINSICS_NEON
304  ff_mpv_common_init_neon(s);
305 #endif
306 
307 #if ARCH_ALPHA
308  ff_mpv_common_init_axp(s);
309 #elif ARCH_ARM
310  ff_mpv_common_init_arm(s);
311 #elif ARCH_PPC
312  ff_mpv_common_init_ppc(s);
313 #elif ARCH_X86
314  ff_mpv_common_init_x86(s);
315 #elif ARCH_MIPS
316  ff_mpv_common_init_mips(s);
317 #endif
318 
319  return 0;
320 }
321 
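/* ff_init_scantable(): permutated[i] is the IDCT-permuted raster index of
 * scan position i; raster_end[i] is the largest permuted index among the
 * first i + 1 scan positions, which the H.263 dequantizers above use to
 * bound their raster-order loops. */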
322 av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st,
323  const uint8_t *src_scantable)
324 {
325  int end;
326 
327  st->scantable = src_scantable;
328 
329  for (int i = 0; i < 64; i++) {
330  int j = src_scantable[i];
331  st->permutated[i] = permutation[j];
332  }
333 
334  end = -1;
335  for (int i = 0; i < 64; i++) {
336  int j = st->permutated[i];
337  if (j > end)
338  end = j;
339  st->raster_end[i] = end;
340  }
341 }
342 
343 av_cold void ff_mpv_idct_init(MpegEncContext *s)
344 {
345  if (s->codec_id == AV_CODEC_ID_MPEG4)
346  s->idsp.mpeg4_studio_profile = s->studio_profile;
347  ff_idctdsp_init(&s->idsp, s->avctx);
348 
349  /* load & permutate scantables
350  * note: only wmv uses different ones
351  */
352  if (s->alternate_scan) {
353  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
354  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
355  } else {
356  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
357  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
358  }
359  ff_permute_scantable(s->permutated_intra_h_scantable, ff_alternate_horizontal_scan,
360  s->idsp.idct_permutation);
361  ff_permute_scantable(s->permutated_intra_v_scantable, ff_alternate_vertical_scan,
362  s->idsp.idct_permutation);
363 }
364 
365 static int init_duplicate_context(MpegEncContext *s)
366 {
367  int y_size = s->b8_stride * (2 * s->mb_height + 1);
368  int c_size = s->mb_stride * (s->mb_height + 1);
369  int yc_size = y_size + 2 * c_size;
370  int i;
371 
372  if (s->mb_height & 1)
373  yc_size += 2*s->b8_stride + 2*s->mb_stride;
374 
375  if (s->encoding) {
376  s->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->me.map));
377  if (!s->me.map)
378  return AVERROR(ENOMEM);
379  s->me.score_map = s->me.map + ME_MAP_SIZE;
380 
381  if (s->noise_reduction) {
382  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
383  return AVERROR(ENOMEM);
384  }
385  }
386  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 1 + s->encoding))
387  return AVERROR(ENOMEM);
388  s->block = s->blocks[0];
389 
390  for (i = 0; i < 12; i++) {
391  s->pblocks[i] = &s->block[i];
392  }
393 
394  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
395  // exchange uv
396  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
397  }
398 
399  if (s->out_format == FMT_H263) {
400  /* ac values */
401  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
402  return AVERROR(ENOMEM);
403  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
404  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
405  s->ac_val[2] = s->ac_val[1] + c_size;
406  }
407 
408  return 0;
409 }
410 
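/* Slice threading: each extra slice context is a byte-for-byte copy of the
 * main context (av_memdup) that gets its own scratch buffers from
 * init_duplicate_context(); start_mb_y/end_mb_y divide the macroblock rows
 * between the contexts, with + nb_slices / 2 providing rounding. */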
411 int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
412 {
413  int nb_slices = s->slice_context_count, ret;
414 
415  /* We initialize the copies before the original so that
416  * fields allocated in init_duplicate_context are NULL after
417  * copying. This prevents double-frees upon allocation error. */
418  for (int i = 1; i < nb_slices; i++) {
419  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
420  if (!s->thread_context[i])
421  return AVERROR(ENOMEM);
422  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
423  return ret;
424  s->thread_context[i]->start_mb_y =
425  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
426  s->thread_context[i]->end_mb_y =
427  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
428  }
429  s->start_mb_y = 0;
430  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
431  : s->mb_height;
432  return init_duplicate_context(s);
433 }
434 
435 static void free_duplicate_context(MpegEncContext *s)
436 {
437  if (!s)
438  return;
439 
440  av_freep(&s->sc.edge_emu_buffer);
441  av_freep(&s->me.scratchpad);
442  s->me.temp =
443  s->sc.rd_scratchpad =
444  s->sc.b_scratchpad =
445  s->sc.obmc_scratchpad = NULL;
446 
447  av_freep(&s->dct_error_sum);
448  av_freep(&s->me.map);
449  s->me.score_map = NULL;
450  av_freep(&s->blocks);
451  av_freep(&s->ac_val_base);
452  s->block = NULL;
453 }
454 
456 {
457  for (int i = 1; i < s->slice_context_count; i++) {
458  free_duplicate_context(s->thread_context[i]);
459  av_freep(&s->thread_context[i]);
460  }
461  free_duplicate_context(s);
462 }
463 
464 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
465 {
466 #define COPY(a) bak->a = src->a
467  COPY(sc.edge_emu_buffer);
468  COPY(me.scratchpad);
469  COPY(me.temp);
470  COPY(sc.rd_scratchpad);
471  COPY(sc.b_scratchpad);
472  COPY(sc.obmc_scratchpad);
473  COPY(me.map);
474  COPY(me.score_map);
475  COPY(blocks);
476  COPY(block);
477  COPY(start_mb_y);
478  COPY(end_mb_y);
479  COPY(me.map_generation);
480  COPY(pb);
481  COPY(dct_error_sum);
482  COPY(dct_count[0]);
483  COPY(dct_count[1]);
484  COPY(ac_val_base);
485  COPY(ac_val[0]);
486  COPY(ac_val[1]);
487  COPY(ac_val[2]);
488 #undef COPY
489 }
490 
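/* backup_duplicate_context() + ff_update_duplicate_context(): the
 * per-thread fields listed under COPY() are saved, the whole context is
 * memcpy'd from src, and the saved fields are restored, so a slice context
 * picks up the latest shared state while keeping its own buffers. */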
491 int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
492 {
493  MpegEncContext bak;
494  int i, ret;
495  // FIXME copy only needed parts
496  backup_duplicate_context(&bak, dst);
497  memcpy(dst, src, sizeof(MpegEncContext));
498  backup_duplicate_context(dst, &bak);
499  for (i = 0; i < 12; i++) {
500  dst->pblocks[i] = &dst->block[i];
501  }
502  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
503  // exchange uv
504  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
505  }
506  if (!dst->sc.edge_emu_buffer &&
507  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
508  &dst->sc, dst->linesize)) < 0) {
509  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
510  "scratch buffers.\n");
511  return ret;
512  }
513  return 0;
514 }
515 
516 /**
517  * Set the given MpegEncContext to common defaults
518  * (same for encoding and decoding).
519  * The changed fields will not depend upon the
520  * prior state of the MpegEncContext.
521  */
522 void ff_mpv_common_defaults(MpegEncContext *s)
523 {
524  s->y_dc_scale_table =
525  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
526  s->chroma_qscale_table = ff_default_chroma_qscale_table;
527  s->progressive_frame = 1;
528  s->progressive_sequence = 1;
529  s->picture_structure = PICT_FRAME;
530 
531  s->coded_picture_number = 0;
532  s->picture_number = 0;
533 
534  s->f_code = 1;
535  s->b_code = 1;
536 
537  s->slice_context_count = 1;
538 }
539 
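/* Resolution-dependent setup: for non-progressive MPEG-2 the macroblock
 * height is derived from a 32-line-aligned height (two field MB rows per 32
 * lines); mb_stride and b8_stride are one column wider than the picture,
 * which keeps neighbour addressing simple at the left/right edges. */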
540 int ff_mpv_init_context_frame(MpegEncContext *s)
541 {
542  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
543 
544  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
545  s->mb_height = (s->height + 31) / 32 * 2;
546  else
547  s->mb_height = (s->height + 15) / 16;
548 
549  s->mb_width = (s->width + 15) / 16;
550  s->mb_stride = s->mb_width + 1;
551  s->b8_stride = s->mb_width * 2 + 1;
552  mb_array_size = s->mb_height * s->mb_stride;
553  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
554 
555  /* set default edge pos, will be overridden
556  * in decode_header if needed */
557  s->h_edge_pos = s->mb_width * 16;
558  s->v_edge_pos = s->mb_height * 16;
559 
560  s->mb_num = s->mb_width * s->mb_height;
561 
562  s->block_wrap[0] =
563  s->block_wrap[1] =
564  s->block_wrap[2] =
565  s->block_wrap[3] = s->b8_stride;
566  s->block_wrap[4] =
567  s->block_wrap[5] = s->mb_stride;
568 
569  y_size = s->b8_stride * (2 * s->mb_height + 1);
570  c_size = s->mb_stride * (s->mb_height + 1);
571  yc_size = y_size + 2 * c_size;
572 
573  if (s->mb_height & 1)
574  yc_size += 2*s->b8_stride + 2*s->mb_stride;
575 
576  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
577  return AVERROR(ENOMEM);
578  for (y = 0; y < s->mb_height; y++)
579  for (x = 0; x < s->mb_width; x++)
580  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
581 
582  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
583 
584  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
585  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
586  /* interlaced direct mode decoding tables */
587  int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
588  if (!tmp)
589  return AVERROR(ENOMEM);
590  s->p_field_mv_table_base = tmp;
591  tmp += s->mb_stride + 1;
592  for (int i = 0; i < 2; i++) {
593  for (int j = 0; j < 2; j++) {
594  s->p_field_mv_table[i][j] = tmp;
595  tmp += mv_table_size;
596  }
597  }
598  }
599 
600  if (s->out_format == FMT_H263) {
601  /* cbp values, cbp, ac_pred, pred_dir */
602  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
603  !(s->cbp_table = av_mallocz(mb_array_size)) ||
604  !(s->pred_dir_table = av_mallocz(mb_array_size)))
605  return AVERROR(ENOMEM);
606  s->coded_block = s->coded_block_base + s->b8_stride + 1;
607  }
608 
609  if (s->h263_pred || s->h263_plus || !s->encoding) {
610  /* dc values */
611  // MN: we need these for error resilience of intra-frames
612  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
613  return AVERROR(ENOMEM);
614  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
615  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
616  s->dc_val[2] = s->dc_val[1] + c_size;
617  for (i = 0; i < yc_size; i++)
618  s->dc_val_base[i] = 1024;
619  }
620 
621  // Note the + 1 is for a quicker MPEG-4 slice_end detection
622  if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) ||
623  /* which mb is an intra block, init macroblock skip table */
624  !(s->mbintra_table = av_malloc(mb_array_size)))
625  return AVERROR(ENOMEM);
626  memset(s->mbintra_table, 1, mb_array_size);
627 
628  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
629 }
630 
631 static void clear_context(MpegEncContext *s)
632 {
633  memset(&s->next_picture, 0, sizeof(s->next_picture));
634  memset(&s->last_picture, 0, sizeof(s->last_picture));
635  memset(&s->current_picture, 0, sizeof(s->current_picture));
636 
637  memset(s->thread_context, 0, sizeof(s->thread_context));
638 
639  s->me.map = NULL;
640  s->me.score_map = NULL;
641  s->dct_error_sum = NULL;
642  s->block = NULL;
643  s->blocks = NULL;
644  memset(s->pblocks, 0, sizeof(s->pblocks));
645  s->ac_val_base = NULL;
646  s->ac_val[0] =
647  s->ac_val[1] =
648  s->ac_val[2] =NULL;
649  s->sc.edge_emu_buffer = NULL;
650  s->me.scratchpad = NULL;
651  s->me.temp =
652  s->sc.rd_scratchpad =
653  s->sc.b_scratchpad =
654  s->sc.obmc_scratchpad = NULL;
655 
656 
657  s->bitstream_buffer = NULL;
658  s->allocated_bitstream_buffer_size = 0;
659  s->picture = NULL;
660  s->p_field_mv_table_base = NULL;
661  for (int i = 0; i < 2; i++)
662  for (int j = 0; j < 2; j++)
663  s->p_field_mv_table[i][j] = NULL;
664 
665  s->dc_val_base = NULL;
666  s->coded_block_base = NULL;
667  s->mbintra_table = NULL;
668  s->cbp_table = NULL;
669  s->pred_dir_table = NULL;
670 
671  s->mbskip_table = NULL;
672 
673  s->er.error_status_table = NULL;
674  s->er.er_temp_buffer = NULL;
675  s->mb_index2xy = NULL;
676 }
677 
678 /**
679  * init common structure for both encoder and decoder.
680  * this assumes that some variables like width/height are already set
681  */
682 av_cold int ff_mpv_common_init(MpegEncContext *s)
683 {
684  int i, ret;
685  int nb_slices = (HAVE_THREADS &&
686  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
687  s->avctx->thread_count : 1;
688 
689  clear_context(s);
690 
691  if (s->encoding && s->avctx->slices)
692  nb_slices = s->avctx->slices;
693 
694  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
695  av_log(s->avctx, AV_LOG_ERROR,
696  "decoding to AV_PIX_FMT_NONE is not supported.\n");
697  return AVERROR(EINVAL);
698  }
699 
700  if ((s->width || s->height) &&
701  av_image_check_size(s->width, s->height, 0, s->avctx))
702  return AVERROR(EINVAL);
703 
704  dct_init(s);
705 
706  /* set chroma shifts */
707  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
708  &s->chroma_x_shift,
709  &s->chroma_y_shift);
710  if (ret)
711  return ret;
712 
713  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
714  return AVERROR(ENOMEM);
715  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
716  s->picture[i].f = av_frame_alloc();
717  if (!s->picture[i].f)
718  goto fail_nomem;
719  }
720 
721  if (!(s->next_picture.f = av_frame_alloc()) ||
722  !(s->last_picture.f = av_frame_alloc()) ||
723  !(s->current_picture.f = av_frame_alloc()))
724  goto fail_nomem;
725 
726  if ((ret = ff_mpv_init_context_frame(s)))
727  goto fail;
728 
729  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
730  int max_slices;
731  if (s->mb_height)
732  max_slices = FFMIN(MAX_THREADS, s->mb_height);
733  else
734  max_slices = MAX_THREADS;
735  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
736  " reducing to %d\n", nb_slices, max_slices);
737  nb_slices = max_slices;
738  }
739 
740  s->context_initialized = 1;
741  memset(s->thread_context, 0, sizeof(s->thread_context));
742  s->thread_context[0] = s;
743  s->slice_context_count = nb_slices;
744 
745 // if (s->width && s->height) {
746  ret = ff_mpv_init_duplicate_contexts(s);
747  if (ret < 0)
748  goto fail;
749 // }
750 
751  return 0;
752  fail_nomem:
753  ret = AVERROR(ENOMEM);
754  fail:
755  ff_mpv_common_end(s);
756  return ret;
757 }
758 
759 void ff_mpv_free_context_frame(MpegEncContext *s)
760 {
761  free_duplicate_contexts(s);
762 
763  av_freep(&s->p_field_mv_table_base);
764  for (int i = 0; i < 2; i++)
765  for (int j = 0; j < 2; j++)
766  s->p_field_mv_table[i][j] = NULL;
767 
768  av_freep(&s->dc_val_base);
769  av_freep(&s->coded_block_base);
770  av_freep(&s->mbintra_table);
771  av_freep(&s->cbp_table);
772  av_freep(&s->pred_dir_table);
773 
774  av_freep(&s->mbskip_table);
775 
776  av_freep(&s->er.error_status_table);
777  av_freep(&s->er.er_temp_buffer);
778  av_freep(&s->mb_index2xy);
779 
780  s->linesize = s->uvlinesize = 0;
781 }
782 
783 void ff_mpv_common_end(MpegEncContext *s)
784 {
785  ff_mpv_free_context_frame(s);
786  if (s->slice_context_count > 1)
787  s->slice_context_count = 1;
788 
789  av_freep(&s->bitstream_buffer);
790  s->allocated_bitstream_buffer_size = 0;
791 
792  if (s->picture) {
793  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
794  ff_mpv_picture_free(&s->picture[i]);
795  }
796  av_freep(&s->picture);
797  ff_mpv_picture_free(&s->last_picture);
798  ff_mpv_picture_free(&s->current_picture);
799  ff_mpv_picture_free(&s->next_picture);
800 
801  s->context_initialized = 0;
802  s->context_reinit = 0;
803  s->last_picture_ptr =
804  s->next_picture_ptr =
805  s->current_picture_ptr = NULL;
806  s->linesize = s->uvlinesize = 0;
807 }
808 
809 
810 /**
811  * Clean dc, ac, coded_block for the current non-intra MB.
812  */
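/* Resets the four luma and two chroma DC predictors of the current MB to
 * the reset value 1024, clears the stored AC prediction rows, and for
 * MSMPEG4 version 3+ also clears the coded_block flags. */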
813 void ff_clean_intra_table_entries(MpegEncContext *s)
814 {
815  int wrap = s->b8_stride;
816  int xy = s->block_index[0];
817 
818  s->dc_val[0][xy ] =
819  s->dc_val[0][xy + 1 ] =
820  s->dc_val[0][xy + wrap] =
821  s->dc_val[0][xy + 1 + wrap] = 1024;
822  /* ac pred */
823  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
824  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
825  if (s->msmpeg4_version>=3) {
826  s->coded_block[xy ] =
827  s->coded_block[xy + 1 ] =
828  s->coded_block[xy + wrap] =
829  s->coded_block[xy + 1 + wrap] = 0;
830  }
831  /* chroma */
832  wrap = s->mb_stride;
833  xy = s->mb_x + s->mb_y * wrap;
834  s->dc_val[1][xy] =
835  s->dc_val[2][xy] = 1024;
836  /* ac pred */
837  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
838  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
839 
840  s->mbintra_table[xy]= 0;
841 }
842 
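/* block_index[0..3] address the four luma 8x8 blocks of the current MB in
 * the b8 grid and block_index[4..5] the two chroma blocks in the mb grid;
 * the dest[] pointers are offset by mb_x/mb_y shifted by the MB size, where
 * lowres and >8-bit samples adjust the shift and field pictures step by
 * mb_y >> 1 rows. */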
843 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
844  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
845  const int uvlinesize = s->current_picture.f->linesize[1];
846  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
847  const int height_of_mb = 4 - s->avctx->lowres;
848 
849  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
850  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
851  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
852  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
853  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
854  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
855  //block_index is not used by mpeg2, so it is not affected by chroma_format
856 
857  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
858  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
859  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
860 
861  if (s->picture_structure == PICT_FRAME) {
862  s->dest[0] += s->mb_y * linesize << height_of_mb;
863  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
864  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
865  } else {
866  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
867  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
868  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
869  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
870  }
871 }
872 
873 /**
874  * set qscale and update qscale dependent variables.
875  */
876 void ff_set_qscale(MpegEncContext * s, int qscale)
877 {
878  if (qscale < 1)
879  qscale = 1;
880  else if (qscale > 31)
881  qscale = 31;
882 
883  s->qscale = qscale;
884  s->chroma_qscale= s->chroma_qscale_table[qscale];
885 
886  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
887  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
888 }