FFmpeg
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "config_components.h"
31 
32 #include "libavutil/attributes.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "h264chroma.h"
40 #include "idctdsp.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "mpegutils.h"
44 #include "mpegvideo.h"
45 #include "mpeg4videodec.h"
46 #include "mpegvideodata.h"
47 #include "qpeldsp.h"
48 #include "threadframe.h"
49 #include "wmv2dec.h"
50 #include <limits.h>
51 
/* MPEG-1 intra-block dequantizer (C reference implementation).
 * NOTE(review): the opening signature line is missing from this excerpt;
 * name inferred from the "s->dct_unquantize_mpeg1_intra = ..." assignment
 * later in this file. Takes (MpegEncContext *s, int16_t *block, int n,
 * int qscale) — presumably; confirm against the full source.
 * block[0] (DC) is scaled by the luma or chroma DC scale depending on the
 * block index n (n < 4 => luma). Each AC coefficient up to
 * block_last_index is multiplied by qscale and the intra matrix, then
 * (level - 1) | 1 forces the result odd — MPEG-1 mismatch control. */
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
60  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
61  /* XXX: only MPEG-1 */
62  quant_matrix = s->intra_matrix;
63  for(i=1;i<=nCoeffs;i++) {
   /* iterate in scan order; j is the permuted (IDCT layout) position */
64  int j= s->intra_scantable.permutated[i];
65  level = block[j];
66  if (level) {
   /* negative levels are dequantized on their magnitude so the
    * >> 3 truncation and oddification behave symmetrically */
67  if (level < 0) {
68  level = -level;
69  level = (int)(level * qscale * quant_matrix[j]) >> 3;
70  level = (level - 1) | 1;
71  level = -level;
72  } else {
73  level = (int)(level * qscale * quant_matrix[j]) >> 3;
74  level = (level - 1) | 1;
75  }
76  block[j] = level;
77  }
78  }
79 }
80 
/* MPEG-1 inter-block dequantizer (C reference implementation).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the dct_unquantize_mpeg1_inter assignment later in the file.
 * Unlike the intra path there is no DC special case: the loop starts at
 * i = 0 and applies (2*level + 1) * qscale * inter_matrix >> 4 with
 * oddification ((level - 1) | 1) for mismatch control. */
82  int16_t *block, int n, int qscale)
83 {
84  int i, level, nCoeffs;
85  const uint16_t *quant_matrix;
86 
87  nCoeffs= s->block_last_index[n];
88 
89  quant_matrix = s->inter_matrix;
90  for(i=0; i<=nCoeffs; i++) {
   /* intra_scantable is used here even for inter blocks — presumably
    * intentional since the scan permutation is shared; verify against
    * the full source before changing */
91  int j= s->intra_scantable.permutated[i];
92  level = block[j];
93  if (level) {
94  if (level < 0) {
95  level = -level;
96  level = (((level << 1) + 1) * qscale *
97  ((int) (quant_matrix[j]))) >> 4;
98  level = (level - 1) | 1;
99  level = -level;
100  } else {
101  level = (((level << 1) + 1) * qscale *
102  ((int) (quant_matrix[j]))) >> 4;
103  level = (level - 1) | 1;
104  }
105  block[j] = level;
106  }
107  }
108 }
109 
/* MPEG-2 intra-block dequantizer, non-bitexact variant (C reference).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the dct_unquantize_mpeg2_intra assignment later in the file.
 * qscale is first mapped through the non-linear table when q_scale_type
 * is set, otherwise doubled. With alternate_scan all 64 coefficients are
 * processed regardless of block_last_index. This variant omits the
 * sum-parity mismatch control (see the _bitexact variant below). */
111  int16_t *block, int n, int qscale)
112 {
113  int i, level, nCoeffs;
114  const uint16_t *quant_matrix;
115 
116  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
117  else qscale <<= 1;
118 
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
   /* DC coefficient uses the per-plane DC scale, not the matrix */
122  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
123  quant_matrix = s->intra_matrix;
124  for(i=1;i<=nCoeffs;i++) {
125  int j= s->intra_scantable.permutated[i];
126  level = block[j];
127  if (level) {
128  if (level < 0) {
129  level = -level;
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  level = -level;
132  } else {
133  level = (int)(level * qscale * quant_matrix[j]) >> 4;
134  }
135  block[j] = level;
136  }
137  }
138 }
139 
/* MPEG-2 intra-block dequantizer, bitexact variant (C reference).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the AV_CODEC_FLAG_BITEXACT branch in dct_init below.
 * Same dequantization as the non-bitexact variant, plus the spec's
 * mismatch control: the parity of the sum of all output coefficients is
 * accumulated and block[63] has its LSB toggled when the sum is even
 * (sum starts at -1, so "sum & 1" is 0 exactly when the true sum is odd). */
141  int16_t *block, int n, int qscale)
142 {
143  int i, level, nCoeffs;
144  const uint16_t *quant_matrix;
145  int sum=-1;
146 
147  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
148  else qscale <<= 1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
154  sum += block[0];
155  quant_matrix = s->intra_matrix;
156  for(i=1;i<=nCoeffs;i++) {
157  int j= s->intra_scantable.permutated[i];
158  level = block[j];
159  if (level) {
160  if (level < 0) {
161  level = -level;
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  level = -level;
164  } else {
165  level = (int)(level * qscale * quant_matrix[j]) >> 4;
166  }
167  block[j] = level;
168  sum+=level;
169  }
170  }
   /* ISO/IEC 13818-2 mismatch control on the last coefficient */
171  block[63]^=sum&1;
172 }
173 
/* MPEG-2 inter-block dequantizer (C reference implementation).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the dct_unquantize_mpeg2_inter assignment later in the file.
 * Applies (2*level + 1) * qscale * inter_matrix >> 5 from coefficient 0,
 * with the same sum-parity mismatch control on block[63] as the intra
 * bitexact variant. */
175  int16_t *block, int n, int qscale)
176 {
177  int i, level, nCoeffs;
178  const uint16_t *quant_matrix;
179  int sum=-1;
180 
181  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
182  else qscale <<= 1;
183 
184  if(s->alternate_scan) nCoeffs= 63;
185  else nCoeffs= s->block_last_index[n];
186 
187  quant_matrix = s->inter_matrix;
188  for(i=0; i<=nCoeffs; i++) {
   /* intra scan table also used for inter — shared permutation */
189  int j= s->intra_scantable.permutated[i];
190  level = block[j];
191  if (level) {
192  if (level < 0) {
193  level = -level;
194  level = (((level << 1) + 1) * qscale *
195  ((int) (quant_matrix[j]))) >> 5;
196  level = -level;
197  } else {
198  level = (((level << 1) + 1) * qscale *
199  ((int) (quant_matrix[j]))) >> 5;
200  }
201  block[j] = level;
202  sum+=level;
203  }
204  }
205  block[63]^=sum&1;
206 }
207 
/* H.263 intra-block dequantizer (C reference implementation).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the dct_unquantize_h263_intra assignment later in the file.
 * H.263 uses a flat quantizer: level' = level * 2*qscale +/- qadd,
 * where qadd = (qscale - 1) | 1 (always odd). With AC prediction
 * (h263_aic) the DC is left untouched and qadd is 0. Coefficients are
 * addressed in raster order via raster_end, not through a permutation. */
209  int16_t *block, int n, int qscale)
210 {
211  int i, level, qmul, qadd;
212  int nCoeffs;
213 
214  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
215 
216  qmul = qscale << 1;
217 
218  if (!s->h263_aic) {
219  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
220  qadd = (qscale - 1) | 1;
221  }else{
   /* advanced intra coding: DC/AC handled by the predictor, no offset */
222  qadd = 0;
223  }
224  if(s->ac_pred)
225  nCoeffs=63;
226  else
227  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
228 
229  for(i=1; i<=nCoeffs; i++) {
230  level = block[i];
231  if (level) {
232  if (level < 0) {
233  level = level * qmul - qadd;
234  } else {
235  level = level * qmul + qadd;
236  }
237  block[i] = level;
238  }
239  }
240 }
241 
/* H.263 inter-block dequantizer (C reference implementation).
 * NOTE(review): signature line missing from this excerpt; name inferred
 * from the dct_unquantize_h263_inter assignment later in the file.
 * Same flat quantizer as the intra path but with no DC special case:
 * the loop starts at i = 0 and uses the inter scantable's raster_end. */
243  int16_t *block, int n, int qscale)
244 {
245  int i, level, qmul, qadd;
246  int nCoeffs;
247 
248  av_assert2(s->block_last_index[n]>=0);
249 
250  qadd = (qscale - 1) | 1;
251  qmul = qscale << 1;
252 
253  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
254 
255  for(i=0; i<=nCoeffs; i++) {
256  level = block[i];
257  if (level) {
258  if (level < 0) {
259  level = level * qmul - qadd;
260  } else {
261  level = level * qmul + qadd;
262  }
263  block[i] = level;
264  }
265  }
266 }
267 
268 
/**
 * Fill h rows of 16 bytes each with mid-gray (value 128).
 * Installed in place of the real hpeldsp motion-compensation functions
 * when FF_DEBUG_NOMC is set, so skipped MC shows up as flat gray.
 * The src argument is unused; it exists only to match the
 * op_pixels_func signature expected by the hpeldsp tables.
 */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 16);
}
274 
/**
 * Fill h rows of 8 bytes each with mid-gray (value 128).
 * Chroma-sized counterpart of gray16(); used for the 8-pixel-wide
 * hpeldsp tables when FF_DEBUG_NOMC is active. The src argument is
 * unused and present only for signature compatibility.
 */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 8);
}
280 
281 /* init common dct for both encoder and decoder */
/* NOTE(review): the signature line (doxygen line 282) and the bodies of
 * the arch-specific #if branches (lines 313, 317-325) are missing from
 * this excerpt — presumably per-arch ff_mpv_common_init_* calls; confirm
 * against the full source. Initializes the shared DSP contexts, installs
 * the gray fill stubs when FF_DEBUG_NOMC is requested, and selects the
 * C dequantizer implementations (bitexact MPEG-2 intra on request). */
283 {
284  ff_blockdsp_init(&s->bdsp);
285  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
286  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
287  ff_mpegvideodsp_init(&s->mdsp);
288  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
289 
290  if (s->avctx->debug & FF_DEBUG_NOMC) {
291  int i;
   /* replace all halfpel MC functions with flat-gray fills so
    * motion-compensated areas are visually obvious */
292  for (i=0; i<4; i++) {
293  s->hdsp.avg_pixels_tab[0][i] = gray16;
294  s->hdsp.put_pixels_tab[0][i] = gray16;
295  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
296 
297  s->hdsp.avg_pixels_tab[1][i] = gray8;
298  s->hdsp.put_pixels_tab[1][i] = gray8;
299  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
300  }
301  }
302 
303  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
304  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
305  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
306  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
307  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
308  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
309  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
310  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
311 
312 #if HAVE_INTRINSICS_NEON
314 #endif
315 
316 #if ARCH_ALPHA
318 #elif ARCH_ARM
320 #elif ARCH_PPC
322 #elif ARCH_X86
324 #elif ARCH_MIPS
326 #endif
327 
328  return 0;
329 }
330 
/* NOTE(review): signature line missing from this excerpt — appears to be
 * the IDCT/scantable initializer (ff_mpv_idct_init in upstream FFmpeg);
 * confirm against the full source. Initializes the IDCT DSP context and
 * builds the four scantables, permuted for the chosen IDCT's coefficient
 * layout. */
332 {
333  if (s->codec_id == AV_CODEC_ID_MPEG4)
334  s->idsp.mpeg4_studio_profile = s->studio_profile;
335  ff_idctdsp_init(&s->idsp, s->avctx);
336 
337  /* load & permutate scantables
338  * note: only wmv uses different ones
339  */
340  if (s->alternate_scan) {
341  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
342  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
343  } else {
344  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
345  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
346  }
347  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
348  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
349 }
350 
/* Allocate the per-slice-context buffers (ME maps, DCT blocks, AC
 * prediction values). NOTE(review): signature line missing from this
 * excerpt; name init_duplicate_context inferred from the call sites
 * below. Returns 0 on success or AVERROR(ENOMEM); partially allocated
 * fields are cleaned up by free_duplicate_context on the error path
 * (handled by the callers). */
352 {
353  int y_size = s->b8_stride * (2 * s->mb_height + 1);
354  int c_size = s->mb_stride * (s->mb_height + 1);
355  int yc_size = y_size + 2 * c_size;
356  int i;
357 
358  if (s->mb_height & 1)
359  yc_size += 2*s->b8_stride + 2*s->mb_stride;
360 
361  if (s->encoding) {
   /* motion-estimation score/position maps, encoder only */
362  if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) ||
363  !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
364  return AVERROR(ENOMEM);
365 
366  if (s->noise_reduction) {
367  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
368  return AVERROR(ENOMEM);
369  }
370  }
371  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
372  return AVERROR(ENOMEM);
373  s->block = s->blocks[0];
374 
375  for (i = 0; i < 12; i++) {
376  s->pblocks[i] = &s->block[i];
377  }
378 
379  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
380  // exchange uv
381  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
382  }
383 
384  if (s->out_format == FMT_H263) {
385  /* ac values */
386  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
387  return AVERROR(ENOMEM);
388  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
389  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
390  s->ac_val[2] = s->ac_val[1] + c_size;
391  }
392 
393  return 0;
394 }
395 
/* Create one duplicated MpegEncContext per slice thread and assign each
 * a contiguous [start_mb_y, end_mb_y) row range. NOTE(review): signature
 * line missing from this excerpt — presumably
 * ff_mpv_init_duplicate_contexts(MpegEncContext *s); confirm against the
 * full source. Returns 0 or a negative AVERROR code. */
397 {
398  int nb_slices = s->slice_context_count, ret;
399 
400  /* We initialize the copies before the original so that
401  * fields allocated in init_duplicate_context are NULL after
402  * copying. This prevents double-frees upon allocation error. */
403  for (int i = 1; i < nb_slices; i++) {
404  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
405  if (!s->thread_context[i])
406  return AVERROR(ENOMEM);
407  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
408  return ret;
   /* rounded partition of mb_height into nb_slices bands */
409  s->thread_context[i]->start_mb_y =
410  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
411  s->thread_context[i]->end_mb_y =
412  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
413  }
414  s->start_mb_y = 0;
415  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
416  : s->mb_height;
417  return init_duplicate_context(s);
418 }
419 
/* Free everything allocated by init_duplicate_context and reset the
 * aliasing scratch pointers. NOTE(review): signature line missing from
 * this excerpt; name free_duplicate_context inferred from the call in
 * the loop below. NULL-safe. */
421 {
422  if (!s)
423  return;
424 
425  av_freep(&s->sc.edge_emu_buffer);
426  av_freep(&s->me.scratchpad);
   /* these all point into me.scratchpad — clear, do not free */
427  s->me.temp =
428  s->sc.rd_scratchpad =
429  s->sc.b_scratchpad =
430  s->sc.obmc_scratchpad = NULL;
431 
432  av_freep(&s->dct_error_sum);
433  av_freep(&s->me.map);
434  av_freep(&s->me.score_map);
435  av_freep(&s->blocks);
436  av_freep(&s->ac_val_base);
437  s->block = NULL;
438 }
439 
/* Free all per-slice duplicated contexts (indices 1..count-1; index 0 is
 * the main context and is not freed here). NOTE(review): the signature
 * line and doxygen line 446 are missing from this excerpt — line 446
 * presumably frees/resets the main context's duplicate fields; confirm
 * against the full source. */
441 {
442  for (int i = 1; i < s->slice_context_count; i++) {
443  free_duplicate_context(s->thread_context[i]);
444  av_freep(&s->thread_context[i]);
445  }
447 }
448 
/* Copy the per-slice-context fields from src into bak, so they survive a
 * wholesale memcpy of the surrounding MpegEncContext (see
 * the update-duplicate-context function below, which calls this twice to
 * save and then restore). NOTE(review): signature line missing from this
 * excerpt; name inferred from the calls below. */
450 {
451 #define COPY(a) bak->a = src->a
452  COPY(sc.edge_emu_buffer);
453  COPY(me.scratchpad);
454  COPY(me.temp);
455  COPY(sc.rd_scratchpad);
456  COPY(sc.b_scratchpad);
457  COPY(sc.obmc_scratchpad);
458  COPY(me.map);
459  COPY(me.score_map);
460  COPY(blocks);
461  COPY(block);
462  COPY(start_mb_y);
463  COPY(end_mb_y);
464  COPY(me.map_generation);
465  COPY(pb);
466  COPY(dct_error_sum);
467  COPY(dct_count[0]);
468  COPY(dct_count[1]);
469  COPY(ac_val_base);
470  COPY(ac_val[0]);
471  COPY(ac_val[1]);
472  COPY(ac_val[2]);
473 #undef COPY
474 }
475 
/* Refresh a slice context from the master context: memcpy the whole
 * struct while preserving (via backup/restore) the per-slice allocations
 * and MB-row range, then re-derive the pblocks pointers and lazily
 * allocate scratch buffers if needed. NOTE(review): signature line
 * missing from this excerpt — presumably
 * ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src);
 * confirm against the full source. Returns 0 or a negative AVERROR. */
477 {
478  MpegEncContext bak;
479  int i, ret;
480  // FIXME copy only needed parts
481  backup_duplicate_context(&bak, dst);
482  memcpy(dst, src, sizeof(MpegEncContext));
483  backup_duplicate_context(dst, &bak);
   /* pblocks must point into dst's own block array, not src's */
484  for (i = 0; i < 12; i++) {
485  dst->pblocks[i] = &dst->block[i];
486  }
487  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
488  // exchange uv
489  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
490  }
491  if (!dst->sc.edge_emu_buffer &&
492  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
493  &dst->sc, dst->linesize)) < 0) {
494  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
495  "scratch buffers.\n");
496  return ret;
497  }
498  return 0;
499 }
500 
501 /**
502  * Set the given MpegEncContext to common defaults
503  * (same for encoding and decoding).
504  * The changed fields will not depend upon the
505  * prior state of the MpegEncContext.
506  */
/* NOTE(review): the signature line is missing from this excerpt —
 * presumably ff_mpv_common_defaults(MpegEncContext *s); confirm against
 * the full source. */
508 {
509  s->y_dc_scale_table =
510  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
511  s->chroma_qscale_table = ff_default_chroma_qscale_table;
512  s->progressive_frame = 1;
513  s->progressive_sequence = 1;
514  s->picture_structure = PICT_FRAME;
515 
516  s->coded_picture_number = 0;
517  s->picture_number = 0;
518 
   /* default MV range codes (smallest range) */
519  s->f_code = 1;
520  s->b_code = 1;
521 
522  s->slice_context_count = 1;
523 }
524 
/* Compute the frame geometry (mb_width/height, strides, edge positions)
 * and allocate all frame-size-dependent tables: mb_index2xy, interlaced
 * direct-mode MV tables, H.263 cbp/pred tables, DC prediction values and
 * the skip/intra tables. NOTE(review): signature line missing from this
 * excerpt — presumably ff_mpv_init_context_frame(MpegEncContext *s);
 * confirm against the full source. Returns 0 or a negative AVERROR. */
526 {
527  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
528 
   /* interlaced MPEG-2 needs mb_height rounded to a multiple of two
    * 16-line macroblock rows (32-line units) */
529  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
530  s->mb_height = (s->height + 31) / 32 * 2;
531  else
532  s->mb_height = (s->height + 15) / 16;
533 
534  s->mb_width = (s->width + 15) / 16;
535  s->mb_stride = s->mb_width + 1;
536  s->b8_stride = s->mb_width * 2 + 1;
537  mb_array_size = s->mb_height * s->mb_stride;
538  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
539 
540  /* set default edge pos, will be overridden
541  * in decode_header if needed */
542  s->h_edge_pos = s->mb_width * 16;
543  s->v_edge_pos = s->mb_height * 16;
544 
545  s->mb_num = s->mb_width * s->mb_height;
546 
547  s->block_wrap[0] =
548  s->block_wrap[1] =
549  s->block_wrap[2] =
550  s->block_wrap[3] = s->b8_stride;
551  s->block_wrap[4] =
552  s->block_wrap[5] = s->mb_stride;
553 
554  y_size = s->b8_stride * (2 * s->mb_height + 1);
555  c_size = s->mb_stride * (s->mb_height + 1);
556  yc_size = y_size + 2 * c_size;
557 
558  if (s->mb_height & 1)
559  yc_size += 2*s->b8_stride + 2*s->mb_stride;
560 
561  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
562  return AVERROR(ENOMEM);
563  for (y = 0; y < s->mb_height; y++)
564  for (x = 0; x < s->mb_width; x++)
565  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
566 
567  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
568 
569  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
570  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
571  /* interlaced direct mode decoding tables */
572  int16_t (*tmp)[2] = av_calloc(mv_table_size, 4 * sizeof(*tmp));
573  if (!tmp)
574  return AVERROR(ENOMEM);
575  s->p_field_mv_table_base = tmp;
   /* offset so (-1,-1) neighbours of the first MB are addressable */
576  tmp += s->mb_stride + 1;
577  for (int i = 0; i < 2; i++) {
578  for (int j = 0; j < 2; j++) {
579  s->p_field_mv_table[i][j] = tmp;
580  tmp += mv_table_size;
581  }
582  }
583  }
584 
585  if (s->out_format == FMT_H263) {
586  /* cbp values, cbp, ac_pred, pred_dir */
587  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
588  !(s->cbp_table = av_mallocz(mb_array_size)) ||
589  !(s->pred_dir_table = av_mallocz(mb_array_size)))
590  return AVERROR(ENOMEM);
591  s->coded_block = s->coded_block_base + s->b8_stride + 1;
592  }
593 
594  if (s->h263_pred || s->h263_plus || !s->encoding) {
595  /* dc values */
596  // MN: we need these for error resilience of intra-frames
597  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
598  return AVERROR(ENOMEM);
599  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
600  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
601  s->dc_val[2] = s->dc_val[1] + c_size;
   /* 1024 is the neutral DC predictor (128 << 3) */
602  for (i = 0; i < yc_size; i++)
603  s->dc_val_base[i] = 1024;
604  }
605 
606  // Note the + 1 is for a quicker MPEG-4 slice_end detection
607  if (!(s->mbskip_table = av_mallocz(mb_array_size + 2)) ||
608  /* which mb is an intra block, init macroblock skip table */
609  !(s->mbintra_table = av_malloc(mb_array_size)))
610  return AVERROR(ENOMEM);
611  memset(s->mbintra_table, 1, mb_array_size);
612 
613  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
614 }
615 
/* Reset every pointer/size field of the context to NULL/0 so that a
 * failed or partial initialization can be torn down safely without
 * freeing garbage. NOTE(review): signature line missing from this
 * excerpt; name clear_context inferred from the call in the common-init
 * function below. */
617 {
618  memset(&s->next_picture, 0, sizeof(s->next_picture));
619  memset(&s->last_picture, 0, sizeof(s->last_picture));
620  memset(&s->current_picture, 0, sizeof(s->current_picture));
621  memset(&s->new_picture, 0, sizeof(s->new_picture));
622 
623  memset(s->thread_context, 0, sizeof(s->thread_context));
624 
625  s->me.map = NULL;
626  s->me.score_map = NULL;
627  s->dct_error_sum = NULL;
628  s->block = NULL;
629  s->blocks = NULL;
630  memset(s->pblocks, 0, sizeof(s->pblocks));
631  s->ac_val_base = NULL;
632  s->ac_val[0] =
633  s->ac_val[1] =
634  s->ac_val[2] =NULL;
635  s->sc.edge_emu_buffer = NULL;
636  s->me.scratchpad = NULL;
637  s->me.temp =
638  s->sc.rd_scratchpad =
639  s->sc.b_scratchpad =
640  s->sc.obmc_scratchpad = NULL;
641 
642 
643  s->bitstream_buffer = NULL;
644  s->allocated_bitstream_buffer_size = 0;
645  s->picture = NULL;
646  s->p_field_mv_table_base = NULL;
647  for (int i = 0; i < 2; i++)
648  for (int j = 0; j < 2; j++)
649  s->p_field_mv_table[i][j] = NULL;
650 
651  s->dc_val_base = NULL;
652  s->coded_block_base = NULL;
653  s->mbintra_table = NULL;
654  s->cbp_table = NULL;
655  s->pred_dir_table = NULL;
656 
657  s->mbskip_table = NULL;
658 
659  s->er.error_status_table = NULL;
660  s->er.er_temp_buffer = NULL;
661  s->mb_index2xy = NULL;
662 }
663 
664 /**
665  * init common structure for both encoder and decoder.
666  * this assumes that some variables like width/height are already set
667  */
/* NOTE(review): several lines are missing from this excerpt — the
 * signature line (668), the statement whose failure branches to
 * "goto fail" at doxygen line 714, and the call whose result is checked
 * at line 738 (presumably the frame-size-dependent init). Confirm
 * against the full source before modifying. Returns 0 or a negative
 * AVERROR; on failure the error path (line 746, also missing here)
 * presumably tears the context down before returning ret. */
669 {
670  int i, ret;
671  int nb_slices = (HAVE_THREADS &&
672  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
673  s->avctx->thread_count : 1;
674 
675  clear_context(s);
676 
677  if (s->encoding && s->avctx->slices)
678  nb_slices = s->avctx->slices;
679 
680  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
681  av_log(s->avctx, AV_LOG_ERROR,
682  "decoding to AV_PIX_FMT_NONE is not supported.\n");
683  return AVERROR(EINVAL);
684  }
685 
686  if ((s->width || s->height) &&
687  av_image_check_size(s->width, s->height, 0, s->avctx))
688  return AVERROR(EINVAL);
689 
690  dct_init(s);
691 
692  /* set chroma shifts */
693  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
694  &s->chroma_x_shift,
695  &s->chroma_y_shift);
696  if (ret)
697  return ret;
698 
699  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
700  return AVERROR(ENOMEM);
701  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
702  s->picture[i].f = av_frame_alloc();
703  if (!s->picture[i].f)
704  goto fail_nomem;
705  }
706 
707  if (!(s->next_picture.f = av_frame_alloc()) ||
708  !(s->last_picture.f = av_frame_alloc()) ||
709  !(s->current_picture.f = av_frame_alloc()) ||
710  !(s->new_picture = av_frame_alloc()))
711  goto fail_nomem;
712 
714  goto fail;
715 
716  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
717  int max_slices;
718  if (s->mb_height)
719  max_slices = FFMIN(MAX_THREADS, s->mb_height);
720  else
721  max_slices = MAX_THREADS;
722  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
723  " reducing to %d\n", nb_slices, max_slices);
724  nb_slices = max_slices;
725  }
726 
727 #if FF_API_FLAG_TRUNCATED
728  s->parse_context.state = -1;
729 #endif
730 
731  s->context_initialized = 1;
732  memset(s->thread_context, 0, sizeof(s->thread_context));
733  s->thread_context[0] = s;
734  s->slice_context_count = nb_slices;
735 
736 // if (s->width && s->height) {
738  if (ret < 0)
739  goto fail;
740 // }
741 
742  return 0;
743  fail_nomem:
744  ret = AVERROR(ENOMEM);
745  fail:
747  return ret;
748 }
749 
/* Free all frame-size-dependent tables allocated by the frame-init
 * function above and reset the line sizes. NOTE(review): the signature
 * line (750) and doxygen line 752 are missing from this excerpt — line
 * 752 presumably frees the duplicated slice contexts; confirm against
 * the full source. */
751 {
753 
754  av_freep(&s->p_field_mv_table_base);
   /* the table pointers alias p_field_mv_table_base — clear only */
755  for (int i = 0; i < 2; i++)
756  for (int j = 0; j < 2; j++)
757  s->p_field_mv_table[i][j] = NULL;
758 
759  av_freep(&s->dc_val_base);
760  av_freep(&s->coded_block_base);
761  av_freep(&s->mbintra_table);
762  av_freep(&s->cbp_table);
763  av_freep(&s->pred_dir_table);
764 
765  av_freep(&s->mbskip_table);
766 
767  av_freep(&s->er.error_status_table);
768  av_freep(&s->er.er_temp_buffer);
769  av_freep(&s->mb_index2xy);
770 
771  s->linesize = s->uvlinesize = 0;
772 }
773 
774 /* init common structure for both encoder and decoder */
/* NOTE(review): despite the original comment above (copied from the init
 * function), this is the common TEARDOWN path. The signature line (775)
 * and doxygen line 780 are missing from this excerpt — line 780
 * presumably calls the frame-table free function; confirm against the
 * full source. NULL-safe; also tolerates a context without avctx. */
776 {
777  if (!s)
778  return;
779 
781  if (s->slice_context_count > 1)
782  s->slice_context_count = 1;
783 
784 #if FF_API_FLAG_TRUNCATED
785  av_freep(&s->parse_context.buffer);
786  s->parse_context.buffer_size = 0;
787 #endif
788 
789  av_freep(&s->bitstream_buffer);
790  s->allocated_bitstream_buffer_size = 0;
791 
792  if (!s->avctx)
793  return;
794 
795  if (s->picture) {
796  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
797  ff_mpv_picture_free(s->avctx, &s->picture[i]);
798  }
799  av_freep(&s->picture);
800  ff_mpv_picture_free(s->avctx, &s->last_picture);
801  ff_mpv_picture_free(s->avctx, &s->current_picture);
802  ff_mpv_picture_free(s->avctx, &s->next_picture);
803  av_frame_free(&s->new_picture);
804 
805  s->context_initialized = 0;
806  s->context_reinit = 0;
807  s->last_picture_ptr =
808  s->next_picture_ptr =
809  s->current_picture_ptr = NULL;
810  s->linesize = s->uvlinesize = 0;
811 }
812 
813 
/* Half-pel motion compensation of one w x h luma block at reduced
 * (lowres) resolution. NOTE(review): the first signature line is missing
 * from this excerpt; name hpel_motion_lowres inferred from the call in
 * MPV_motion_lowres below. The sub-pel fraction of the motion vector is
 * kept in sx/sy (s_mask bits) and handed to the h264 chroma MC function,
 * which does the bilinear blend. Returns 1 if the edge emulation buffer
 * was used, 0 otherwise. */
815  uint8_t *dest, const uint8_t *src,
816  int field_based, int field_select,
817  int src_x, int src_y,
818  int width, int height, ptrdiff_t stride,
819  int h_edge_pos, int v_edge_pos,
820  int w, int h, const h264_chroma_mc_func *pix_op,
821  int motion_x, int motion_y)
822 {
823  const int lowres = s->avctx->lowres;
824  const int op_index = FFMIN(lowres, 3);
825  const int s_mask = (2 << lowres) - 1;
826  int emu = 0;
827  int sx, sy;
828 
   /* quarter-pel vectors are halved to half-pel precision first */
829  if (s->quarter_sample) {
830  motion_x /= 2;
831  motion_y /= 2;
832  }
833 
834  sx = motion_x & s_mask;
835  sy = motion_y & s_mask;
   /* note: ">> lowres + 1" parses as ">> (lowres + 1)" */
836  src_x += motion_x >> lowres + 1;
837  src_y += motion_y >> lowres + 1;
838 
839  src += src_y * stride + src_x;
840 
840  /* fall back to the emulated edge buffer when the block reaches
   * outside the decoded picture area */
841  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
842  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
843  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
844  s->linesize, s->linesize,
845  w + 1, (h + 1) << field_based,
846  src_x, src_y * (1 << field_based),
847  h_edge_pos, v_edge_pos);
848  src = s->sc.edge_emu_buffer;
849  emu = 1;
850  }
851 
852  sx = (sx << 2) >> lowres;
853  sy = (sy << 2) >> lowres;
854  if (field_select)
855  src += s->linesize;
856  pix_op[op_index](dest, src, stride, h, sx, sy);
857  return emu;
858 }
859 
860 /* apply one mpeg motion vector to the three components */
/* NOTE(review): the first signature line is missing from this excerpt;
 * name mpeg_motion_lowres inferred from the calls in MPV_motion_lowres
 * below. Performs lowres motion compensation for luma and both chroma
 * planes of one macroblock, deriving the chroma vector per output
 * format (H.263 rounding, H.261 full-pel, or 420/422/444 scaling). */
862  uint8_t *dest_y,
863  uint8_t *dest_cb,
864  uint8_t *dest_cr,
865  int field_based,
866  int bottom_field,
867  int field_select,
868  uint8_t *const *ref_picture,
869  const h264_chroma_mc_func *pix_op,
870  int motion_x, int motion_y,
871  int h, int mb_y)
872 {
873  const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
874  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
875  ptrdiff_t uvlinesize, linesize;
876  const int lowres = s->avctx->lowres;
877  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
878  const int block_s = 8>>lowres;
879  const int s_mask = (2 << lowres) - 1;
880  const int h_edge_pos = s->h_edge_pos >> lowres;
881  const int v_edge_pos = s->v_edge_pos >> lowres;
882  linesize = s->current_picture.f->linesize[0] << field_based;
883  uvlinesize = s->current_picture.f->linesize[1] << field_based;
884 
885  // FIXME obviously not perfect but qpel will not work in lowres anyway
886  if (s->quarter_sample) {
887  motion_x /= 2;
888  motion_y /= 2;
889  }
890 
891  if(field_based){
892  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
893  }
894 
895  sx = motion_x & s_mask;
896  sy = motion_y & s_mask;
897  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
898  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
899 
   /* chroma source position depends on the codec family */
900  if (s->out_format == FMT_H263) {
901  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
902  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
903  uvsrc_x = src_x >> 1;
904  uvsrc_y = src_y >> 1;
905  } else if (s->out_format == FMT_H261) {
906  // even chroma mv's are full pel in H261
907  mx = motion_x / 4;
908  my = motion_y / 4;
909  uvsx = (2 * mx) & s_mask;
910  uvsy = (2 * my) & s_mask;
911  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
912  uvsrc_y = mb_y * block_s + (my >> lowres);
913  } else {
914  if(s->chroma_y_shift){
915  mx = motion_x / 2;
916  my = motion_y / 2;
917  uvsx = mx & s_mask;
918  uvsy = my & s_mask;
919  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
920  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
921  } else {
922  if(s->chroma_x_shift){
923  //Chroma422
924  mx = motion_x / 2;
925  uvsx = mx & s_mask;
926  uvsy = motion_y & s_mask;
927  uvsrc_y = src_y;
928  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
929  } else {
930  //Chroma444
931  uvsx = motion_x & s_mask;
932  uvsy = motion_y & s_mask;
933  uvsrc_x = src_x;
934  uvsrc_y = src_y;
935  }
936  }
937  }
938 
939  ptr_y = ref_picture[0] + src_y * linesize + src_x;
940  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
941  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
942 
   /* out-of-picture reads go through the emulated edge buffer */
943  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
944  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
945  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
946  linesize >> field_based, linesize >> field_based,
947  17, 17 + field_based,
948  src_x, src_y * (1 << field_based), h_edge_pos,
949  v_edge_pos);
950  ptr_y = s->sc.edge_emu_buffer;
951  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
952  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
953  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
954  if (s->workaround_bugs & FF_BUG_IEDGE)
955  vbuf -= s->uvlinesize;
956  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
957  uvlinesize >> field_based, uvlinesize >> field_based,
958  9, 9 + field_based,
959  uvsrc_x, uvsrc_y * (1 << field_based),
960  h_edge_pos >> 1, v_edge_pos >> 1);
961  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
962  uvlinesize >> field_based,uvlinesize >> field_based,
963  9, 9 + field_based,
964  uvsrc_x, uvsrc_y * (1 << field_based),
965  h_edge_pos >> 1, v_edge_pos >> 1);
966  ptr_cb = ubuf;
967  ptr_cr = vbuf;
968  }
969  }
970 
971  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
972  if (bottom_field) {
973  dest_y += s->linesize;
974  dest_cb += s->uvlinesize;
975  dest_cr += s->uvlinesize;
976  }
977 
978  if (field_select) {
979  ptr_y += s->linesize;
980  ptr_cb += s->uvlinesize;
981  ptr_cr += s->uvlinesize;
982  }
983 
984  sx = (sx << 2) >> lowres;
985  sy = (sy << 2) >> lowres;
986  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
987 
988  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
989  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
990  uvsx = (uvsx << 2) >> lowres;
991  uvsy = (uvsy << 2) >> lowres;
992  if (hc) {
993  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
994  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
995  }
996  }
997  // FIXME h261 lowres loop filter
998 }
999 
/* Chroma motion compensation for 4MV (8x8) macroblocks at lowres.
 * NOTE(review): the first signature line is missing from this excerpt;
 * name chroma_4mv_motion_lowres inferred from the call in
 * MPV_motion_lowres below. The caller passes the SUM of the four luma
 * vectors in mx/my; a single chroma vector is derived from it with the
 * H.263 special rounding. Both chroma planes share src position and
 * emulation state (emu flag). */
1001  uint8_t *dest_cb, uint8_t *dest_cr,
1002  uint8_t *const *ref_picture,
1003  const h264_chroma_mc_func * pix_op,
1004  int mx, int my)
1005 {
1006  const int lowres = s->avctx->lowres;
1007  const int op_index = FFMIN(lowres, 3);
1008  const int block_s = 8 >> lowres;
1009  const int s_mask = (2 << lowres) - 1;
1010  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1011  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1012  int emu = 0, src_x, src_y, sx, sy;
1013  ptrdiff_t offset;
1014  const uint8_t *ptr;
1015 
1016  if (s->quarter_sample) {
1017  mx /= 2;
1018  my /= 2;
1019  }
1020 
1021  /* In case of 8X8, we construct a single chroma motion vector
1022  with a special rounding */
1023  mx = ff_h263_round_chroma(mx);
1024  my = ff_h263_round_chroma(my);
1025 
1026  sx = mx & s_mask;
1027  sy = my & s_mask;
1028  src_x = s->mb_x * block_s + (mx >> lowres + 1);
1029  src_y = s->mb_y * block_s + (my >> lowres + 1);
1030 
1031  offset = src_y * s->uvlinesize + src_x;
1032  ptr = ref_picture[1] + offset;
1033  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1034  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1035  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1036  s->uvlinesize, s->uvlinesize,
1037  9, 9,
1038  src_x, src_y, h_edge_pos, v_edge_pos);
1039  ptr = s->sc.edge_emu_buffer;
1040  emu = 1;
1041  }
1042  sx = (sx << 2) >> lowres;
1043  sy = (sy << 2) >> lowres;
1044  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1045 
   /* Cr uses the same offset; reuse the emulation decision made for Cb */
1046  ptr = ref_picture[2] + offset;
1047  if (emu) {
1048  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1049  s->uvlinesize, s->uvlinesize,
1050  9, 9,
1051  src_x, src_y, h_edge_pos, v_edge_pos);
1052  ptr = s->sc.edge_emu_buffer;
1053  }
1054  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1055 }
1056 
1057 /**
1058  * motion compensation of a single macroblock
1059  * @param s context
1060  * @param dest_y luma destination pointer
1061  * @param dest_cb chroma cb/u destination pointer
1062  * @param dest_cr chroma cr/v destination pointer
1063  * @param dir direction (0->forward, 1->backward)
1064  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1065  * @param pix_op halfpel motion compensation function (average or put normally)
1066  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1067  */
1068 static inline void MPV_motion_lowres(MpegEncContext *s,
1069  uint8_t *dest_y, uint8_t *dest_cb,
1070  uint8_t *dest_cr,
1071  int dir, uint8_t *const *ref_picture,
1072  const h264_chroma_mc_func *pix_op)
1073 {
1074  int mx, my;
1075  int mb_x, mb_y, i;
1076  const int lowres = s->avctx->lowres;
1077  const int block_s = 8 >>lowres;
1078 
1079  mb_x = s->mb_x;
1080  mb_y = s->mb_y;
1081 
1082  switch (s->mv_type) {
1083  case MV_TYPE_16X16:
1084  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1085  0, 0, 0,
1086  ref_picture, pix_op,
1087  s->mv[dir][0][0], s->mv[dir][0][1],
1088  2 * block_s, mb_y);
1089  break;
1090  case MV_TYPE_8X8:
1091  mx = 0;
1092  my = 0;
1093  for (i = 0; i < 4; i++) {
1094  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1095  s->linesize) * block_s,
1096  ref_picture[0], 0, 0,
1097  (2 * mb_x + (i & 1)) * block_s,
1098  (2 * mb_y + (i >> 1)) * block_s,
1099  s->width, s->height, s->linesize,
1100  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1101  block_s, block_s, pix_op,
1102  s->mv[dir][i][0], s->mv[dir][i][1]);
1103 
1104  mx += s->mv[dir][i][0];
1105  my += s->mv[dir][i][1];
1106  }
1107 
1108  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1109  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1110  pix_op, mx, my);
1111  break;
1112  case MV_TYPE_FIELD:
1113  if (s->picture_structure == PICT_FRAME) {
1114  /* top field */
1115  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1116  1, 0, s->field_select[dir][0],
1117  ref_picture, pix_op,
1118  s->mv[dir][0][0], s->mv[dir][0][1],
1119  block_s, mb_y);
1120  /* bottom field */
1121  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1122  1, 1, s->field_select[dir][1],
1123  ref_picture, pix_op,
1124  s->mv[dir][1][0], s->mv[dir][1][1],
1125  block_s, mb_y);
1126  } else {
1127  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1128  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1129  ref_picture = s->current_picture_ptr->f->data;
1130 
1131  }
1132  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1133  0, 0, s->field_select[dir][0],
1134  ref_picture, pix_op,
1135  s->mv[dir][0][0],
1136  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1137  }
1138  break;
1139  case MV_TYPE_16X8:
1140  for (i = 0; i < 2; i++) {
1141  uint8_t *const *ref2picture;
1142 
1143  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1144  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1145  ref2picture = ref_picture;
1146  } else {
1147  ref2picture = s->current_picture_ptr->f->data;
1148  }
1149 
1150  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1151  0, 0, s->field_select[dir][i],
1152  ref2picture, pix_op,
1153  s->mv[dir][i][0], s->mv[dir][i][1] +
1154  2 * block_s * i, block_s, mb_y >> 1);
1155 
1156  dest_y += 2 * block_s * s->linesize;
1157  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1158  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1159  }
1160  break;
1161  case MV_TYPE_DMV:
1162  if (s->picture_structure == PICT_FRAME) {
1163  for (i = 0; i < 2; i++) {
1164  int j;
1165  for (j = 0; j < 2; j++) {
1166  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1167  1, j, j ^ i,
1168  ref_picture, pix_op,
1169  s->mv[dir][2 * i + j][0],
1170  s->mv[dir][2 * i + j][1],
1171  block_s, mb_y);
1172  }
1173  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1174  }
1175  } else {
1176  for (i = 0; i < 2; i++) {
1177  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1178  0, 0, s->picture_structure != i + 1,
1179  ref_picture, pix_op,
1180  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1181  2 * block_s, mb_y >> 1);
1182 
1183  // after put we make avg of the same block
1184  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1185 
1186  // opposite parity is always in the same
1187  // frame if this is second field
1188  if (!s->first_field) {
1189  ref_picture = s->current_picture_ptr->f->data;
1190  }
1191  }
1192  }
1193  break;
1194  default:
1195  av_assert2(0);
1196  }
1197 }
1198 
1199 /**
1200  * find the lowest MB row referenced in the MVs
1201  */
1203 {
1204  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1205  int my, off, i, mvs;
1206 
1207  if (s->picture_structure != PICT_FRAME || s->mcsel)
1208  goto unhandled;
1209 
1210  switch (s->mv_type) {
1211  case MV_TYPE_16X16:
1212  mvs = 1;
1213  break;
1214  case MV_TYPE_16X8:
1215  mvs = 2;
1216  break;
1217  case MV_TYPE_8X8:
1218  mvs = 4;
1219  break;
1220  default:
1221  goto unhandled;
1222  }
1223 
1224  for (i = 0; i < mvs; i++) {
1225  my = s->mv[dir][i][1];
1226  my_max = FFMAX(my_max, my);
1227  my_min = FFMIN(my_min, my);
1228  }
1229 
1230  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1231 
1232  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1233 unhandled:
1234  return s->mb_height-1;
1235 }
1236 
1237 /* put block[] to dest[] */
1238 static inline void put_dct(MpegEncContext *s,
1239  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1240 {
1241  s->dct_unquantize_intra(s, block, i, qscale);
1242  s->idsp.idct_put(dest, line_size, block);
1243 }
1244 
1245 /* add block[] to dest[] */
1246 static inline void add_dct(MpegEncContext *s,
1247  int16_t *block, int i, uint8_t *dest, int line_size)
1248 {
1249  if (s->block_last_index[i] >= 0) {
1250  s->idsp.idct_add(dest, line_size, block);
1251  }
1252 }
1253 
1254 static inline void add_dequant_dct(MpegEncContext *s,
1255  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1256 {
1257  if (s->block_last_index[i] >= 0) {
1258  s->dct_unquantize_inter(s, block, i, qscale);
1259 
1260  s->idsp.idct_add(dest, line_size, block);
1261  }
1262 }
1263 
1264 /**
1265  * Clean dc, ac, coded_block for the current non-intra MB.
1266  */
1268 {
1269  int wrap = s->b8_stride;
1270  int xy = s->block_index[0];
1271 
1272  s->dc_val[0][xy ] =
1273  s->dc_val[0][xy + 1 ] =
1274  s->dc_val[0][xy + wrap] =
1275  s->dc_val[0][xy + 1 + wrap] = 1024;
1276  /* ac pred */
1277  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1278  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1279  if (s->msmpeg4_version>=3) {
1280  s->coded_block[xy ] =
1281  s->coded_block[xy + 1 ] =
1282  s->coded_block[xy + wrap] =
1283  s->coded_block[xy + 1 + wrap] = 0;
1284  }
1285  /* chroma */
1286  wrap = s->mb_stride;
1287  xy = s->mb_x + s->mb_y * wrap;
1288  s->dc_val[1][xy] =
1289  s->dc_val[2][xy] = 1024;
1290  /* ac pred */
1291  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1292  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1293 
1294  s->mbintra_table[xy]= 0;
1295 }
1296 
1297 /* generic function called after a macroblock has been parsed by the
1298  decoder or after it has been encoded by the encoder.
1299 
1300  Important variables used:
1301  s->mb_intra : true if intra macroblock
1302  s->mv_dir : motion vector direction
1303  s->mv_type : motion vector type
1304  s->mv : motion vector
1305  s->interlaced_dct : true if interlaced dct used (mpeg2)
1306  */
1307 static av_always_inline
1309  int lowres_flag, int is_mpeg12)
1310 {
1311 #define IS_ENCODER(s) (CONFIG_MPEGVIDEOENC && !lowres_flag && (s)->encoding)
1312 #define IS_MPEG12(s) (CONFIG_SMALL ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
1313  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1314 
1315  s->current_picture.qscale_table[mb_xy] = s->qscale;
1316 
1317  /* update DC predictors for P macroblocks */
1318  if (!s->mb_intra) {
1319  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1320  if(s->mbintra_table[mb_xy])
1322  } else {
1323  s->last_dc[0] =
1324  s->last_dc[1] =
1325  s->last_dc[2] = 128 << s->intra_dc_precision;
1326  }
1327  }
1328  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1329  s->mbintra_table[mb_xy]=1;
1330 
1331  if (!IS_ENCODER(s) || (s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
1332  !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1333  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1334  uint8_t *dest_y, *dest_cb, *dest_cr;
1335  int dct_linesize, dct_offset;
1336  op_pixels_func (*op_pix)[4];
1337  qpel_mc_func (*op_qpix)[16];
1338  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1339  const int uvlinesize = s->current_picture.f->linesize[1];
1340  const int readable = s->pict_type != AV_PICTURE_TYPE_B || IS_ENCODER(s) || s->avctx->draw_horiz_band || lowres_flag;
1341  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1342 
1343  /* avoid copy if macroblock skipped in last frame too */
1344  /* skip only during decoding as we might trash the buffers during encoding a bit */
1345  if (!IS_ENCODER(s)) {
1346  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1347 
1348  if (s->mb_skipped) {
1349  s->mb_skipped= 0;
1350  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
1351  *mbskip_ptr = 1;
1352  } else if(!s->current_picture.reference) {
1353  *mbskip_ptr = 1;
1354  } else{
1355  *mbskip_ptr = 0; /* not skipped */
1356  }
1357  }
1358 
1359  dct_linesize = linesize << s->interlaced_dct;
1360  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1361 
1362  if(readable){
1363  dest_y= s->dest[0];
1364  dest_cb= s->dest[1];
1365  dest_cr= s->dest[2];
1366  }else{
1367  dest_y = s->sc.b_scratchpad;
1368  dest_cb= s->sc.b_scratchpad+16*linesize;
1369  dest_cr= s->sc.b_scratchpad+32*linesize;
1370  }
1371 
1372  if (!s->mb_intra) {
1373  /* motion handling */
1374  /* decoding or more than one mb_type (MC was already done otherwise) */
1375  if (!IS_ENCODER(s)) {
1376 
1377  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1378  if (s->mv_dir & MV_DIR_FORWARD) {
1379  ff_thread_await_progress(&s->last_picture_ptr->tf,
1381  0);
1382  }
1383  if (s->mv_dir & MV_DIR_BACKWARD) {
1384  ff_thread_await_progress(&s->next_picture_ptr->tf,
1386  0);
1387  }
1388  }
1389 
1390  if(lowres_flag){
1391  const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
1392 
1393  if (s->mv_dir & MV_DIR_FORWARD) {
1394  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
1395  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
1396  }
1397  if (s->mv_dir & MV_DIR_BACKWARD) {
1398  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
1399  }
1400  }else{
1401  op_qpix = s->me.qpel_put;
1402  if ((is_mpeg12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1403  op_pix = s->hdsp.put_pixels_tab;
1404  }else{
1405  op_pix = s->hdsp.put_no_rnd_pixels_tab;
1406  }
1407  if (s->mv_dir & MV_DIR_FORWARD) {
1408  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
1409  op_pix = s->hdsp.avg_pixels_tab;
1410  op_qpix= s->me.qpel_avg;
1411  }
1412  if (s->mv_dir & MV_DIR_BACKWARD) {
1413  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1414  }
1415  }
1416  }
1417 
1418  /* skip dequant / idct if we are really late ;) */
1419  if(s->avctx->skip_idct){
1420  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1421  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1422  || s->avctx->skip_idct >= AVDISCARD_ALL)
1423  goto skip_idct;
1424  }
1425 
1426  /* add dct residue */
1427  if (IS_ENCODER(s) || !(IS_MPEG12(s) || s->msmpeg4_version
1428  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1429  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1430  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1431  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1432  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1433 
1434  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1435  if (s->chroma_y_shift){
1436  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1437  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1438  }else{
1439  dct_linesize >>= 1;
1440  dct_offset >>=1;
1441  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1442  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1443  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1444  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1445  }
1446  }
1447  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
1448  add_dct(s, block[0], 0, dest_y , dct_linesize);
1449  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1450  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1451  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1452 
1453  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1454  if(s->chroma_y_shift){//Chroma420
1455  add_dct(s, block[4], 4, dest_cb, uvlinesize);
1456  add_dct(s, block[5], 5, dest_cr, uvlinesize);
1457  }else{
1458  //chroma422
1459  dct_linesize = uvlinesize << s->interlaced_dct;
1460  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1461 
1462  add_dct(s, block[4], 4, dest_cb, dct_linesize);
1463  add_dct(s, block[5], 5, dest_cr, dct_linesize);
1464  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1465  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1466  if(!s->chroma_x_shift){//Chroma444
1467  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
1468  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
1469  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
1470  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
1471  }
1472  }
1473  }//fi gray
1474  } else if (CONFIG_WMV2_DECODER) {
1475  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1476  }
1477  } else {
1478  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1479  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1480  if (!is_mpeg12 && CONFIG_MPEG4_DECODER && /* s->codec_id == AV_CODEC_ID_MPEG4 && */
1481  s->avctx->bits_per_raw_sample > 8) {
1482  ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
1483  uvlinesize, dct_linesize, dct_offset);
1484  }
1485  /* dct only in intra block */
1486  else if (IS_ENCODER(s) || !IS_MPEG12(s)) {
1487  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1488  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1489  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1490  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1491 
1492  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1493  if(s->chroma_y_shift){
1494  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1495  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1496  }else{
1497  dct_offset >>=1;
1498  dct_linesize >>=1;
1499  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1500  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1501  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1502  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1503  }
1504  }
1505  }else{
1506  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1507  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1508  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1509  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1510 
1511  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1512  if(s->chroma_y_shift){
1513  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1514  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1515  }else{
1516 
1517  dct_linesize = uvlinesize << s->interlaced_dct;
1518  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1519 
1520  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1521  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1522  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1523  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1524  if(!s->chroma_x_shift){//Chroma444
1525  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1526  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1527  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1528  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1529  }
1530  }
1531  }//gray
1532  }
1533  }
1534 skip_idct:
1535  if(!readable){
1536  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1537  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1538  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1539  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1540  }
1541  }
1542  }
1543 }
1544 
1546 {
1547  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1548  /* print DCT coefficients */
1549  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1550  for (int i = 0; i < 6; i++) {
1551  for (int j = 0; j < 64; j++) {
1552  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1553  block[i][s->idsp.idct_permutation[j]]);
1554  }
1555  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1556  }
1557  }
1558 
1559 #if !CONFIG_SMALL
1560  if(s->out_format == FMT_MPEG1) {
1561  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
1562  else mpv_reconstruct_mb_internal(s, block, 0, 1);
1563  } else
1564 #endif
1565  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
1566  else mpv_reconstruct_mb_internal(s, block, 0, 0);
1567 }
1568 
1569 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1570  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1571  const int uvlinesize = s->current_picture.f->linesize[1];
1572  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
1573  const int height_of_mb = 4 - s->avctx->lowres;
1574 
1575  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
1576  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
1577  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1578  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1579  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1580  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1581  //block_index is not used by mpeg2, so it is not affected by chroma_format
1582 
1583  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
1584  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1585  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1586 
1587  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1588  {
1589  if(s->picture_structure==PICT_FRAME){
1590  s->dest[0] += s->mb_y * linesize << height_of_mb;
1591  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1592  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1593  }else{
1594  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
1595  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1596  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1597  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1598  }
1599  }
1600 }
1601 
1602 /**
1603  * set qscale and update qscale dependent variables.
1604  */
1605 void ff_set_qscale(MpegEncContext * s, int qscale)
1606 {
1607  if (qscale < 1)
1608  qscale = 1;
1609  else if (qscale > 31)
1610  qscale = 31;
1611 
1612  s->qscale = qscale;
1613  s->chroma_qscale= s->chroma_qscale_table[qscale];
1614 
1615  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1616  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1617 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:668
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:255
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
free_duplicate_contexts
static void free_duplicate_contexts(MpegEncContext *s)
Definition: mpegvideo.c:440
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:95
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:87
blockdsp.h
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:525
backup_duplicate_context
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:449
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:507
ff_mpeg1_dc_scale_table
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:257
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1202
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:116
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:476
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
w
uint8_t w
Definition: llviddspenc.c:38
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1267
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
init_duplicate_context
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:351
ff_mpv_common_init_arm
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:44
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1569
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:85
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:79
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:263
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:297
mpegutils.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:240
free_duplicate_context
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:420
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:252
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:259
IS_MPEG12
#define IS_MPEG12(s)
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1308
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:312
dct_unquantize_mpeg1_inter_c
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:81
fail
#define fail()
Definition: checkasm.h:133
wrap
#define wrap(func)
Definition: neontest.h:65
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:117
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:34
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2886
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
dct_unquantize_mpeg1_intra_c
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:52
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:104
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:775
MpegEncContext::pblocks
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:473
avassert.h
gray16
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:269
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
width
#define width
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c)
Definition: blockdsp.c:58
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1246
s
#define s(width, name)
Definition: cbs_vp9.c:256
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:70
dct_unquantize_mpeg2_intra_bitexact
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:140
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, const uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:814
limits.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:861
ff_mpegvideodsp_init
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1238
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:1605
mathops.h
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1295
lowres
static int lowres
Definition: ffplay.c:335
qpeldsp.h
ff_alternate_horizontal_scan
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:271
ME_MAP_SIZE
#define ME_MAP_SIZE
Definition: motion_est.h:38
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
dct_unquantize_mpeg2_intra_c
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:110
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:256
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1333
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1000
ff_mpeg_er_init
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:101
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1478
height
#define height
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:258
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:975
IS_ENCODER
#define IS_ENCODER(s)
dct_unquantize_mpeg2_inter_c
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:174
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1477
clear_context
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:616
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
dct_unquantize_h263_inter_c
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:242
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:30
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ff_mpv_common_init_ppc
void ff_mpv_common_init_ppc(MpegEncContext *s)
Definition: mpegvideo_altivec.c:119
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:185
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
ff_mpv_picture_free
void av_cold ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic)
Definition: mpegpicture.c:461
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:272
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1342
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
dct_init
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:282
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
U
#define U(x)
Definition: vpx_arith.h:37
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:750
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:475
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:75
ScratchpadContext::edge_emu_buffer
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:37
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
dct_unquantize_h263_intra_c
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:208
ff_mpv_common_init_x86
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1545
ff_mpv_common_init_axp
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
Definition: mpegvideo_alpha.c:106
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:874
ff_mpv_common_init_mips
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
Definition: mpegvideo_init_mips.c:26
ff_default_chroma_qscale_table
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:396
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:288
COPY
#define COPY(a)
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t *const *ref_picture, const h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1068
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:251
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
mpeg_er.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1254
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
int
int
Definition: ffmpeg_filter.c:156
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
ff_mpv_common_init_neon
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:127
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
gray8
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:275