FFmpeg
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "config_components.h"
31 
32 #include "libavutil/attributes.h"
33 #include "libavutil/avassert.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "h264chroma.h"
40 #include "idctdsp.h"
41 #include "mathops.h"
42 #include "mpeg_er.h"
43 #include "mpegutils.h"
44 #include "mpegvideo.h"
45 #include "mpeg4videodec.h"
46 #include "mpegvideodata.h"
47 #include "qpeldsp.h"
48 #include "threadframe.h"
49 #include "wmv2dec.h"
50 #include <limits.h>
51 
53  int16_t *block, int n, int qscale)
54 {
55  int i, level, nCoeffs;
56  const uint16_t *quant_matrix;
57 
58  nCoeffs= s->block_last_index[n];
59 
60  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
61  /* XXX: only MPEG-1 */
62  quant_matrix = s->intra_matrix;
63  for(i=1;i<=nCoeffs;i++) {
64  int j= s->intra_scantable.permutated[i];
65  level = block[j];
66  if (level) {
67  if (level < 0) {
68  level = -level;
69  level = (int)(level * qscale * quant_matrix[j]) >> 3;
70  level = (level - 1) | 1;
71  level = -level;
72  } else {
73  level = (int)(level * qscale * quant_matrix[j]) >> 3;
74  level = (level - 1) | 1;
75  }
76  block[j] = level;
77  }
78  }
79 }
80 
82  int16_t *block, int n, int qscale)
83 {
84  int i, level, nCoeffs;
85  const uint16_t *quant_matrix;
86 
87  nCoeffs= s->block_last_index[n];
88 
89  quant_matrix = s->inter_matrix;
90  for(i=0; i<=nCoeffs; i++) {
91  int j= s->intra_scantable.permutated[i];
92  level = block[j];
93  if (level) {
94  if (level < 0) {
95  level = -level;
96  level = (((level << 1) + 1) * qscale *
97  ((int) (quant_matrix[j]))) >> 4;
98  level = (level - 1) | 1;
99  level = -level;
100  } else {
101  level = (((level << 1) + 1) * qscale *
102  ((int) (quant_matrix[j]))) >> 4;
103  level = (level - 1) | 1;
104  }
105  block[j] = level;
106  }
107  }
108 }
109 
111  int16_t *block, int n, int qscale)
112 {
113  int i, level, nCoeffs;
114  const uint16_t *quant_matrix;
115 
116  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
117  else qscale <<= 1;
118 
119  if(s->alternate_scan) nCoeffs= 63;
120  else nCoeffs= s->block_last_index[n];
121 
122  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
123  quant_matrix = s->intra_matrix;
124  for(i=1;i<=nCoeffs;i++) {
125  int j= s->intra_scantable.permutated[i];
126  level = block[j];
127  if (level) {
128  if (level < 0) {
129  level = -level;
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  level = -level;
132  } else {
133  level = (int)(level * qscale * quant_matrix[j]) >> 4;
134  }
135  block[j] = level;
136  }
137  }
138 }
139 
141  int16_t *block, int n, int qscale)
142 {
143  int i, level, nCoeffs;
144  const uint16_t *quant_matrix;
145  int sum=-1;
146 
147  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
148  else qscale <<= 1;
149 
150  if(s->alternate_scan) nCoeffs= 63;
151  else nCoeffs= s->block_last_index[n];
152 
153  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
154  sum += block[0];
155  quant_matrix = s->intra_matrix;
156  for(i=1;i<=nCoeffs;i++) {
157  int j= s->intra_scantable.permutated[i];
158  level = block[j];
159  if (level) {
160  if (level < 0) {
161  level = -level;
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  level = -level;
164  } else {
165  level = (int)(level * qscale * quant_matrix[j]) >> 4;
166  }
167  block[j] = level;
168  sum+=level;
169  }
170  }
171  block[63]^=sum&1;
172 }
173 
175  int16_t *block, int n, int qscale)
176 {
177  int i, level, nCoeffs;
178  const uint16_t *quant_matrix;
179  int sum=-1;
180 
181  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
182  else qscale <<= 1;
183 
184  if(s->alternate_scan) nCoeffs= 63;
185  else nCoeffs= s->block_last_index[n];
186 
187  quant_matrix = s->inter_matrix;
188  for(i=0; i<=nCoeffs; i++) {
189  int j= s->intra_scantable.permutated[i];
190  level = block[j];
191  if (level) {
192  if (level < 0) {
193  level = -level;
194  level = (((level << 1) + 1) * qscale *
195  ((int) (quant_matrix[j]))) >> 5;
196  level = -level;
197  } else {
198  level = (((level << 1) + 1) * qscale *
199  ((int) (quant_matrix[j]))) >> 5;
200  }
201  block[j] = level;
202  sum+=level;
203  }
204  }
205  block[63]^=sum&1;
206 }
207 
209  int16_t *block, int n, int qscale)
210 {
211  int i, level, qmul, qadd;
212  int nCoeffs;
213 
214  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
215 
216  qmul = qscale << 1;
217 
218  if (!s->h263_aic) {
219  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
220  qadd = (qscale - 1) | 1;
221  }else{
222  qadd = 0;
223  }
224  if(s->ac_pred)
225  nCoeffs=63;
226  else
227  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
228 
229  for(i=1; i<=nCoeffs; i++) {
230  level = block[i];
231  if (level) {
232  if (level < 0) {
233  level = level * qmul - qadd;
234  } else {
235  level = level * qmul + qadd;
236  }
237  block[i] = level;
238  }
239  }
240 }
241 
243  int16_t *block, int n, int qscale)
244 {
245  int i, level, qmul, qadd;
246  int nCoeffs;
247 
248  av_assert2(s->block_last_index[n]>=0);
249 
250  qadd = (qscale - 1) | 1;
251  qmul = qscale << 1;
252 
253  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
254 
255  for(i=0; i<=nCoeffs; i++) {
256  level = block[i];
257  if (level) {
258  if (level < 0) {
259  level = level * qmul - qadd;
260  } else {
261  level = level * qmul + qadd;
262  }
263  block[i] = level;
264  }
265  }
266 }
267 
268 
/* Debug stub installed in place of the 16-pixel-wide hpel ops when
 * FF_DEBUG_NOMC is set: instead of performing motion compensation it
 * paints every row of the 16-byte-wide destination mid-gray (128).
 * The src argument is unused but required by the op_pixels_func
 * signature this function substitutes for. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
274 
/* Debug stub installed in place of the 8-pixel-wide hpel ops when
 * FF_DEBUG_NOMC is set: instead of performing motion compensation it
 * paints every row of the 8-byte-wide destination mid-gray (128).
 * The src argument is unused but required by the op_pixels_func
 * signature this function substitutes for. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
280 
281 /* init common dct for both encoder and decoder */
283 {
284  ff_blockdsp_init(&s->bdsp, s->avctx);
285  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
286  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
287  ff_mpegvideodsp_init(&s->mdsp);
288  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
289 
290  if (s->avctx->debug & FF_DEBUG_NOMC) {
291  int i;
292  for (i=0; i<4; i++) {
293  s->hdsp.avg_pixels_tab[0][i] = gray16;
294  s->hdsp.put_pixels_tab[0][i] = gray16;
295  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
296 
297  s->hdsp.avg_pixels_tab[1][i] = gray8;
298  s->hdsp.put_pixels_tab[1][i] = gray8;
299  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
300  }
301  }
302 
303  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
304  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
305  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
306  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
307  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
308  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
309  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
310  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
311 
312 #if HAVE_INTRINSICS_NEON
314 #endif
315 
316 #if ARCH_ALPHA
318 #elif ARCH_ARM
320 #elif ARCH_PPC
322 #elif ARCH_X86
324 #elif ARCH_MIPS
326 #endif
327 
328  return 0;
329 }
330 
332 {
333  if (s->codec_id == AV_CODEC_ID_MPEG4)
334  s->idsp.mpeg4_studio_profile = s->studio_profile;
335  ff_idctdsp_init(&s->idsp, s->avctx);
336 
337  /* load & permutate scantables
338  * note: only wmv uses different ones
339  */
340  if (s->alternate_scan) {
341  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
342  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
343  } else {
344  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
345  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
346  }
347  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
348  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
349 }
350 
352 {
353  int y_size = s->b8_stride * (2 * s->mb_height + 1);
354  int c_size = s->mb_stride * (s->mb_height + 1);
355  int yc_size = y_size + 2 * c_size;
356  int i;
357 
358  if (s->mb_height & 1)
359  yc_size += 2*s->b8_stride + 2*s->mb_stride;
360 
361  if (s->encoding) {
362  if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) ||
363  !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
364  return AVERROR(ENOMEM);
365 
366  if (s->noise_reduction) {
367  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
368  return AVERROR(ENOMEM);
369  }
370  }
371  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
372  return AVERROR(ENOMEM);
373  s->block = s->blocks[0];
374 
375  for (i = 0; i < 12; i++) {
376  s->pblocks[i] = &s->block[i];
377  }
378 
379  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
380  // exchange uv
381  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
382  }
383 
384  if (s->out_format == FMT_H263) {
385  /* ac values */
386  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
387  return AVERROR(ENOMEM);
388  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
389  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
390  s->ac_val[2] = s->ac_val[1] + c_size;
391  }
392 
393  return 0;
394 }
395 
397 {
398  int nb_slices = s->slice_context_count, ret;
399 
400  /* We initialize the copies before the original so that
401  * fields allocated in init_duplicate_context are NULL after
402  * copying. This prevents double-frees upon allocation error. */
403  for (int i = 1; i < nb_slices; i++) {
404  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
405  if (!s->thread_context[i])
406  return AVERROR(ENOMEM);
407  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
408  return ret;
409  s->thread_context[i]->start_mb_y =
410  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
411  s->thread_context[i]->end_mb_y =
412  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
413  }
414  s->start_mb_y = 0;
415  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
416  : s->mb_height;
417  return init_duplicate_context(s);
418 }
419 
421 {
422  if (!s)
423  return;
424 
425  av_freep(&s->sc.edge_emu_buffer);
426  av_freep(&s->me.scratchpad);
427  s->me.temp =
428  s->sc.rd_scratchpad =
429  s->sc.b_scratchpad =
430  s->sc.obmc_scratchpad = NULL;
431 
432  av_freep(&s->dct_error_sum);
433  av_freep(&s->me.map);
434  av_freep(&s->me.score_map);
435  av_freep(&s->blocks);
436  av_freep(&s->ac_val_base);
437  s->block = NULL;
438 }
439 
441 {
442  for (int i = 1; i < s->slice_context_count; i++) {
443  free_duplicate_context(s->thread_context[i]);
444  av_freep(&s->thread_context[i]);
445  }
447 }
448 
450 {
451 #define COPY(a) bak->a = src->a
452  COPY(sc.edge_emu_buffer);
453  COPY(me.scratchpad);
454  COPY(me.temp);
455  COPY(sc.rd_scratchpad);
456  COPY(sc.b_scratchpad);
457  COPY(sc.obmc_scratchpad);
458  COPY(me.map);
459  COPY(me.score_map);
460  COPY(blocks);
461  COPY(block);
462  COPY(start_mb_y);
463  COPY(end_mb_y);
464  COPY(me.map_generation);
465  COPY(pb);
466  COPY(dct_error_sum);
467  COPY(dct_count[0]);
468  COPY(dct_count[1]);
469  COPY(ac_val_base);
470  COPY(ac_val[0]);
471  COPY(ac_val[1]);
472  COPY(ac_val[2]);
473 #undef COPY
474 }
475 
477 {
478  MpegEncContext bak;
479  int i, ret;
480  // FIXME copy only needed parts
481  backup_duplicate_context(&bak, dst);
482  memcpy(dst, src, sizeof(MpegEncContext));
483  backup_duplicate_context(dst, &bak);
484  for (i = 0; i < 12; i++) {
485  dst->pblocks[i] = &dst->block[i];
486  }
487  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
488  // exchange uv
489  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
490  }
491  if (!dst->sc.edge_emu_buffer &&
492  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
493  &dst->sc, dst->linesize)) < 0) {
494  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
495  "scratch buffers.\n");
496  return ret;
497  }
498  return 0;
499 }
500 
501 /**
502  * Set the given MpegEncContext to common defaults
503  * (same for encoding and decoding).
504  * The changed fields will not depend upon the
505  * prior state of the MpegEncContext.
506  */
508 {
509  s->y_dc_scale_table =
510  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
511  s->chroma_qscale_table = ff_default_chroma_qscale_table;
512  s->progressive_frame = 1;
513  s->progressive_sequence = 1;
514  s->picture_structure = PICT_FRAME;
515 
516  s->coded_picture_number = 0;
517  s->picture_number = 0;
518 
519  s->f_code = 1;
520  s->b_code = 1;
521 
522  s->slice_context_count = 1;
523 }
524 
526 {
527  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
528 
529  s->mb_width = (s->width + 15) / 16;
530  s->mb_stride = s->mb_width + 1;
531  s->b8_stride = s->mb_width * 2 + 1;
532  mb_array_size = s->mb_height * s->mb_stride;
533  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
534 
535  /* set default edge pos, will be overridden
536  * in decode_header if needed */
537  s->h_edge_pos = s->mb_width * 16;
538  s->v_edge_pos = s->mb_height * 16;
539 
540  s->mb_num = s->mb_width * s->mb_height;
541 
542  s->block_wrap[0] =
543  s->block_wrap[1] =
544  s->block_wrap[2] =
545  s->block_wrap[3] = s->b8_stride;
546  s->block_wrap[4] =
547  s->block_wrap[5] = s->mb_stride;
548 
549  y_size = s->b8_stride * (2 * s->mb_height + 1);
550  c_size = s->mb_stride * (s->mb_height + 1);
551  yc_size = y_size + 2 * c_size;
552 
553  if (s->mb_height & 1)
554  yc_size += 2*s->b8_stride + 2*s->mb_stride;
555 
556  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
557  return AVERROR(ENOMEM);
558  for (y = 0; y < s->mb_height; y++)
559  for (x = 0; x < s->mb_width; x++)
560  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
561 
562  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
563 
564  if (s->encoding) {
565  /* Allocate MV tables */
566  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
567  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
568  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
569  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
570  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
571  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
572  return AVERROR(ENOMEM);
573  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
574  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
575  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
576  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
577  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
578  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
579 
580  /* Allocate MB type table */
581  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
582  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
583  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
584  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size))
585  return AVERROR(ENOMEM);
586 
587 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
588  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
589  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
590  int16_t (*tmp1)[2];
591  uint8_t *tmp2;
592  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
593  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
594  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
595  return AVERROR(ENOMEM);
596 
597  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
598  tmp1 += s->mb_stride + 1;
599 
600  for (int i = 0; i < 2; i++) {
601  for (int j = 0; j < 2; j++) {
602  for (int k = 0; k < 2; k++) {
603  s->b_field_mv_table[i][j][k] = tmp1;
604  tmp1 += mv_table_size;
605  }
606  s->b_field_select_table[i][j] = tmp2;
607  tmp2 += 2 * mv_table_size;
608  }
609  }
610  }
611  }
612 
613  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
614  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
615  int16_t (*tmp)[2];
616  /* interlaced direct mode decoding tables */
617  if (!(tmp = ALLOCZ_ARRAYS(s->p_field_mv_table_base, 4, mv_table_size)))
618  return AVERROR(ENOMEM);
619  tmp += s->mb_stride + 1;
620  for (int i = 0; i < 2; i++) {
621  for (int j = 0; j < 2; j++) {
622  s->p_field_mv_table[i][j] = tmp;
623  tmp += mv_table_size;
624  }
625  }
626  }
627 
628  if (s->out_format == FMT_H263) {
629  /* cbp values, cbp, ac_pred, pred_dir */
630  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
631  !(s->cbp_table = av_mallocz(mb_array_size)) ||
632  !(s->pred_dir_table = av_mallocz(mb_array_size)))
633  return AVERROR(ENOMEM);
634  s->coded_block = s->coded_block_base + s->b8_stride + 1;
635  }
636 
637  if (s->h263_pred || s->h263_plus || !s->encoding) {
638  /* dc values */
639  // MN: we need these for error resilience of intra-frames
640  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
641  return AVERROR(ENOMEM);
642  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
643  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
644  s->dc_val[2] = s->dc_val[1] + c_size;
645  for (i = 0; i < yc_size; i++)
646  s->dc_val_base[i] = 1024;
647  }
648 
649  /* which mb is an intra block, init macroblock skip table */
650  if (!(s->mbintra_table = av_mallocz(mb_array_size)) ||
651  // Note the + 1 is for a quicker MPEG-4 slice_end detection
652  !(s->mbskip_table = av_mallocz(mb_array_size + 2)))
653  return AVERROR(ENOMEM);
654  memset(s->mbintra_table, 1, mb_array_size);
655 
656  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
657 }
658 
660 {
661  int i, j, k;
662 
663  memset(&s->next_picture, 0, sizeof(s->next_picture));
664  memset(&s->last_picture, 0, sizeof(s->last_picture));
665  memset(&s->current_picture, 0, sizeof(s->current_picture));
666  memset(&s->new_picture, 0, sizeof(s->new_picture));
667 
668  memset(s->thread_context, 0, sizeof(s->thread_context));
669 
670  s->me.map = NULL;
671  s->me.score_map = NULL;
672  s->dct_error_sum = NULL;
673  s->block = NULL;
674  s->blocks = NULL;
675  memset(s->pblocks, 0, sizeof(s->pblocks));
676  s->ac_val_base = NULL;
677  s->ac_val[0] =
678  s->ac_val[1] =
679  s->ac_val[2] =NULL;
680  s->sc.edge_emu_buffer = NULL;
681  s->me.scratchpad = NULL;
682  s->me.temp =
683  s->sc.rd_scratchpad =
684  s->sc.b_scratchpad =
685  s->sc.obmc_scratchpad = NULL;
686 
687 
688  s->bitstream_buffer = NULL;
689  s->allocated_bitstream_buffer_size = 0;
690  s->picture = NULL;
691  s->mb_type = NULL;
692  s->p_mv_table_base = NULL;
693  s->b_forw_mv_table_base = NULL;
694  s->b_back_mv_table_base = NULL;
695  s->b_bidir_forw_mv_table_base = NULL;
696  s->b_bidir_back_mv_table_base = NULL;
697  s->b_direct_mv_table_base = NULL;
698  s->p_mv_table = NULL;
699  s->b_forw_mv_table = NULL;
700  s->b_back_mv_table = NULL;
701  s->b_bidir_forw_mv_table = NULL;
702  s->b_bidir_back_mv_table = NULL;
703  s->b_direct_mv_table = NULL;
704  s->b_field_mv_table_base = NULL;
705  s->p_field_mv_table_base = NULL;
706  for (i = 0; i < 2; i++) {
707  for (j = 0; j < 2; j++) {
708  for (k = 0; k < 2; k++) {
709  s->b_field_mv_table[i][j][k] = NULL;
710  }
711  s->b_field_select_table[i][j] = NULL;
712  s->p_field_mv_table[i][j] = NULL;
713  }
714  s->p_field_select_table[i] = NULL;
715  }
716 
717  s->dc_val_base = NULL;
718  s->coded_block_base = NULL;
719  s->mbintra_table = NULL;
720  s->cbp_table = NULL;
721  s->pred_dir_table = NULL;
722 
723  s->mbskip_table = NULL;
724 
725  s->er.error_status_table = NULL;
726  s->er.er_temp_buffer = NULL;
727  s->mb_index2xy = NULL;
728  s->lambda_table = NULL;
729 
730  s->cplx_tab = NULL;
731  s->bits_tab = NULL;
732 }
733 
734 /**
735  * init common structure for both encoder and decoder.
736  * this assumes that some variables like width/height are already set
737  */
739 {
740  int i, ret;
741  int nb_slices = (HAVE_THREADS &&
742  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
743  s->avctx->thread_count : 1;
744 
745  clear_context(s);
746 
747  if (s->encoding && s->avctx->slices)
748  nb_slices = s->avctx->slices;
749 
750  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
751  s->mb_height = (s->height + 31) / 32 * 2;
752  else
753  s->mb_height = (s->height + 15) / 16;
754 
755  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
756  av_log(s->avctx, AV_LOG_ERROR,
757  "decoding to AV_PIX_FMT_NONE is not supported.\n");
758  return AVERROR(EINVAL);
759  }
760 
761  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
762  int max_slices;
763  if (s->mb_height)
764  max_slices = FFMIN(MAX_THREADS, s->mb_height);
765  else
766  max_slices = MAX_THREADS;
767  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
768  " reducing to %d\n", nb_slices, max_slices);
769  nb_slices = max_slices;
770  }
771 
772  if ((s->width || s->height) &&
773  av_image_check_size(s->width, s->height, 0, s->avctx))
774  return AVERROR(EINVAL);
775 
776  dct_init(s);
777 
778  /* set chroma shifts */
779  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
780  &s->chroma_x_shift,
781  &s->chroma_y_shift);
782  if (ret)
783  return ret;
784 
785  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
786  return AVERROR(ENOMEM);
787  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
788  s->picture[i].f = av_frame_alloc();
789  if (!s->picture[i].f)
790  goto fail_nomem;
791  }
792 
793  if (!(s->next_picture.f = av_frame_alloc()) ||
794  !(s->last_picture.f = av_frame_alloc()) ||
795  !(s->current_picture.f = av_frame_alloc()) ||
796  !(s->new_picture = av_frame_alloc()))
797  goto fail_nomem;
798 
800  goto fail;
801 
802 #if FF_API_FLAG_TRUNCATED
803  s->parse_context.state = -1;
804 #endif
805 
806  s->context_initialized = 1;
807  memset(s->thread_context, 0, sizeof(s->thread_context));
808  s->thread_context[0] = s;
809  s->slice_context_count = nb_slices;
810 
811 // if (s->width && s->height) {
813  if (ret < 0)
814  goto fail;
815 // }
816 
817  return 0;
818  fail_nomem:
819  ret = AVERROR(ENOMEM);
820  fail:
822  return ret;
823 }
824 
826 {
827  int i, j, k;
828 
830 
831  av_freep(&s->mb_type);
832  av_freep(&s->p_mv_table_base);
833  av_freep(&s->b_forw_mv_table_base);
834  av_freep(&s->b_back_mv_table_base);
835  av_freep(&s->b_bidir_forw_mv_table_base);
836  av_freep(&s->b_bidir_back_mv_table_base);
837  av_freep(&s->b_direct_mv_table_base);
838  s->p_mv_table = NULL;
839  s->b_forw_mv_table = NULL;
840  s->b_back_mv_table = NULL;
841  s->b_bidir_forw_mv_table = NULL;
842  s->b_bidir_back_mv_table = NULL;
843  s->b_direct_mv_table = NULL;
844  av_freep(&s->b_field_mv_table_base);
845  av_freep(&s->b_field_select_table[0][0]);
846  av_freep(&s->p_field_mv_table_base);
847  av_freep(&s->p_field_select_table[0]);
848  for (i = 0; i < 2; i++) {
849  for (j = 0; j < 2; j++) {
850  for (k = 0; k < 2; k++) {
851  s->b_field_mv_table[i][j][k] = NULL;
852  }
853  s->b_field_select_table[i][j] = NULL;
854  s->p_field_mv_table[i][j] = NULL;
855  }
856  s->p_field_select_table[i] = NULL;
857  }
858 
859  av_freep(&s->dc_val_base);
860  av_freep(&s->coded_block_base);
861  av_freep(&s->mbintra_table);
862  av_freep(&s->cbp_table);
863  av_freep(&s->pred_dir_table);
864 
865  av_freep(&s->mbskip_table);
866 
867  av_freep(&s->er.error_status_table);
868  av_freep(&s->er.er_temp_buffer);
869  av_freep(&s->mb_index2xy);
870  av_freep(&s->lambda_table);
871 
872  av_freep(&s->cplx_tab);
873  av_freep(&s->bits_tab);
874 
875  s->linesize = s->uvlinesize = 0;
876 }
877 
878 /* init common structure for both encoder and decoder */
880 {
881  if (!s)
882  return;
883 
885  if (s->slice_context_count > 1)
886  s->slice_context_count = 1;
887 
888 #if FF_API_FLAG_TRUNCATED
889  av_freep(&s->parse_context.buffer);
890  s->parse_context.buffer_size = 0;
891 #endif
892 
893  av_freep(&s->bitstream_buffer);
894  s->allocated_bitstream_buffer_size = 0;
895 
896  if (!s->avctx)
897  return;
898 
899  if (s->picture) {
900  for (int i = 0; i < MAX_PICTURE_COUNT; i++)
901  ff_mpv_picture_free(s->avctx, &s->picture[i]);
902  }
903  av_freep(&s->picture);
904  ff_mpv_picture_free(s->avctx, &s->last_picture);
905  ff_mpv_picture_free(s->avctx, &s->current_picture);
906  ff_mpv_picture_free(s->avctx, &s->next_picture);
907  av_frame_free(&s->new_picture);
908 
909  s->context_initialized = 0;
910  s->context_reinit = 0;
911  s->last_picture_ptr =
912  s->next_picture_ptr =
913  s->current_picture_ptr = NULL;
914  s->linesize = s->uvlinesize = 0;
915 }
916 
917 
919  uint8_t *dest, uint8_t *src,
920  int field_based, int field_select,
921  int src_x, int src_y,
922  int width, int height, ptrdiff_t stride,
923  int h_edge_pos, int v_edge_pos,
924  int w, int h, h264_chroma_mc_func *pix_op,
925  int motion_x, int motion_y)
926 {
927  const int lowres = s->avctx->lowres;
928  const int op_index = FFMIN(lowres, 3);
929  const int s_mask = (2 << lowres) - 1;
930  int emu = 0;
931  int sx, sy;
932 
933  if (s->quarter_sample) {
934  motion_x /= 2;
935  motion_y /= 2;
936  }
937 
938  sx = motion_x & s_mask;
939  sy = motion_y & s_mask;
940  src_x += motion_x >> lowres + 1;
941  src_y += motion_y >> lowres + 1;
942 
943  src += src_y * stride + src_x;
944 
945  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
946  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
947  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
948  s->linesize, s->linesize,
949  w + 1, (h + 1) << field_based,
950  src_x, src_y << field_based,
951  h_edge_pos, v_edge_pos);
952  src = s->sc.edge_emu_buffer;
953  emu = 1;
954  }
955 
956  sx = (sx << 2) >> lowres;
957  sy = (sy << 2) >> lowres;
958  if (field_select)
959  src += s->linesize;
960  pix_op[op_index](dest, src, stride, h, sx, sy);
961  return emu;
962 }
963 
964 /* apply one mpeg motion vector to the three components */
966  uint8_t *dest_y,
967  uint8_t *dest_cb,
968  uint8_t *dest_cr,
969  int field_based,
970  int bottom_field,
971  int field_select,
972  uint8_t **ref_picture,
973  h264_chroma_mc_func *pix_op,
974  int motion_x, int motion_y,
975  int h, int mb_y)
976 {
977  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
978  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
979  ptrdiff_t uvlinesize, linesize;
980  const int lowres = s->avctx->lowres;
981  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
982  const int block_s = 8>>lowres;
983  const int s_mask = (2 << lowres) - 1;
984  const int h_edge_pos = s->h_edge_pos >> lowres;
985  const int v_edge_pos = s->v_edge_pos >> lowres;
986  linesize = s->current_picture.f->linesize[0] << field_based;
987  uvlinesize = s->current_picture.f->linesize[1] << field_based;
988 
989  // FIXME obviously not perfect but qpel will not work in lowres anyway
990  if (s->quarter_sample) {
991  motion_x /= 2;
992  motion_y /= 2;
993  }
994 
995  if(field_based){
996  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
997  }
998 
999  sx = motion_x & s_mask;
1000  sy = motion_y & s_mask;
1001  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1002  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1003 
1004  if (s->out_format == FMT_H263) {
1005  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1006  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1007  uvsrc_x = src_x >> 1;
1008  uvsrc_y = src_y >> 1;
1009  } else if (s->out_format == FMT_H261) {
1010  // even chroma mv's are full pel in H261
1011  mx = motion_x / 4;
1012  my = motion_y / 4;
1013  uvsx = (2 * mx) & s_mask;
1014  uvsy = (2 * my) & s_mask;
1015  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1016  uvsrc_y = mb_y * block_s + (my >> lowres);
1017  } else {
1018  if(s->chroma_y_shift){
1019  mx = motion_x / 2;
1020  my = motion_y / 2;
1021  uvsx = mx & s_mask;
1022  uvsy = my & s_mask;
1023  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1024  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1025  } else {
1026  if(s->chroma_x_shift){
1027  //Chroma422
1028  mx = motion_x / 2;
1029  uvsx = mx & s_mask;
1030  uvsy = motion_y & s_mask;
1031  uvsrc_y = src_y;
1032  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1033  } else {
1034  //Chroma444
1035  uvsx = motion_x & s_mask;
1036  uvsy = motion_y & s_mask;
1037  uvsrc_x = src_x;
1038  uvsrc_y = src_y;
1039  }
1040  }
1041  }
1042 
1043  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1044  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1045  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1046 
1047  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1048  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1049  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
1050  linesize >> field_based, linesize >> field_based,
1051  17, 17 + field_based,
1052  src_x, src_y << field_based, h_edge_pos,
1053  v_edge_pos);
1054  ptr_y = s->sc.edge_emu_buffer;
1055  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1056  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
1057  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
1058  if (s->workaround_bugs & FF_BUG_IEDGE)
1059  vbuf -= s->uvlinesize;
1060  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
1061  uvlinesize >> field_based, uvlinesize >> field_based,
1062  9, 9 + field_based,
1063  uvsrc_x, uvsrc_y << field_based,
1064  h_edge_pos >> 1, v_edge_pos >> 1);
1065  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
1066  uvlinesize >> field_based,uvlinesize >> field_based,
1067  9, 9 + field_based,
1068  uvsrc_x, uvsrc_y << field_based,
1069  h_edge_pos >> 1, v_edge_pos >> 1);
1070  ptr_cb = ubuf;
1071  ptr_cr = vbuf;
1072  }
1073  }
1074 
1075  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
1076  if (bottom_field) {
1077  dest_y += s->linesize;
1078  dest_cb += s->uvlinesize;
1079  dest_cr += s->uvlinesize;
1080  }
1081 
1082  if (field_select) {
1083  ptr_y += s->linesize;
1084  ptr_cb += s->uvlinesize;
1085  ptr_cr += s->uvlinesize;
1086  }
1087 
1088  sx = (sx << 2) >> lowres;
1089  sy = (sy << 2) >> lowres;
1090  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1091 
1092  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1093  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
1094  uvsx = (uvsx << 2) >> lowres;
1095  uvsy = (uvsy << 2) >> lowres;
1096  if (hc) {
1097  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1098  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1099  }
1100  }
1101  // FIXME h261 lowres loop filter
1102 }
1103 
static void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    /* Lowres chroma motion compensation for 4MV macroblocks: the caller has
     * summed the four luma vectors; here they are combined into one chroma
     * vector with the H.263 rounding rule. */
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    /* mask of the fractional MV bits (chroma has lowres+1 of them here) */
    const int s_mask     = (2 << lowres) - 1;
    /* NOTE: ">> lowres + 1" parses as ">> (lowres + 1)" — intentional,
     * chroma edges are at half the luma resolution */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    /* if the 9x9 source read would cross the padded picture edge, copy
     * through the emulated-edge scratch buffer instead */
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        /* Cr needs edge emulation whenever Cb did (same offset and size) */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1160 
1161 /**
1162  * motion compensation of a single macroblock
1163  * @param s context
1164  * @param dest_y luma destination pointer
1165  * @param dest_cb chroma cb/u destination pointer
1166  * @param dest_cr chroma cr/v destination pointer
1167  * @param dir direction (0->forward, 1->backward)
1168  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1169  * @param pix_op halfpel motion compensation function (average or put normally)
1170  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1171  */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >> lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four vectors, one per 8x8 luma block; the chroma planes use
         * the (specially rounded) sum of the four vectors */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: when the selected field has opposite parity
             * and we are past the first field of a non-B picture, the
             * reference data lives in the current picture */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            /* same parity, B picture, or first field: use the passed
             * reference; otherwise the field is in the current picture */
            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* MPEG-2 dual prime: put the first prediction, then average the
         * opposite-parity prediction into it */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
1302 
/**
 * find the lowest MB row referenced in the MVs
 */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    /* field pictures and GMC are not analyzed; be conservative */
    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* normalize the largest |vertical MV| to quarter-pel units
     * (<< qpel_shift), then convert to MB rows (>> 6 = / 64: 2 subpel
     * bits plus 4 bits for the 16-line MB height), rounding up */
    off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    /* fallback: assume the whole frame may be referenced */
    return s->mb_height-1;
}
1340 
/* put block[] to dest[] */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    /* dequantize the intra block in place, then IDCT and store
     * (overwrite) the result at dest — order matters */
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
1348 
1349 /* add block[] to dest[] */
1350 static inline void add_dct(MpegEncContext *s,
1351  int16_t *block, int i, uint8_t *dest, int line_size)
1352 {
1353  if (s->block_last_index[i] >= 0) {
1354  s->idsp.idct_add(dest, line_size, block);
1355  }
1356 }
1357 
1358 static inline void add_dequant_dct(MpegEncContext *s,
1359  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1360 {
1361  if (s->block_last_index[i] >= 0) {
1362  s->dct_unquantize_inter(s, block, i, qscale);
1363 
1364  s->idsp.idct_add(dest, line_size, block);
1365  }
1366 }
1367 
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* reset the luma DC predictors of the four 8x8 blocks to the
     * neutral mid value */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        /* MSMPEG4v3+ additionally tracks coded-block pattern prediction */
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    /* mark this macroblock as no longer intra */
    s->mbintra_table[xy]= 0;
}
1400 
1401 /* generic function called after a macroblock has been parsed by the
1402  decoder or after it has been encoded by the encoder.
1403 
1404  Important variables used:
1405  s->mb_intra : true if intra macroblock
1406  s->mv_dir : motion vector direction
1407  s->mv_type : motion vector type
1408  s->mv : motion vector
1409  s->interlaced_dct : true if interlaced dct used (mpeg2)
1410  */
static av_always_inline
void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
                                 int lowres_flag, int is_mpeg12)
{
#define IS_ENCODER(s) (CONFIG_MPEGVIDEOENC && !lowres_flag && (s)->encoding)
#define IS_MPEG12(s) (CONFIG_SMALL ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct unless we are an RD-mode encoder that can skip it */
    if (!IS_ENCODER(s) || (s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
        !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        const int readable = s->pict_type != AV_PICTURE_TYPE_B || IS_ENCODER(s) || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if (!IS_ENCODER(s)) {
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT: rows of each field interleave, so double the
         * stride and offset by one line instead of block_size lines */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* write to a scratchpad; copied to s->dest[] at skip_idct */
            dest_y = s->sc.b_scratchpad;
            dest_cb= s->sc.b_scratchpad+16*linesize;
            dest_cr= s->sc.b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if (!IS_ENCODER(s)) {

                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* wait until the referenced rows of the reference
                     * frames have been decoded by the other threads */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        /* bidirectional: the backward pass averages in */
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                }else{
                    op_qpix = s->me.qpel_put;
                    if ((is_mpeg12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
                        op_pix = s->hdsp.put_pixels_tab;
                    }else{
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix = s->hdsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if (IS_ENCODER(s) || !(IS_MPEG12(s) || s->msmpeg4_version
                                  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                /* codecs that need dequantization here */
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        /* 4:2:2 — four chroma blocks per plane pair */
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* blocks are already dequantized; just IDCT and add */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            } else if (CONFIG_WMV2_DECODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
            if (!is_mpeg12 && CONFIG_MPEG4_DECODER && /* s->codec_id == AV_CODEC_ID_MPEG4 && */
                s->avctx->bits_per_raw_sample > 8) {
                ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
                                       uvlinesize, dct_linesize, dct_offset);
            }
            /* dct only in intra block */
            else if (IS_ENCODER(s) || !IS_MPEG12(s)) {
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* blocks are already dequantized (e.g. MPEG-1/2 intra) */
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* scratchpad path: copy the reconstructed MB to its final place */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
            }
        }
    }
}
1648 
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
        /* print DCT coefficients */
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < 64; j++) {
                av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                       block[i][s->idsp.idct_permutation[j]]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* dispatch to a specialization of mpv_reconstruct_mb_internal with
     * lowres_flag/is_mpeg12 known at compile time (unless CONFIG_SMALL) */
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1) {
        if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
        else                 mpv_reconstruct_mb_internal(s, block, 0, 1);
    } else
#endif
    if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
    else                 mpv_reconstruct_mb_internal(s, block, 0, 0);
}
1672 
1673 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1674  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1675  const int uvlinesize = s->current_picture.f->linesize[1];
1676  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
1677  const int height_of_mb = 4 - s->avctx->lowres;
1678 
1679  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
1680  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
1681  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1682  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1683  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1684  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1685  //block_index is not used by mpeg2, so it is not affected by chroma_format
1686 
1687  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
1688  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1689  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1690 
1691  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1692  {
1693  if(s->picture_structure==PICT_FRAME){
1694  s->dest[0] += s->mb_y * linesize << height_of_mb;
1695  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1696  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1697  }else{
1698  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
1699  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1700  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1701  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1702  }
1703  }
1704 }
1705 
1706 /**
1707  * set qscale and update qscale dependent variables.
1708  */
1709 void ff_set_qscale(MpegEncContext * s, int qscale)
1710 {
1711  if (qscale < 1)
1712  qscale = 1;
1713  else if (qscale > 31)
1714  qscale = 31;
1715 
1716  s->qscale = qscale;
1717  s->chroma_qscale= s->chroma_qscale_table[qscale];
1718 
1719  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1720  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1721 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:738
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:246
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
free_duplicate_contexts
static void free_duplicate_contexts(MpegEncContext *s)
Definition: mpegvideo.c:440
level
uint8_t level
Definition: svq3.c:206
av_clip
#define av_clip
Definition: common.h:95
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:78
blockdsp.h
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:965
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:525
backup_duplicate_context
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:449
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:507
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:918
ff_mpeg1_dc_scale_table
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
mpeg4videodec.h
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:248
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1306
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:476
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
w
uint8_t w
Definition: llviddspenc.c:38
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1371
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
init_duplicate_context
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:351
ff_mpv_common_init_arm
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:44
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1673
mpegvideo.h
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:85
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:79
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:253
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:287
mpegutils.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:240
free_duplicate_context
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:420
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:243
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:250
IS_MPEG12
#define IS_MPEG12(s)
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1412
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:312
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1104
dct_unquantize_mpeg1_inter_c
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:81
U
#define U(x)
Definition: vp56_arith.h:37
fail
#define fail()
Definition: checkasm.h:131
wrap
#define wrap(func)
Definition: neontest.h:65
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:117
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:36
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:39
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
dct_unquantize_mpeg1_intra_c
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:52
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:879
MpegEncContext::pblocks
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:464
avassert.h
gray16
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:269
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
width
#define width
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1350
s
#define s(width, name)
Definition: cbs_vp9.c:256
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
dct_unquantize_mpeg2_intra_bitexact
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:140
FMT_H261
@ FMT_H261
Definition: mpegutils.h:118
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
limits.h
ff_mpegvideodsp_init
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
threadframe.h
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1342
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:1709
mathops.h
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1286
lowres
static int lowres
Definition: ffplay.c:335
qpeldsp.h
ff_alternate_horizontal_scan
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:262
ME_MAP_SIZE
#define ME_MAP_SIZE
Definition: motion_est.h:38
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
dct_unquantize_mpeg2_intra_c
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:110
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:247
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1328
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:249
ff_mpeg_er_init
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:100
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1475
height
#define height
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:249
IS_ENCODER
#define IS_ENCODER(s)
dct_unquantize_mpeg2_inter_c
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:174
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1474
clear_context
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:659
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
FMT_H263
@ FMT_H263
Definition: mpegutils.h:119
dct_unquantize_h263_inter_c
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:242
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:30
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ff_mpv_common_init_ppc
void ff_mpv_common_init_ppc(MpegEncContext *s)
Definition: mpegvideo_altivec.c:119
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:185
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
ff_mpv_picture_free
void av_cold ff_mpv_picture_free(AVCodecContext *avctx, Picture *pic)
Definition: mpegpicture.c:474
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1337
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
wmv2dec.h
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
dct_init
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:282
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:825
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:466
ff_mpeg4_decode_studio
void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int block_size, int uvlinesize, int dct_linesize, int dct_offset)
Definition: mpeg4videodec.c:74
ScratchpadContext::edge_emu_buffer
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:37
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
dct_unquantize_h263_intra_c
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:208
ff_mpv_common_init_x86
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1172
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1649
ff_mpv_common_init_axp
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
Definition: mpegvideo_alpha.c:106
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:865
ff_mpv_common_init_mips
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
Definition: mpegvideo_init_mips.c:26
ff_default_chroma_qscale_table
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:396
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:278
COPY
#define COPY(a)
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:414
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:242
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
mpeg_er.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:370
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1358
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
int
int
Definition: ffmpeg_filter.c:153
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
ff_mpv_common_init_neon
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:127
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
gray8
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:275