FFmpeg
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
34 
35 #include "avcodec.h"
36 #include "blockdsp.h"
37 #include "h264chroma.h"
38 #include "idctdsp.h"
39 #include "mathops.h"
40 #include "mpeg_er.h"
41 #include "mpegutils.h"
42 #include "mpegvideo.h"
43 #include "mpegvideodata.h"
44 #include "qpeldsp.h"
45 #include "thread.h"
46 #include "wmv2.h"
47 #include <limits.h>
48 
50  int16_t *block, int n, int qscale)
51 {
52  int i, level, nCoeffs;
53  const uint16_t *quant_matrix;
54 
55  nCoeffs= s->block_last_index[n];
56 
57  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
58  /* XXX: only MPEG-1 */
59  quant_matrix = s->intra_matrix;
60  for(i=1;i<=nCoeffs;i++) {
61  int j= s->intra_scantable.permutated[i];
62  level = block[j];
63  if (level) {
64  if (level < 0) {
65  level = -level;
66  level = (int)(level * qscale * quant_matrix[j]) >> 3;
67  level = (level - 1) | 1;
68  level = -level;
69  } else {
70  level = (int)(level * qscale * quant_matrix[j]) >> 3;
71  level = (level - 1) | 1;
72  }
73  block[j] = level;
74  }
75  }
76 }
77 
79  int16_t *block, int n, int qscale)
80 {
81  int i, level, nCoeffs;
82  const uint16_t *quant_matrix;
83 
84  nCoeffs= s->block_last_index[n];
85 
86  quant_matrix = s->inter_matrix;
87  for(i=0; i<=nCoeffs; i++) {
88  int j= s->intra_scantable.permutated[i];
89  level = block[j];
90  if (level) {
91  if (level < 0) {
92  level = -level;
93  level = (((level << 1) + 1) * qscale *
94  ((int) (quant_matrix[j]))) >> 4;
95  level = (level - 1) | 1;
96  level = -level;
97  } else {
98  level = (((level << 1) + 1) * qscale *
99  ((int) (quant_matrix[j]))) >> 4;
100  level = (level - 1) | 1;
101  }
102  block[j] = level;
103  }
104  }
105 }
106 
108  int16_t *block, int n, int qscale)
109 {
110  int i, level, nCoeffs;
111  const uint16_t *quant_matrix;
112 
113  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
114  else qscale <<= 1;
115 
116  if(s->alternate_scan) nCoeffs= 63;
117  else nCoeffs= s->block_last_index[n];
118 
119  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
120  quant_matrix = s->intra_matrix;
121  for(i=1;i<=nCoeffs;i++) {
122  int j= s->intra_scantable.permutated[i];
123  level = block[j];
124  if (level) {
125  if (level < 0) {
126  level = -level;
127  level = (int)(level * qscale * quant_matrix[j]) >> 4;
128  level = -level;
129  } else {
130  level = (int)(level * qscale * quant_matrix[j]) >> 4;
131  }
132  block[j] = level;
133  }
134  }
135 }
136 
138  int16_t *block, int n, int qscale)
139 {
140  int i, level, nCoeffs;
141  const uint16_t *quant_matrix;
142  int sum=-1;
143 
144  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
145  else qscale <<= 1;
146 
147  if(s->alternate_scan) nCoeffs= 63;
148  else nCoeffs= s->block_last_index[n];
149 
150  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
151  sum += block[0];
152  quant_matrix = s->intra_matrix;
153  for(i=1;i<=nCoeffs;i++) {
154  int j= s->intra_scantable.permutated[i];
155  level = block[j];
156  if (level) {
157  if (level < 0) {
158  level = -level;
159  level = (int)(level * qscale * quant_matrix[j]) >> 4;
160  level = -level;
161  } else {
162  level = (int)(level * qscale * quant_matrix[j]) >> 4;
163  }
164  block[j] = level;
165  sum+=level;
166  }
167  }
168  block[63]^=sum&1;
169 }
170 
172  int16_t *block, int n, int qscale)
173 {
174  int i, level, nCoeffs;
175  const uint16_t *quant_matrix;
176  int sum=-1;
177 
178  if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
179  else qscale <<= 1;
180 
181  if(s->alternate_scan) nCoeffs= 63;
182  else nCoeffs= s->block_last_index[n];
183 
184  quant_matrix = s->inter_matrix;
185  for(i=0; i<=nCoeffs; i++) {
186  int j= s->intra_scantable.permutated[i];
187  level = block[j];
188  if (level) {
189  if (level < 0) {
190  level = -level;
191  level = (((level << 1) + 1) * qscale *
192  ((int) (quant_matrix[j]))) >> 5;
193  level = -level;
194  } else {
195  level = (((level << 1) + 1) * qscale *
196  ((int) (quant_matrix[j]))) >> 5;
197  }
198  block[j] = level;
199  sum+=level;
200  }
201  }
202  block[63]^=sum&1;
203 }
204 
206  int16_t *block, int n, int qscale)
207 {
208  int i, level, qmul, qadd;
209  int nCoeffs;
210 
211  av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
212 
213  qmul = qscale << 1;
214 
215  if (!s->h263_aic) {
216  block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
217  qadd = (qscale - 1) | 1;
218  }else{
219  qadd = 0;
220  }
221  if(s->ac_pred)
222  nCoeffs=63;
223  else
224  nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
225 
226  for(i=1; i<=nCoeffs; i++) {
227  level = block[i];
228  if (level) {
229  if (level < 0) {
230  level = level * qmul - qadd;
231  } else {
232  level = level * qmul + qadd;
233  }
234  block[i] = level;
235  }
236  }
237 }
238 
240  int16_t *block, int n, int qscale)
241 {
242  int i, level, qmul, qadd;
243  int nCoeffs;
244 
245  av_assert2(s->block_last_index[n]>=0);
246 
247  qadd = (qscale - 1) | 1;
248  qmul = qscale << 1;
249 
250  nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
251 
252  for(i=0; i<=nCoeffs; i++) {
253  level = block[i];
254  if (level) {
255  if (level < 0) {
256  level = level * qmul - qadd;
257  } else {
258  level = level * qmul + qadd;
259  }
260  block[i] = level;
261  }
262  }
263 }
264 
265 
/**
 * Overwrite a 16-byte-wide block with mid-grey (value 128).
 * Installed in place of the put/avg pixel ops when motion compensation
 * is disabled for debugging (FF_DEBUG_NOMC); the src argument is ignored.
 */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
271 
/**
 * Overwrite an 8-byte-wide block with mid-grey (value 128).
 * Installed in place of the put/avg pixel ops when motion compensation
 * is disabled for debugging (FF_DEBUG_NOMC); the src argument is ignored.
 */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;

    for (row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
277 
278 /* init common dct for both encoder and decoder */
280 {
281  ff_blockdsp_init(&s->bdsp, s->avctx);
282  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
283  ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
284  ff_mpegvideodsp_init(&s->mdsp);
285  ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);
286 
287  if (s->avctx->debug & FF_DEBUG_NOMC) {
288  int i;
289  for (i=0; i<4; i++) {
290  s->hdsp.avg_pixels_tab[0][i] = gray16;
291  s->hdsp.put_pixels_tab[0][i] = gray16;
292  s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;
293 
294  s->hdsp.avg_pixels_tab[1][i] = gray8;
295  s->hdsp.put_pixels_tab[1][i] = gray8;
296  s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
297  }
298  }
299 
300  s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
301  s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
302  s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
303  s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
304  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
305  if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
306  s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
307  s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
308 
309  if (HAVE_INTRINSICS_NEON)
311 
312  if (ARCH_ALPHA)
314  if (ARCH_ARM)
316  if (ARCH_PPC)
318  if (ARCH_X86)
320  if (ARCH_MIPS)
322 
323  return 0;
324 }
325 
327 {
328  if (s->codec_id == AV_CODEC_ID_MPEG4)
329  s->idsp.mpeg4_studio_profile = s->studio_profile;
330  ff_idctdsp_init(&s->idsp, s->avctx);
331 
332  /* load & permutate scantables
333  * note: only wmv uses different ones
334  */
335  if (s->alternate_scan) {
336  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
337  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
338  } else {
339  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
340  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
341  }
342  ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
343  ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
344 }
345 
347 {
348  int y_size = s->b8_stride * (2 * s->mb_height + 1);
349  int c_size = s->mb_stride * (s->mb_height + 1);
350  int yc_size = y_size + 2 * c_size;
351  int i;
352 
353  if (s->mb_height & 1)
354  yc_size += 2*s->b8_stride + 2*s->mb_stride;
355 
356  if (s->encoding) {
357  if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) ||
358  !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
359  return AVERROR(ENOMEM);
360 
361  if (s->noise_reduction) {
362  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
363  return AVERROR(ENOMEM);
364  }
365  }
366  if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
367  return AVERROR(ENOMEM);
368  s->block = s->blocks[0];
369 
370  for (i = 0; i < 12; i++) {
371  s->pblocks[i] = &s->block[i];
372  }
373 
374  if (s->avctx->codec_tag == AV_RL32("VCR2")) {
375  // exchange uv
376  FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
377  }
378 
379  if (s->out_format == FMT_H263) {
380  if (!(s->block32 = av_mallocz(sizeof(*s->block32))) ||
381  !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock))))
382  return AVERROR(ENOMEM);
383  s->dpcm_direction = 0;
384 
385  /* ac values */
386  if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
387  return AVERROR(ENOMEM);
388  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
389  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
390  s->ac_val[2] = s->ac_val[1] + c_size;
391  }
392 
393  return 0;
394 }
395 
397 {
398  int nb_slices = s->slice_context_count, ret;
399 
400  /* We initialize the copies before the original so that
401  * fields allocated in init_duplicate_context are NULL after
402  * copying. This prevents double-frees upon allocation error. */
403  for (int i = 1; i < nb_slices; i++) {
404  s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
405  if (!s->thread_context[i])
406  return AVERROR(ENOMEM);
407  if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
408  return ret;
409  s->thread_context[i]->start_mb_y =
410  (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
411  s->thread_context[i]->end_mb_y =
412  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
413  }
414  s->start_mb_y = 0;
415  s->end_mb_y = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
416  : s->mb_height;
417  return init_duplicate_context(s);
418 }
419 
421 {
422  if (!s)
423  return;
424 
425  av_freep(&s->sc.edge_emu_buffer);
426  av_freep(&s->me.scratchpad);
427  s->me.temp =
428  s->sc.rd_scratchpad =
429  s->sc.b_scratchpad =
430  s->sc.obmc_scratchpad = NULL;
431 
432  av_freep(&s->dct_error_sum);
433  av_freep(&s->me.map);
434  av_freep(&s->me.score_map);
435  av_freep(&s->blocks);
436  av_freep(&s->block32);
437  av_freep(&s->dpcm_macroblock);
438  av_freep(&s->ac_val_base);
439  s->block = NULL;
440 }
441 
443 {
444  for (int i = 1; i < s->slice_context_count; i++) {
445  free_duplicate_context(s->thread_context[i]);
446  av_freep(&s->thread_context[i]);
447  }
449 }
450 
452 {
453 #define COPY(a) bak->a = src->a
454  COPY(sc.edge_emu_buffer);
455  COPY(me.scratchpad);
456  COPY(me.temp);
457  COPY(sc.rd_scratchpad);
458  COPY(sc.b_scratchpad);
459  COPY(sc.obmc_scratchpad);
460  COPY(me.map);
461  COPY(me.score_map);
462  COPY(blocks);
463  COPY(block);
464  COPY(block32);
465  COPY(dpcm_macroblock);
466  COPY(dpcm_direction);
467  COPY(start_mb_y);
468  COPY(end_mb_y);
469  COPY(me.map_generation);
470  COPY(pb);
471  COPY(dct_error_sum);
472  COPY(dct_count[0]);
473  COPY(dct_count[1]);
474  COPY(ac_val_base);
475  COPY(ac_val[0]);
476  COPY(ac_val[1]);
477  COPY(ac_val[2]);
478 #undef COPY
479 }
480 
482 {
483  MpegEncContext bak;
484  int i, ret;
485  // FIXME copy only needed parts
486  backup_duplicate_context(&bak, dst);
487  memcpy(dst, src, sizeof(MpegEncContext));
488  backup_duplicate_context(dst, &bak);
489  for (i = 0; i < 12; i++) {
490  dst->pblocks[i] = &dst->block[i];
491  }
492  if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
493  // exchange uv
494  FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
495  }
496  if (!dst->sc.edge_emu_buffer &&
497  (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
498  &dst->sc, dst->linesize)) < 0) {
499  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
500  "scratch buffers.\n");
501  return ret;
502  }
503  return 0;
504 }
505 
506 /**
507  * Set the given MpegEncContext to common defaults
508  * (same for encoding and decoding).
509  * The changed fields will not depend upon the
510  * prior state of the MpegEncContext.
511  */
513 {
514  s->y_dc_scale_table =
515  s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
516  s->chroma_qscale_table = ff_default_chroma_qscale_table;
517  s->progressive_frame = 1;
518  s->progressive_sequence = 1;
519  s->picture_structure = PICT_FRAME;
520 
521  s->coded_picture_number = 0;
522  s->picture_number = 0;
523 
524  s->f_code = 1;
525  s->b_code = 1;
526 
527  s->slice_context_count = 1;
528 }
529 
531 {
532  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
533 
534  s->mb_width = (s->width + 15) / 16;
535  s->mb_stride = s->mb_width + 1;
536  s->b8_stride = s->mb_width * 2 + 1;
537  mb_array_size = s->mb_height * s->mb_stride;
538  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
539 
540  /* set default edge pos, will be overridden
541  * in decode_header if needed */
542  s->h_edge_pos = s->mb_width * 16;
543  s->v_edge_pos = s->mb_height * 16;
544 
545  s->mb_num = s->mb_width * s->mb_height;
546 
547  s->block_wrap[0] =
548  s->block_wrap[1] =
549  s->block_wrap[2] =
550  s->block_wrap[3] = s->b8_stride;
551  s->block_wrap[4] =
552  s->block_wrap[5] = s->mb_stride;
553 
554  y_size = s->b8_stride * (2 * s->mb_height + 1);
555  c_size = s->mb_stride * (s->mb_height + 1);
556  yc_size = y_size + 2 * c_size;
557 
558  if (s->mb_height & 1)
559  yc_size += 2*s->b8_stride + 2*s->mb_stride;
560 
561  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
562  return AVERROR(ENOMEM);
563  for (y = 0; y < s->mb_height; y++)
564  for (x = 0; x < s->mb_width; x++)
565  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
566 
567  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
568 
569  if (s->encoding) {
570  /* Allocate MV tables */
571  if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base, mv_table_size) ||
572  !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base, mv_table_size) ||
573  !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base, mv_table_size) ||
574  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
575  !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
576  !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base, mv_table_size))
577  return AVERROR(ENOMEM);
578  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
579  s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
580  s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
581  s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
582  s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
583  s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
584 
585  /* Allocate MB type table */
586  if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type, mb_array_size) ||
587  !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
588  !FF_ALLOC_TYPED_ARRAY (s->cplx_tab, mb_array_size) ||
589  !FF_ALLOC_TYPED_ARRAY (s->bits_tab, mb_array_size))
590  return AVERROR(ENOMEM);
591 
592 #define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
593  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
594  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
595  int16_t (*tmp1)[2];
596  uint8_t *tmp2;
597  if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
598  !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
599  !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
600  return AVERROR(ENOMEM);
601 
602  s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
603  tmp1 += s->mb_stride + 1;
604 
605  for (int i = 0; i < 2; i++) {
606  for (int j = 0; j < 2; j++) {
607  for (int k = 0; k < 2; k++) {
608  s->b_field_mv_table[i][j][k] = tmp1;
609  tmp1 += mv_table_size;
610  }
611  s->b_field_select_table[i][j] = tmp2;
612  tmp2 += 2 * mv_table_size;
613  }
614  }
615  }
616  }
617 
618  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
619  (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
620  int16_t (*tmp)[2];
621  /* interlaced direct mode decoding tables */
622  if (!(tmp = ALLOCZ_ARRAYS(s->p_field_mv_table_base, 4, mv_table_size)))
623  return AVERROR(ENOMEM);
624  tmp += s->mb_stride + 1;
625  for (int i = 0; i < 2; i++) {
626  for (int j = 0; j < 2; j++) {
627  s->p_field_mv_table[i][j] = tmp;
628  tmp += mv_table_size;
629  }
630  }
631  }
632 
633  if (s->out_format == FMT_H263) {
634  /* cbp values, cbp, ac_pred, pred_dir */
635  if (!(s->coded_block_base = av_mallocz(y_size + (s->mb_height&1)*2*s->b8_stride)) ||
636  !(s->cbp_table = av_mallocz(mb_array_size)) ||
637  !(s->pred_dir_table = av_mallocz(mb_array_size)))
638  return AVERROR(ENOMEM);
639  s->coded_block = s->coded_block_base + s->b8_stride + 1;
640  }
641 
642  if (s->h263_pred || s->h263_plus || !s->encoding) {
643  /* dc values */
644  // MN: we need these for error resilience of intra-frames
645  if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
646  return AVERROR(ENOMEM);
647  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
648  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
649  s->dc_val[2] = s->dc_val[1] + c_size;
650  for (i = 0; i < yc_size; i++)
651  s->dc_val_base[i] = 1024;
652  }
653 
654  /* which mb is an intra block, init macroblock skip table */
655  if (!(s->mbintra_table = av_mallocz(mb_array_size)) ||
656  // Note the + 1 is for a quicker MPEG-4 slice_end detection
657  !(s->mbskip_table = av_mallocz(mb_array_size + 2)))
658  return AVERROR(ENOMEM);
659  memset(s->mbintra_table, 1, mb_array_size);
660 
661  return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
662 }
663 
665 {
666  int i, j, k;
667 
668  memset(&s->next_picture, 0, sizeof(s->next_picture));
669  memset(&s->last_picture, 0, sizeof(s->last_picture));
670  memset(&s->current_picture, 0, sizeof(s->current_picture));
671  memset(&s->new_picture, 0, sizeof(s->new_picture));
672 
673  memset(s->thread_context, 0, sizeof(s->thread_context));
674 
675  s->me.map = NULL;
676  s->me.score_map = NULL;
677  s->dct_error_sum = NULL;
678  s->block = NULL;
679  s->blocks = NULL;
680  s->block32 = NULL;
681  memset(s->pblocks, 0, sizeof(s->pblocks));
682  s->dpcm_direction = 0;
683  s->dpcm_macroblock = NULL;
684  s->ac_val_base = NULL;
685  s->ac_val[0] =
686  s->ac_val[1] =
687  s->ac_val[2] =NULL;
688  s->sc.edge_emu_buffer = NULL;
689  s->me.scratchpad = NULL;
690  s->me.temp =
691  s->sc.rd_scratchpad =
692  s->sc.b_scratchpad =
693  s->sc.obmc_scratchpad = NULL;
694 
695 
696  s->bitstream_buffer = NULL;
697  s->allocated_bitstream_buffer_size = 0;
698  s->picture = NULL;
699  s->mb_type = NULL;
700  s->p_mv_table_base = NULL;
701  s->b_forw_mv_table_base = NULL;
702  s->b_back_mv_table_base = NULL;
703  s->b_bidir_forw_mv_table_base = NULL;
704  s->b_bidir_back_mv_table_base = NULL;
705  s->b_direct_mv_table_base = NULL;
706  s->p_mv_table = NULL;
707  s->b_forw_mv_table = NULL;
708  s->b_back_mv_table = NULL;
709  s->b_bidir_forw_mv_table = NULL;
710  s->b_bidir_back_mv_table = NULL;
711  s->b_direct_mv_table = NULL;
712  s->b_field_mv_table_base = NULL;
713  s->p_field_mv_table_base = NULL;
714  for (i = 0; i < 2; i++) {
715  for (j = 0; j < 2; j++) {
716  for (k = 0; k < 2; k++) {
717  s->b_field_mv_table[i][j][k] = NULL;
718  }
719  s->b_field_select_table[i][j] = NULL;
720  s->p_field_mv_table[i][j] = NULL;
721  }
722  s->p_field_select_table[i] = NULL;
723  }
724 
725  s->dc_val_base = NULL;
726  s->coded_block_base = NULL;
727  s->mbintra_table = NULL;
728  s->cbp_table = NULL;
729  s->pred_dir_table = NULL;
730 
731  s->mbskip_table = NULL;
732 
733  s->er.error_status_table = NULL;
734  s->er.er_temp_buffer = NULL;
735  s->mb_index2xy = NULL;
736  s->lambda_table = NULL;
737 
738  s->cplx_tab = NULL;
739  s->bits_tab = NULL;
740 }
741 
742 /**
743  * init common structure for both encoder and decoder.
744  * this assumes that some variables like width/height are already set
745  */
747 {
748  int i, ret;
749  int nb_slices = (HAVE_THREADS &&
750  s->avctx->active_thread_type & FF_THREAD_SLICE) ?
751  s->avctx->thread_count : 1;
752 
753  clear_context(s);
754 
755  if (s->encoding && s->avctx->slices)
756  nb_slices = s->avctx->slices;
757 
758  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
759  s->mb_height = (s->height + 31) / 32 * 2;
760  else
761  s->mb_height = (s->height + 15) / 16;
762 
763  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
764  av_log(s->avctx, AV_LOG_ERROR,
765  "decoding to AV_PIX_FMT_NONE is not supported.\n");
766  return AVERROR(EINVAL);
767  }
768 
769  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
770  int max_slices;
771  if (s->mb_height)
772  max_slices = FFMIN(MAX_THREADS, s->mb_height);
773  else
774  max_slices = MAX_THREADS;
775  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
776  " reducing to %d\n", nb_slices, max_slices);
777  nb_slices = max_slices;
778  }
779 
780  if ((s->width || s->height) &&
781  av_image_check_size(s->width, s->height, 0, s->avctx))
782  return AVERROR(EINVAL);
783 
784  dct_init(s);
785 
786  /* set chroma shifts */
787  ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
788  &s->chroma_x_shift,
789  &s->chroma_y_shift);
790  if (ret)
791  return ret;
792 
793  if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
794  return AVERROR(ENOMEM);
795  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
796  s->picture[i].f = av_frame_alloc();
797  if (!s->picture[i].f)
798  goto fail_nomem;
799  }
800 
801  if (!(s->next_picture.f = av_frame_alloc()) ||
802  !(s->last_picture.f = av_frame_alloc()) ||
803  !(s->current_picture.f = av_frame_alloc()) ||
804  !(s->new_picture.f = av_frame_alloc()))
805  goto fail_nomem;
806 
808  goto fail;
809 
810 #if FF_API_FLAG_TRUNCATED
811  s->parse_context.state = -1;
812 #endif
813 
814  s->context_initialized = 1;
815  memset(s->thread_context, 0, sizeof(s->thread_context));
816  s->thread_context[0] = s;
817  s->slice_context_count = nb_slices;
818 
819 // if (s->width && s->height) {
821  if (ret < 0)
822  goto fail;
823 // }
824 
825  return 0;
826  fail_nomem:
827  ret = AVERROR(ENOMEM);
828  fail:
830  return ret;
831 }
832 
834 {
835  int i, j, k;
836 
838 
839  av_freep(&s->mb_type);
840  av_freep(&s->p_mv_table_base);
841  av_freep(&s->b_forw_mv_table_base);
842  av_freep(&s->b_back_mv_table_base);
843  av_freep(&s->b_bidir_forw_mv_table_base);
844  av_freep(&s->b_bidir_back_mv_table_base);
845  av_freep(&s->b_direct_mv_table_base);
846  s->p_mv_table = NULL;
847  s->b_forw_mv_table = NULL;
848  s->b_back_mv_table = NULL;
849  s->b_bidir_forw_mv_table = NULL;
850  s->b_bidir_back_mv_table = NULL;
851  s->b_direct_mv_table = NULL;
852  av_freep(&s->b_field_mv_table_base);
853  av_freep(&s->b_field_select_table[0][0]);
854  av_freep(&s->p_field_mv_table_base);
855  av_freep(&s->p_field_select_table[0]);
856  for (i = 0; i < 2; i++) {
857  for (j = 0; j < 2; j++) {
858  for (k = 0; k < 2; k++) {
859  s->b_field_mv_table[i][j][k] = NULL;
860  }
861  s->b_field_select_table[i][j] = NULL;
862  s->p_field_mv_table[i][j] = NULL;
863  }
864  s->p_field_select_table[i] = NULL;
865  }
866 
867  av_freep(&s->dc_val_base);
868  av_freep(&s->coded_block_base);
869  av_freep(&s->mbintra_table);
870  av_freep(&s->cbp_table);
871  av_freep(&s->pred_dir_table);
872 
873  av_freep(&s->mbskip_table);
874 
875  av_freep(&s->er.error_status_table);
876  av_freep(&s->er.er_temp_buffer);
877  av_freep(&s->mb_index2xy);
878  av_freep(&s->lambda_table);
879 
880  av_freep(&s->cplx_tab);
881  av_freep(&s->bits_tab);
882 
883  s->linesize = s->uvlinesize = 0;
884 }
885 
886 /* init common structure for both encoder and decoder */
888 {
889  int i;
890 
891  if (!s)
892  return;
893 
895  if (s->slice_context_count > 1)
896  s->slice_context_count = 1;
897 
898 #if FF_API_FLAG_TRUNCATED
899  av_freep(&s->parse_context.buffer);
900  s->parse_context.buffer_size = 0;
901 #endif
902 
903  av_freep(&s->bitstream_buffer);
904  s->allocated_bitstream_buffer_size = 0;
905 
906  if (!s->avctx)
907  return;
908 
909  if (s->picture) {
910  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
911  ff_free_picture_tables(&s->picture[i]);
912  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
913  av_frame_free(&s->picture[i].f);
914  }
915  }
916  av_freep(&s->picture);
917  ff_free_picture_tables(&s->last_picture);
918  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
919  av_frame_free(&s->last_picture.f);
920  ff_free_picture_tables(&s->current_picture);
921  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
922  av_frame_free(&s->current_picture.f);
923  ff_free_picture_tables(&s->next_picture);
924  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
925  av_frame_free(&s->next_picture.f);
926  ff_free_picture_tables(&s->new_picture);
927  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
928  av_frame_free(&s->new_picture.f);
929 
930  s->context_initialized = 0;
931  s->context_reinit = 0;
932  s->last_picture_ptr =
933  s->next_picture_ptr =
934  s->current_picture_ptr = NULL;
935  s->linesize = s->uvlinesize = 0;
936 }
937 
938 
940  uint8_t *dest, uint8_t *src,
941  int field_based, int field_select,
942  int src_x, int src_y,
943  int width, int height, ptrdiff_t stride,
944  int h_edge_pos, int v_edge_pos,
945  int w, int h, h264_chroma_mc_func *pix_op,
946  int motion_x, int motion_y)
947 {
948  const int lowres = s->avctx->lowres;
949  const int op_index = FFMIN(lowres, 3);
950  const int s_mask = (2 << lowres) - 1;
951  int emu = 0;
952  int sx, sy;
953 
954  if (s->quarter_sample) {
955  motion_x /= 2;
956  motion_y /= 2;
957  }
958 
959  sx = motion_x & s_mask;
960  sy = motion_y & s_mask;
961  src_x += motion_x >> lowres + 1;
962  src_y += motion_y >> lowres + 1;
963 
964  src += src_y * stride + src_x;
965 
966  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
967  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
968  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
969  s->linesize, s->linesize,
970  w + 1, (h + 1) << field_based,
971  src_x, src_y << field_based,
972  h_edge_pos, v_edge_pos);
973  src = s->sc.edge_emu_buffer;
974  emu = 1;
975  }
976 
977  sx = (sx << 2) >> lowres;
978  sy = (sy << 2) >> lowres;
979  if (field_select)
980  src += s->linesize;
981  pix_op[op_index](dest, src, stride, h, sx, sy);
982  return emu;
983 }
984 
985 /* apply one mpeg motion vector to the three components */
987  uint8_t *dest_y,
988  uint8_t *dest_cb,
989  uint8_t *dest_cr,
990  int field_based,
991  int bottom_field,
992  int field_select,
993  uint8_t **ref_picture,
994  h264_chroma_mc_func *pix_op,
995  int motion_x, int motion_y,
996  int h, int mb_y)
997 {
998  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
999  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
1000  ptrdiff_t uvlinesize, linesize;
1001  const int lowres = s->avctx->lowres;
1002  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 3);
1003  const int block_s = 8>>lowres;
1004  const int s_mask = (2 << lowres) - 1;
1005  const int h_edge_pos = s->h_edge_pos >> lowres;
1006  const int v_edge_pos = s->v_edge_pos >> lowres;
1007  linesize = s->current_picture.f->linesize[0] << field_based;
1008  uvlinesize = s->current_picture.f->linesize[1] << field_based;
1009 
1010  // FIXME obviously not perfect but qpel will not work in lowres anyway
1011  if (s->quarter_sample) {
1012  motion_x /= 2;
1013  motion_y /= 2;
1014  }
1015 
1016  if(field_based){
1017  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
1018  }
1019 
1020  sx = motion_x & s_mask;
1021  sy = motion_y & s_mask;
1022  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
1023  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1024 
1025  if (s->out_format == FMT_H263) {
1026  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1027  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1028  uvsrc_x = src_x >> 1;
1029  uvsrc_y = src_y >> 1;
1030  } else if (s->out_format == FMT_H261) {
1031  // even chroma mv's are full pel in H261
1032  mx = motion_x / 4;
1033  my = motion_y / 4;
1034  uvsx = (2 * mx) & s_mask;
1035  uvsy = (2 * my) & s_mask;
1036  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
1037  uvsrc_y = mb_y * block_s + (my >> lowres);
1038  } else {
1039  if(s->chroma_y_shift){
1040  mx = motion_x / 2;
1041  my = motion_y / 2;
1042  uvsx = mx & s_mask;
1043  uvsy = my & s_mask;
1044  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
1045  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1046  } else {
1047  if(s->chroma_x_shift){
1048  //Chroma422
1049  mx = motion_x / 2;
1050  uvsx = mx & s_mask;
1051  uvsy = motion_y & s_mask;
1052  uvsrc_y = src_y;
1053  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1054  } else {
1055  //Chroma444
1056  uvsx = motion_x & s_mask;
1057  uvsy = motion_y & s_mask;
1058  uvsrc_x = src_x;
1059  uvsrc_y = src_y;
1060  }
1061  }
1062  }
1063 
1064  ptr_y = ref_picture[0] + src_y * linesize + src_x;
1065  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1066  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1067 
1068  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1069  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1070  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
1071  linesize >> field_based, linesize >> field_based,
1072  17, 17 + field_based,
1073  src_x, src_y << field_based, h_edge_pos,
1074  v_edge_pos);
1075  ptr_y = s->sc.edge_emu_buffer;
1076  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1077  uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
1078  uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
1079  if (s->workaround_bugs & FF_BUG_IEDGE)
1080  vbuf -= s->uvlinesize;
1081  s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
1082  uvlinesize >> field_based, uvlinesize >> field_based,
1083  9, 9 + field_based,
1084  uvsrc_x, uvsrc_y << field_based,
1085  h_edge_pos >> 1, v_edge_pos >> 1);
1086  s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
1087  uvlinesize >> field_based,uvlinesize >> field_based,
1088  9, 9 + field_based,
1089  uvsrc_x, uvsrc_y << field_based,
1090  h_edge_pos >> 1, v_edge_pos >> 1);
1091  ptr_cb = ubuf;
1092  ptr_cr = vbuf;
1093  }
1094  }
1095 
1096  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
1097  if (bottom_field) {
1098  dest_y += s->linesize;
1099  dest_cb += s->uvlinesize;
1100  dest_cr += s->uvlinesize;
1101  }
1102 
1103  if (field_select) {
1104  ptr_y += s->linesize;
1105  ptr_cb += s->uvlinesize;
1106  ptr_cr += s->uvlinesize;
1107  }
1108 
1109  sx = (sx << 2) >> lowres;
1110  sy = (sy << 2) >> lowres;
1111  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
1112 
1113  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1114  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
1115  uvsx = (uvsx << 2) >> lowres;
1116  uvsy = (uvsy << 2) >> lowres;
1117  if (hc) {
1118  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1119  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1120  }
1121  }
1122  // FIXME h261 lowres loop filter
1123 }
1124 
1126  uint8_t *dest_cb, uint8_t *dest_cr,
1127  uint8_t **ref_picture,
1128  h264_chroma_mc_func * pix_op,
1129  int mx, int my)
1130 {
1131  const int lowres = s->avctx->lowres;
1132  const int op_index = FFMIN(lowres, 3);
1133  const int block_s = 8 >> lowres;
1134  const int s_mask = (2 << lowres) - 1;
1135  const int h_edge_pos = s->h_edge_pos >> lowres + 1;
1136  const int v_edge_pos = s->v_edge_pos >> lowres + 1;
1137  int emu = 0, src_x, src_y, sx, sy;
1138  ptrdiff_t offset;
1139  uint8_t *ptr;
1140 
1141  if (s->quarter_sample) {
1142  mx /= 2;
1143  my /= 2;
1144  }
1145 
1146  /* In case of 8X8, we construct a single chroma motion vector
1147  with a special rounding */
1148  mx = ff_h263_round_chroma(mx);
1149  my = ff_h263_round_chroma(my);
1150 
1151  sx = mx & s_mask;
1152  sy = my & s_mask;
1153  src_x = s->mb_x * block_s + (mx >> lowres + 1);
1154  src_y = s->mb_y * block_s + (my >> lowres + 1);
1155 
1156  offset = src_y * s->uvlinesize + src_x;
1157  ptr = ref_picture[1] + offset;
1158  if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1159  (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1160  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1161  s->uvlinesize, s->uvlinesize,
1162  9, 9,
1163  src_x, src_y, h_edge_pos, v_edge_pos);
1164  ptr = s->sc.edge_emu_buffer;
1165  emu = 1;
1166  }
1167  sx = (sx << 2) >> lowres;
1168  sy = (sy << 2) >> lowres;
1169  pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1170 
1171  ptr = ref_picture[2] + offset;
1172  if (emu) {
1173  s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
1174  s->uvlinesize, s->uvlinesize,
1175  9, 9,
1176  src_x, src_y, h_edge_pos, v_edge_pos);
1177  ptr = s->sc.edge_emu_buffer;
1178  }
1179  pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1180 }
1181 
1182 /**
1183  * motion compensation of a single macroblock
1184  * @param s context
1185  * @param dest_y luma destination pointer
1186  * @param dest_cb chroma cb/u destination pointer
1187  * @param dest_cr chroma cr/v destination pointer
1188  * @param dir direction (0->forward, 1->backward)
1189  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1190  * @param pix_op halfpel motion compensation function (average or put normally)
1191  * the motion vectors are taken from s->mv and the MV type from s->mv_type
1192  */
1193 static inline void MPV_motion_lowres(MpegEncContext *s,
1194  uint8_t *dest_y, uint8_t *dest_cb,
1195  uint8_t *dest_cr,
1196  int dir, uint8_t **ref_picture,
1197  h264_chroma_mc_func *pix_op)
1198 {
1199  int mx, my;
1200  int mb_x, mb_y, i;
1201  const int lowres = s->avctx->lowres;
1202  const int block_s = 8 >>lowres;
1203 
1204  mb_x = s->mb_x;
1205  mb_y = s->mb_y;
1206 
     /* Dispatch on the MV partitioning of the current macroblock; each case
      * forwards to mpeg_motion_lowres()/hpel_motion_lowres() with the
      * geometry (block size, field flags) that partitioning implies. */
1207  switch (s->mv_type) {
1208  case MV_TYPE_16X16:
     /* one vector for the whole 16x16 macroblock */
1209  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1210  0, 0, 0,
1211  ref_picture, pix_op,
1212  s->mv[dir][0][0], s->mv[dir][0][1],
1213  2 * block_s, mb_y);
1214  break;
1215  case MV_TYPE_8X8:
     /* four luma vectors; chroma gets one vector derived from their sum */
1216  mx = 0;
1217  my = 0;
1218  for (i = 0; i < 4; i++) {
1219  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1220  s->linesize) * block_s,
1221  ref_picture[0], 0, 0,
1222  (2 * mb_x + (i & 1)) * block_s,
1223  (2 * mb_y + (i >> 1)) * block_s,
1224  s->width, s->height, s->linesize,
1225  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1226  block_s, block_s, pix_op,
1227  s->mv[dir][i][0], s->mv[dir][i][1]);
1228 
1229  mx += s->mv[dir][i][0];
1230  my += s->mv[dir][i][1];
1231  }
1232 
1233  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1234  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1235  pix_op, mx, my);
1236  break;
1237  case MV_TYPE_FIELD:
1238  if (s->picture_structure == PICT_FRAME) {
     /* frame picture with two field vectors: interpolate each field
      * separately into the interleaved destination */
1239  /* top field */
1240  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1241  1, 0, s->field_select[dir][0],
1242  ref_picture, pix_op,
1243  s->mv[dir][0][0], s->mv[dir][0][1],
1244  block_s, mb_y);
1245  /* bottom field */
1246  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1247  1, 1, s->field_select[dir][1],
1248  ref_picture, pix_op,
1249  s->mv[dir][1][0], s->mv[dir][1][1],
1250  block_s, mb_y);
1251  } else {
     /* field picture: when referencing the other field of the frame
      * currently being decoded, read from current_picture instead */
1252  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1253  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1254  ref_picture = s->current_picture_ptr->f->data;
1255 
1256  }
1257  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1258  0, 0, s->field_select[dir][0],
1259  ref_picture, pix_op,
1260  s->mv[dir][0][0],
1261  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1262  }
1263  break;
1264  case MV_TYPE_16X8:
     /* two vectors, one per 16x8 half of the macroblock (field pictures) */
1265  for (i = 0; i < 2; i++) {
1266  uint8_t **ref2picture;
1267 
1268  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1269  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1270  ref2picture = ref_picture;
1271  } else {
     /* other field of the frame being decoded */
1272  ref2picture = s->current_picture_ptr->f->data;
1273  }
1274 
1275  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1276  0, 0, s->field_select[dir][i],
1277  ref2picture, pix_op,
1278  s->mv[dir][i][0], s->mv[dir][i][1] +
1279  2 * block_s * i, block_s, mb_y >> 1);
1280 
1281  dest_y += 2 * block_s * s->linesize;
1282  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1283  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1284  }
1285  break;
1286  case MV_TYPE_DMV:
     /* MPEG-2 dual prime: predict from both parities and average */
1287  if (s->picture_structure == PICT_FRAME) {
1288  for (i = 0; i < 2; i++) {
1289  int j;
1290  for (j = 0; j < 2; j++) {
1291  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1292  1, j, j ^ i,
1293  ref_picture, pix_op,
1294  s->mv[dir][2 * i + j][0],
1295  s->mv[dir][2 * i + j][1],
1296  block_s, mb_y);
1297  }
1298  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1299  }
1300  } else {
1301  for (i = 0; i < 2; i++) {
1302  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1303  0, 0, s->picture_structure != i + 1,
1304  ref_picture, pix_op,
1305  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1306  2 * block_s, mb_y >> 1);
1307 
1308  // after put we make avg of the same block
1309  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1310 
1311  // opposite parity is always in the same
1312  // frame if this is second field
1313  if (!s->first_field) {
1314  ref_picture = s->current_picture_ptr->f->data;
1315  }
1316  }
1317  }
1318  break;
1319  default:
1320  av_assert2(0);
1321  }
1322 }
1323 
1324 /**
1325  * find the lowest MB row referenced in the MVs
1326  */
1328 {
1329  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1330  int my, off, i, mvs;
1331 
1332  if (s->picture_structure != PICT_FRAME || s->mcsel)
1333  goto unhandled;
1334 
1335  switch (s->mv_type) {
1336  case MV_TYPE_16X16:
1337  mvs = 1;
1338  break;
1339  case MV_TYPE_16X8:
1340  mvs = 2;
1341  break;
1342  case MV_TYPE_8X8:
1343  mvs = 4;
1344  break;
1345  default:
1346  goto unhandled;
1347  }
1348 
1349  for (i = 0; i < mvs; i++) {
1350  my = s->mv[dir][i][1];
1351  my_max = FFMAX(my_max, my);
1352  my_min = FFMIN(my_min, my);
1353  }
1354 
1355  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1356 
1357  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1358 unhandled:
1359  return s->mb_height-1;
1360 }
1361 
1362 /* put block[] to dest[] */
1363 static inline void put_dct(MpegEncContext *s,
1364  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1365 {
1366  s->dct_unquantize_intra(s, block, i, qscale);
1367  s->idsp.idct_put(dest, line_size, block);
1368 }
1369 
1370 /* add block[] to dest[] */
1371 static inline void add_dct(MpegEncContext *s,
1372  int16_t *block, int i, uint8_t *dest, int line_size)
1373 {
1374  if (s->block_last_index[i] >= 0) {
1375  s->idsp.idct_add(dest, line_size, block);
1376  }
1377 }
1378 
1379 static inline void add_dequant_dct(MpegEncContext *s,
1380  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1381 {
1382  if (s->block_last_index[i] >= 0) {
1383  s->dct_unquantize_inter(s, block, i, qscale);
1384 
1385  s->idsp.idct_add(dest, line_size, block);
1386  }
1387 }
1388 
1389 /**
1390  * Clean dc, ac, coded_block for the current non-intra MB.
1391  */
1393 {
1394  int wrap = s->b8_stride;
1395  int xy = s->block_index[0];
1396 
1397  s->dc_val[0][xy ] =
1398  s->dc_val[0][xy + 1 ] =
1399  s->dc_val[0][xy + wrap] =
1400  s->dc_val[0][xy + 1 + wrap] = 1024;
1401  /* ac pred */
1402  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1403  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1404  if (s->msmpeg4_version>=3) {
1405  s->coded_block[xy ] =
1406  s->coded_block[xy + 1 ] =
1407  s->coded_block[xy + wrap] =
1408  s->coded_block[xy + 1 + wrap] = 0;
1409  }
1410  /* chroma */
1411  wrap = s->mb_stride;
1412  xy = s->mb_x + s->mb_y * wrap;
1413  s->dc_val[1][xy] =
1414  s->dc_val[2][xy] = 1024;
1415  /* ac pred */
1416  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1417  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1418 
1419  s->mbintra_table[xy]= 0;
1420 }
1421 
1422 /* generic function called after a macroblock has been parsed by the
1423  decoder or after it has been encoded by the encoder.
1424 
1425  Important variables used:
1426  s->mb_intra : true if intra macroblock
1427  s->mv_dir : motion vector direction
1428  s->mv_type : motion vector type
1429  s->mv : motion vector
1430  s->interlaced_dct : true if interlaced dct used (mpeg2)
1431  */
1432 static av_always_inline
/* NOTE(review): the extraction dropped the original line between these two;
 * per the cross-reference index the full signature is:
 *   static void mpv_reconstruct_mb_internal(MpegEncContext *s,
 *                                           int16_t block[12][64],
 *                                           int lowres_flag, int is_mpeg12)
 * -- confirm against upstream mpegvideo.c. */
1434  int lowres_flag, int is_mpeg12)
1435 {
1436 #define IS_ENCODER(s) (CONFIG_MPEGVIDEOENC && !lowres_flag && (s)->encoding)
1437 #define IS_MPEG12(s) (CONFIG_SMALL ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
1438  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1439 
1440  s->current_picture.qscale_table[mb_xy] = s->qscale;
1441 
1442  /* update DC predictors for P macroblocks */
1443  if (!s->mb_intra) {
1444  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1445  if(s->mbintra_table[mb_xy])
/* NOTE(review): a line is missing here in the extraction; the guarded call
 * is presumably ff_clean_intra_table_entries(s) (see its definition above)
 * -- confirm against upstream. */
1447  } else {
1448  s->last_dc[0] =
1449  s->last_dc[1] =
1450  s->last_dc[2] = 128 << s->intra_dc_precision;
1451  }
1452  }
1453  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1454  s->mbintra_table[mb_xy]=1;
1455 
     /* Reconstruct pixels unless the encoder can prove they are unused
      * (non-RD mode decision on intra-only or B frames, no PSNR, no
      * frame skipping). */
1456  if (!IS_ENCODER(s) || (s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
1457  !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1458  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
1459  uint8_t *dest_y, *dest_cb, *dest_cr;
1460  int dct_linesize, dct_offset;
1461  op_pixels_func (*op_pix)[4];
1462  qpel_mc_func (*op_qpix)[16];
1463  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1464  const int uvlinesize = s->current_picture.f->linesize[1];
1465  const int readable = s->pict_type != AV_PICTURE_TYPE_B || IS_ENCODER(s) || s->avctx->draw_horiz_band || lowres_flag;
1466  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1467 
1468  /* avoid copy if macroblock skipped in last frame too */
1469  /* skip only during decoding as we might trash the buffers during encoding a bit */
1470  if (!IS_ENCODER(s)) {
1471  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1472 
1473  if (s->mb_skipped) {
1474  s->mb_skipped= 0;
1475  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
1476  *mbskip_ptr = 1;
1477  } else if(!s->current_picture.reference) {
1478  *mbskip_ptr = 1;
1479  } else{
1480  *mbskip_ptr = 0; /* not skipped */
1481  }
1482  }
1483 
     /* interlaced DCT interleaves the two fields: double the stride and
      * start the "second row of blocks" one line down */
1484  dct_linesize = linesize << s->interlaced_dct;
1485  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
1486 
1487  if(readable){
1488  dest_y= s->dest[0];
1489  dest_cb= s->dest[1];
1490  dest_cr= s->dest[2];
1491  }else{
     /* B-frame output nobody may read yet: render into scratchpad and
      * copy out after skip_idct below */
1492  dest_y = s->sc.b_scratchpad;
1493  dest_cb= s->sc.b_scratchpad+16*linesize;
1494  dest_cr= s->sc.b_scratchpad+32*linesize;
1495  }
1496 
1497  if (!s->mb_intra) {
1498  /* motion handling */
1499  /* decoding or more than one mb_type (MC was already done otherwise) */
1500  if (!IS_ENCODER(s)) {
1501 
1502  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
1503  if (s->mv_dir & MV_DIR_FORWARD) {
1504  ff_thread_await_progress(&s->last_picture_ptr->tf,
/* NOTE(review): the extraction dropped an argument line here; presumably
 * lowest_referenced_row(s, 0), (see that helper above) -- confirm. */
1506  0);
1507  }
1508  if (s->mv_dir & MV_DIR_BACKWARD) {
1509  ff_thread_await_progress(&s->next_picture_ptr->tf,
/* NOTE(review): same here; presumably lowest_referenced_row(s, 1), */
1511  0);
1512  }
1513  }
1514 
1515  if(lowres_flag){
1516  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
1517 
1518  if (s->mv_dir & MV_DIR_FORWARD) {
1519  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
1520  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
1521  }
1522  if (s->mv_dir & MV_DIR_BACKWARD) {
1523  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
1524  }
1525  }else{
1526  op_qpix = s->me.qpel_put;
1527  if ((is_mpeg12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
1528  op_pix = s->hdsp.put_pixels_tab;
1529  }else{
1530  op_pix = s->hdsp.put_no_rnd_pixels_tab;
1531  }
1532  if (s->mv_dir & MV_DIR_FORWARD) {
1533  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
     /* bidirectional: the backward pass averages onto the forward one */
1534  op_pix = s->hdsp.avg_pixels_tab;
1535  op_qpix= s->me.qpel_avg;
1536  }
1537  if (s->mv_dir & MV_DIR_BACKWARD) {
1538  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
1539  }
1540  }
1541  }
1542 
1543  /* skip dequant / idct if we are really late ;) */
1544  if(s->avctx->skip_idct){
1545  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1546  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1547  || s->avctx->skip_idct >= AVDISCARD_ALL)
1548  goto skip_idct;
1549  }
1550 
1551  /* add dct residue */
1552  if (IS_ENCODER(s) || !(IS_MPEG12(s) || s->msmpeg4_version
1553  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
1554  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1555  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1556  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1557  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1558 
1559  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1560  if (s->chroma_y_shift){
1561  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1562  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1563  }else{
1564  dct_linesize >>= 1;
1565  dct_offset >>=1;
1566  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1567  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1568  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1569  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1570  }
1571  }
1572  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
     /* residue is already dequantized (MPEG-1/2, MSMPEG4, MPEG-4 MPEG-quant) */
1573  add_dct(s, block[0], 0, dest_y , dct_linesize);
1574  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
1575  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
1576  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
1577 
1578  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1579  if(s->chroma_y_shift){//Chroma420
1580  add_dct(s, block[4], 4, dest_cb, uvlinesize);
1581  add_dct(s, block[5], 5, dest_cr, uvlinesize);
1582  }else{
1583  //chroma422
1584  dct_linesize = uvlinesize << s->interlaced_dct;
1585  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1586 
1587  add_dct(s, block[4], 4, dest_cb, dct_linesize);
1588  add_dct(s, block[5], 5, dest_cr, dct_linesize);
1589  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
1590  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
1591  if(!s->chroma_x_shift){//Chroma444
1592  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
1593  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
1594  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
1595  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
1596  }
1597  }
1598  }//fi gray
1599  } else if (CONFIG_WMV2_DECODER) {
1600  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
1601  }
1602  } else {
1603  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
1604  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
1605  if (!is_mpeg12 && s->avctx->bits_per_raw_sample > 8) {
1606  const int act_block_size = block_size * 2;
1607 
1608  if(s->dpcm_direction == 0) {
1609  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
1610  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
1611  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
1612  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
1613 
1614  dct_linesize = uvlinesize << s->interlaced_dct;
1615  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1616 
1617  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
1618  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
1619  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
1620  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
1621  if(!s->chroma_x_shift){//Chroma444
1622  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
1623  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
1624  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
1625  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
1626  }
1627  } else if(s->dpcm_direction == 1) {
     /* left-to-right DPCM: copy the raw 16-bit samples row by row */
1628  int i, w, h;
1629  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
1630  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
1631  for(i = 0; i < 3; i++) {
1632  int idx = 0;
1633  int vsub = i ? s->chroma_y_shift : 0;
1634  int hsub = i ? s->chroma_x_shift : 0;
1635  for(h = 0; h < (16 >> vsub); h++){
1636  for(w = 0; w < (16 >> hsub); w++)
1637  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
1638  dest_pcm[i] += linesize[i] / 2;
1639  }
1640  }
1641  } else {
     /* right-to-left DPCM: fill bottom-up, right-to-left; note the
      * loops stop at 1, leaving row 0 / column 0 untouched here */
1642  int i, w, h;
1643  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
1644  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
1645  av_assert2(s->dpcm_direction == -1);
1646  for(i = 0; i < 3; i++) {
1647  int idx = 0;
1648  int vsub = i ? s->chroma_y_shift : 0;
1649  int hsub = i ? s->chroma_x_shift : 0;
1650  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
1651  for(h = (16 >> vsub)-1; h >= 1; h--){
1652  for(w = (16 >> hsub)-1; w >= 1; w--)
1653  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
1654  dest_pcm[i] -= linesize[i] / 2;
1655  }
1656  }
1657  }
1658  }
1659  /* dct only in intra block */
1660  else if (IS_ENCODER(s) || !IS_MPEG12(s)) {
1661  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1662  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1663  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1664  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1665 
1666  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1667  if(s->chroma_y_shift){
1668  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1669  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
1670  }else{
1671  dct_offset >>=1;
1672  dct_linesize >>=1;
1673  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
1674  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
1675  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
1676  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
1677  }
1678  }
1679  }else{
     /* MPEG-1/2 intra: coefficients are already dequantized */
1680  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
1681  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
1682  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
1683  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
1684 
1685  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1686  if(s->chroma_y_shift){
1687  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
1688  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
1689  }else{
1690 
1691  dct_linesize = uvlinesize << s->interlaced_dct;
1692  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
1693 
1694  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
1695  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
1696  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
1697  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
1698  if(!s->chroma_x_shift){//Chroma444
1699  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
1700  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
1701  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
1702  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
1703  }
1704  }
1705  }//gray
1706  }
1707  }
1708 skip_idct:
1709  if(!readable){
     /* copy the scratchpad reconstruction back to the real destination */
1710  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
1711  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
1712  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
1713  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
1714  }
1715  }
1716  }
1717 }
1718 
/* NOTE(review): the extraction dropped the signature line; from the calls
 * into mpv_reconstruct_mb_internal(s, block, ...) below this is presumably
 *   void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
 * -- confirm against upstream mpegvideo.c.
 * Public entry point: reconstruct the current macroblock (hwaccel path,
 * optional coefficient dump, then dispatch to the internal worker). */
1720 {
1721  if (CONFIG_XVMC &&
1722  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1723  s->avctx->hwaccel->decode_mb(s); //xvmc uses pblocks
1724  return;
1725  }
1726 
1727  if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
1728  /* print DCT coefficients */
1729  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1730  for (int i = 0; i < 6; i++) {
1731  for (int j = 0; j < 64; j++) {
1732  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1733  block[i][s->idsp.idct_permutation[j]]);
1734  }
1735  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1736  }
1737  }
1738 
     /* Dispatch to specialized instantiations of the always-inline worker;
      * with CONFIG_SMALL only the generic one is compiled in. */
1739 #if !CONFIG_SMALL
1740  if(s->out_format == FMT_MPEG1) {
1741  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
1742  else mpv_reconstruct_mb_internal(s, block, 0, 1);
1743  } else
1744 #endif
1745  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
1746  else mpv_reconstruct_mb_internal(s, block, 0, 0);
1747 }
1748 
/**
 * Set up s->block_index[] and s->dest[] for the macroblock at
 * (s->mb_x, s->mb_y).  The indices/pointers are computed one MB to the
 * LEFT of mb_x (note the "- 2"/"- 1"/"- 1U" terms) so that the per-MB
 * decode loop can advance them before use.
 */
1749 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
1750  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
1751  const int uvlinesize = s->current_picture.f->linesize[1];
     /* log2 of the MB width/height in bytes/lines: 16 pixels, doubled for
      * >8-bit samples, halved per lowres level */
1752  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
1753  const int height_of_mb = 4 - s->avctx->lowres;
1754 
     /* luma: four 8x8 blocks on the b8 grid; chroma: one entry per MB,
      * stored after the luma area (hence the b8_stride*mb_height*2 offset) */
1755  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
1756  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
1757  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
1758  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
1759  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1760  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
1761  //block_index is not used by mpeg2, so it is not affected by chroma_format
1762 
1763  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
1764  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1765  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
1766 
     /* For B frames with draw_horiz_band in frame pictures the vertical
      * offset is applied elsewhere; otherwise advance dest[] to the MB row
      * (field pictures use mb_y/2 rows of the field). */
1767  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
1768  {
1769  if(s->picture_structure==PICT_FRAME){
1770  s->dest[0] += s->mb_y * linesize << height_of_mb;
1771  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1772  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
1773  }else{
1774  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
1775  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1776  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
1777  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
1778  }
1779  }
1780 }
1781 
1782 /**
1783  * set qscale and update qscale dependent variables.
1784  */
1785 void ff_set_qscale(MpegEncContext * s, int qscale)
1786 {
1787  if (qscale < 1)
1788  qscale = 1;
1789  else if (qscale > 31)
1790  qscale = 31;
1791 
1792  s->qscale = qscale;
1793  s->chroma_qscale= s->chroma_qscale_table[qscale];
1794 
1795  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
1796  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
1797 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:746
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:252
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
free_duplicate_contexts
static void free_duplicate_contexts(MpegEncContext *s)
Definition: mpegvideo.c:442
level
uint8_t level
Definition: svq3.c:202
av_clip
#define av_clip
Definition: common.h:96
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:58
blockdsp.h
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:123
AVERROR
#define AVERROR(e)
Definition: error.h
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:986
ff_mpv_init_context_frame
int ff_mpv_init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:530
backup_duplicate_context
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:451
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:512
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:939
ff_mpeg1_dc_scale_table
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:254
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1327
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
w
uint8_t w
Definition: llviddspenc.c:38
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2dec.c:60
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1392
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
init_duplicate_context
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:346
ff_mpv_common_init_arm
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:44
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1749
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:86
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:247
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:281
mpegutils.h
thread.h
ff_thread_await_progress
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish the given row of a reference frame before reading from it.
free_duplicate_context
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:420
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:249
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:256
IS_MPEG12
#define IS_MPEG12(s)
hsub
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:74
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1433
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:311
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1125
dct_unquantize_mpeg1_inter_c
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:78
U
#define U(x)
Definition: vp56_arith.h:37
fail
#define fail()
Definition: checkasm.h:128
wrap
#define wrap(func)
Definition: neontest.h:65
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:122
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:35
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2688
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:33
dct_unquantize_mpeg1_intra_c
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:49
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:887
MpegEncContext::pblocks
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:484
avassert.h
gray16
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:266
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
width
#define width
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1371
s
#define s(width, name)
Definition: cbs_vp9.c:257
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
dct_unquantize_mpeg2_intra_bitexact
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:137
FMT_H261
@ FMT_H261
Definition: mpegutils.h:124
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
limits.h
ff_mpegvideodsp_init
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:326
me
#define me
Definition: vf_colormatrix.c:104
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1363
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:1785
mathops.h
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1266
lowres
static int lowres
Definition: ffplay.c:334
qpeldsp.h
ff_alternate_horizontal_scan
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:268
ME_MAP_SIZE
#define ME_MAP_SIZE
Definition: motion_est.h:38
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
wmv2.h
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
dct_unquantize_mpeg2_intra_c
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:107
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:253
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1308
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
ff_mpeg_er_init
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:100
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:972
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
height
#define height
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:255
IS_ENCODER
#define IS_ENCODER(s)
dct_unquantize_mpeg2_inter_c
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:171
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1451
clear_context
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:664
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
FMT_H263
@ FMT_H263
Definition: mpegutils.h:125
dct_unquantize_h263_inter_c
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:239
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ff_mpv_common_init_ppc
void ff_mpv_common_init_ppc(MpegEncContext *s)
Definition: mpegvideo_altivec.c:119
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:190
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1317
idctdsp.h
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
dct_init
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:279
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
ff_mpv_free_context_frame
void ff_mpv_free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:833
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:486
ScratchpadContext::edge_emu_buffer
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:37
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:481
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
dct_unquantize_h263_intra_c
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:205
ff_mpv_common_init_x86
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1193
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1719
ff_mpv_common_init_axp
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
Definition: mpegvideo_alpha.c:106
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:859
ff_mpv_common_init_mips
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
Definition: mpegvideo_init_mips.c:26
ff_default_chroma_qscale_table
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
ff_mpv_init_duplicate_contexts
int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:396
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:272
COPY
#define COPY(a)
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:408
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:248
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
mpeg_er.h
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1379
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
int
int
Definition: ffmpeg_filter.c:153
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
ff_mpv_common_init_neon
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:127
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:69
gray8
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:272