FFmpeg
mpegvideo.c
/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * The simplest mpeg encoder (well, it was the simplest!).
 */

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/motion_vector.h"
#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include "wmv2.h"
#include <limits.h>

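/* Inverse quantization for MPEG-1 intra blocks. The DC coefficient is
 * scaled by the current DC scale factor; every AC coefficient is
 * reconstructed as (level * qscale * quant_matrix[j]) >> 3 and then forced
 * odd via (level - 1) | 1, MPEG-1's oddification rule for IDCT mismatch
 * control. */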
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}

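/* MPEG-2 intra dequantization: qscale is either mapped through the
 * non-linear quantizer scale table or doubled, the shift becomes 4, and no
 * oddification is applied; MPEG-2 specifies mismatch control on the sum of
 * all coefficients instead (only done in the bitexact variant below). */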
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}

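/* Bit-exact MPEG-2 intra dequantization: same as above, but additionally
 * implements the spec's mismatch control by toggling the LSB of the last
 * coefficient (block[63]) whenever the sum of all coefficients is even. */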
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum+=level;
        }
    }
    block[63]^=sum&1;
}

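/* H.263-style dequantization uses no quantization matrix: each AC
 * coefficient is reconstructed as level * 2*qscale +/- qadd, with
 * qadd = (qscale - 1) | 1; qadd is forced to 0 when H.263 advanced intra
 * coding is in use, as the reconstruction must stay unbiased for the
 * coefficient prediction. */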
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        qadd = 0;
    }
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}

static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  int16_t *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}


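/* Debug helpers: with -debug nomc (FF_DEBUG_NOMC), dct_init() below installs
 * these in place of the usual put/avg pixel routines so every predicted
 * block is painted mid-gray (128) and only the DCT residue stays visible. */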
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 16);
}

static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    while(h--)
        memset(dst + h*linesize, 128, 8);
}

/* init common dct for both encoder and decoder */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);
    if (ARCH_MIPS)
        ff_mpv_common_init_mips(s);

    return 0;
}

av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}

static int alloc_picture(MpegEncContext *s, Picture *pic)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}

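/* Allocate the state each slice context needs of its own: motion-estimation
 * maps (encoder only), DCT block storage and, for H.263-style codecs, the
 * AC prediction values. The frame-size-dependent scratch buffers are
 * allocated lazily elsewhere, so they are only reset to NULL here. */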
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->sc.edge_emu_buffer =
    s->me.scratchpad      =
    s->me.temp            =
    s->sc.rd_scratchpad   =
    s->sc.b_scratchpad    =
    s->sc.obmc_scratchpad = NULL;

    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->block32, sizeof(*s->block32), fail)
    s->dpcm_direction = 0;
    FF_ALLOCZ_OR_GOTO(s->avctx, s->dpcm_macroblock, sizeof(*s->dpcm_macroblock), fail)

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return AVERROR(ENOMEM); // free() through ff_mpv_common_end()
}

static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->dpcm_macroblock);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}

static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}

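/* Copy src into dst while keeping dst's own allocations: the pointers
 * listed in backup_duplicate_context() are saved, the whole context is
 * memcpy'd over, and the saved pointers are then restored into dst. */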
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}

int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
//     s->picture_range_start  += MAX_PICTURE_COUNT;
//     s->picture_range_end    += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if((err = ff_mpv_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->quarter_sample       = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        if (s1->picture && s1->picture[i].f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
            return ret;
    }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

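/* Picture pointers in the source context point into s1->picture[]; rebase
 * them so they point at the corresponding entry of this context's own
 * picture array (or NULL if they do not belong to the array). */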
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}

/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    s->y_dc_scale_table     =
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}

/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
void ff_mpv_decode_defaults(MpegEncContext *s)
{
    ff_mpv_common_defaults(s);
}

void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
{
    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag       = avpriv_toupper4(avctx->codec_tag);
}

/**
 * Initialize and allocate MpegEncContext fields dependent on the resolution.
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2   * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);

    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker MPEG-4 slice_end detection

    return ff_mpeg_er_init(s);
fail:
    return AVERROR(ENOMEM);
}

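/* Reset every pointer member so that a partially initialized context can be
 * torn down safely by ff_mpv_common_end() should an allocation fail. */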
static void clear_context(MpegEncContext *s)
{
    int i, j, k;

    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture, 0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map = NULL;
    s->me.score_map = NULL;
    s->dct_error_sum = NULL;
    s->block = NULL;
    s->blocks = NULL;
    s->block32 = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->dpcm_direction = 0;
    s->dpcm_macroblock = NULL;
    s->ac_val_base = NULL;
    s->ac_val[0] =
    s->ac_val[1] =
    s->ac_val[2] = NULL;
    s->sc.edge_emu_buffer = NULL;
    s->me.scratchpad = NULL;
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;


    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture = NULL;
    s->mb_type = NULL;
    s->p_mv_table_base = NULL;
    s->b_forw_mv_table_base = NULL;
    s->b_back_mv_table_base = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base = NULL;
    s->p_mv_table = NULL;
    s->b_forw_mv_table = NULL;
    s->b_back_mv_table = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table_base[i][j][k] = NULL;
                s->b_field_mv_table[i][j][k] = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table_base[i][j] = NULL;
            s->p_field_mv_table[i][j] = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table = NULL;
    s->cbp_table = NULL;
    s->pred_dir_table = NULL;

    s->mbskip_table = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer = NULL;
    s->mb_index2xy = NULL;
    s->lambda_table = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}

/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail_nomem);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail_nomem;
    }
    s->next_picture.f = av_frame_alloc();
    if (!s->next_picture.f)
        goto fail_nomem;
    s->last_picture.f = av_frame_alloc();
    if (!s->last_picture.f)
        goto fail_nomem;
    s->current_picture.f = av_frame_alloc();
    if (!s->current_picture.f)
        goto fail_nomem;
    s->new_picture.f = av_frame_alloc();
    if (!s->new_picture.f)
        goto fail_nomem;

    if ((ret = init_context_frame(s)))
        goto fail_nomem;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;

// if (s->width && s->height) {
    if (nb_slices > 1) {
        for (i = 0; i < nb_slices; i++) {
            if (i) {
                s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                if (!s->thread_context[i])
                    goto fail_nomem;
            }
            if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if ((ret = init_duplicate_context(s)) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
// }

    return 0;
 fail_nomem:
    ret = AVERROR(ENOMEM);
 fail:
    ff_mpv_common_end(s);
    return ret;
}

/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * It is used during resolution changes to avoid a full reinitialization of
 * the codec.
 */
static void free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;
}

int ff_mpv_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        return err;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 0; i < nb_slices; i++) {
                if (i) {
                    s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                    if (!s->thread_context[i]) {
                        err = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return err;
}

/* init common structure for both encoder and decoder */
void ff_mpv_common_end(MpegEncContext *s)
{
    int i;

    if (!s)
        return;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    free_context_frame(s);

    s->context_initialized = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}


static void gray_frame(AVFrame *frame)
{
    int i, h_chroma_shift, v_chroma_shift;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);

    for(i=0; i<frame->height; i++)
        memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
    for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
        memset(frame->data[1] + frame->linesize[1]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
        memset(frame->data[2] + frame->linesize[2]*i,
               0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
    }
}

/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-124 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture       = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            for(i=0; i<avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
            }
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}

/* called after a frame has been decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
    emms_c();

    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}

void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val, &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}

int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
    int offset = 2*s->mb_stride + 1;
    if(!ref)
        return AVERROR(ENOMEM);
    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
    ref->size -= offset;
    ref->data += offset;
    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
}
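/* Lowres (1/2, 1/4 or 1/8 resolution) motion compensation follows. Motion
 * vectors keep their full-resolution precision, so the bits that no longer
 * select a full sample become a subpel phase that is fed to the H.264
 * chroma MC functions, which serve as generic bilinear interpolators here. */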

static int hpel_motion_lowres(MpegEncContext *s,
                              uint8_t *dest, uint8_t *src,
                              int field_based, int field_select,
                              int src_x, int src_y,
                              int width, int height, ptrdiff_t stride,
                              int h_edge_pos, int v_edge_pos,
                              int w, int h, h264_chroma_mc_func *pix_op,
                              int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}

/* apply one mpeg motion vector to the three components */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y =    mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
            } else {
            //Chroma444
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf,  ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf,  ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}

static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}

/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
1723  int mx, my;
1724  int mb_x, mb_y, i;
1725  const int lowres = s->avctx->lowres;
1726  const int block_s = 8 >>lowres;
1727 
1728  mb_x = s->mb_x;
1729  mb_y = s->mb_y;
1730 
1731  switch (s->mv_type) {
1732  case MV_TYPE_16X16:
1733  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1734  0, 0, 0,
1735  ref_picture, pix_op,
1736  s->mv[dir][0][0], s->mv[dir][0][1],
1737  2 * block_s, mb_y);
1738  break;
1739  case MV_TYPE_8X8:
1740  mx = 0;
1741  my = 0;
1742  for (i = 0; i < 4; i++) {
1743  hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
1744  s->linesize) * block_s,
1745  ref_picture[0], 0, 0,
1746  (2 * mb_x + (i & 1)) * block_s,
1747  (2 * mb_y + (i >> 1)) * block_s,
1748  s->width, s->height, s->linesize,
1749  s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1750  block_s, block_s, pix_op,
1751  s->mv[dir][i][0], s->mv[dir][i][1]);
1752 
1753  mx += s->mv[dir][i][0];
1754  my += s->mv[dir][i][1];
1755  }
1756 
1757  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
1758  chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
1759  pix_op, mx, my);
1760  break;
1761  case MV_TYPE_FIELD:
1762  if (s->picture_structure == PICT_FRAME) {
1763  /* top field */
1764  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1765  1, 0, s->field_select[dir][0],
1766  ref_picture, pix_op,
1767  s->mv[dir][0][0], s->mv[dir][0][1],
1768  block_s, mb_y);
1769  /* bottom field */
1770  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1771  1, 1, s->field_select[dir][1],
1772  ref_picture, pix_op,
1773  s->mv[dir][1][0], s->mv[dir][1][1],
1774  block_s, mb_y);
1775  } else {
1776  if (s->picture_structure != s->field_select[dir][0] + 1 &&
1777  s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
1778  ref_picture = s->current_picture_ptr->f->data;
1779 
1780  }
1781  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1782  0, 0, s->field_select[dir][0],
1783  ref_picture, pix_op,
1784  s->mv[dir][0][0],
1785  s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
1786  }
1787  break;
1788  case MV_TYPE_16X8:
1789  for (i = 0; i < 2; i++) {
1790  uint8_t **ref2picture;
1791 
1792  if (s->picture_structure == s->field_select[dir][i] + 1 ||
1793  s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
1794  ref2picture = ref_picture;
1795  } else {
1796  ref2picture = s->current_picture_ptr->f->data;
1797  }
1798 
1799  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1800  0, 0, s->field_select[dir][i],
1801  ref2picture, pix_op,
1802  s->mv[dir][i][0], s->mv[dir][i][1] +
1803  2 * block_s * i, block_s, mb_y >> 1);
1804 
1805  dest_y += 2 * block_s * s->linesize;
1806  dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1807  dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
1808  }
1809  break;
1810  case MV_TYPE_DMV:
1811  if (s->picture_structure == PICT_FRAME) {
1812  for (i = 0; i < 2; i++) {
1813  int j;
1814  for (j = 0; j < 2; j++) {
1815  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1816  1, j, j ^ i,
1817  ref_picture, pix_op,
1818  s->mv[dir][2 * i + j][0],
1819  s->mv[dir][2 * i + j][1],
1820  block_s, mb_y);
1821  }
1822  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1823  }
1824  } else {
1825  for (i = 0; i < 2; i++) {
1826  mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1827  0, 0, s->picture_structure != i + 1,
1828  ref_picture, pix_op,
1829  s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
1830  2 * block_s, mb_y >> 1);
1831 
1832  // after the put, the second call averages into the same block
1833  pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
1834 
1835  // the opposite-parity field is always in the
1836  // same frame if this is the second field
1837  if (!s->first_field) {
1838  ref_picture = s->current_picture_ptr->f->data;
1839  }
1840  }
1841  }
1842  break;
1843  default:
1844  av_assert2(0);
1845  }
1846 }
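/* [Annotation, not in the original source] A minimal sketch of how the lowres
 * scaling above works: each lowres step halves every dimension, so a 16x16
 * macroblock is reconstructed at 8x8 when lowres == 1.
 *
 *     int lowres  = 1;            // hypothetical decoder setting
 *     int block_s = 8 >> lowres;  // 4: half-size sub-blocks
 *     int mb_size = 2 * block_s;  // 8: the "16x16" MB shrinks to 8x8
 *
 * The bitstream motion vectors keep their full-resolution units; the lowres
 * MC helpers compare them against edge positions already shifted by lowres. */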
1847 
1848 /**
1849  * Find the lowest macroblock row referenced in the MVs.
1850  */
1851 static int lowest_referenced_row(MpegEncContext *s, int dir)
1852 {
1853  int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1854  int my, off, i, mvs;
1855 
1856  if (s->picture_structure != PICT_FRAME || s->mcsel)
1857  goto unhandled;
1858 
1859  switch (s->mv_type) {
1860  case MV_TYPE_16X16:
1861  mvs = 1;
1862  break;
1863  case MV_TYPE_16X8:
1864  mvs = 2;
1865  break;
1866  case MV_TYPE_8X8:
1867  mvs = 4;
1868  break;
1869  default:
1870  goto unhandled;
1871  }
1872 
1873  for (i = 0; i < mvs; i++) {
1874  my = s->mv[dir][i][1];
1875  my_max = FFMAX(my_max, my);
1876  my_min = FFMIN(my_min, my);
1877  }
1878 
1879  off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1880 
1881  return av_clip(s->mb_y + off, 0, s->mb_height - 1);
1882 unhandled:
1883  return s->mb_height-1;
1884 }
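/* [Annotation, not in the original source] Worked example for the row bound
 * above, assuming half-pel vectors (quarter_sample == 0, so qpel_shift == 1):
 * my = 35 half-pel = 70 quarter-pel; off = (70 + 63) >> 6 = 2, i.e. the
 * reference reaches at most two macroblock rows below the current one
 * (one MB row = 16 luma lines = 64 quarter-pel units). */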
1885 
1886 /* put block[] to dest[] */
1887 static inline void put_dct(MpegEncContext *s,
1888  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1889 {
1890  s->dct_unquantize_intra(s, block, i, qscale);
1891  s->idsp.idct_put(dest, line_size, block);
1892 }
1893 
1894 /* add block[] to dest[] */
1895 static inline void add_dct(MpegEncContext *s,
1896  int16_t *block, int i, uint8_t *dest, int line_size)
1897 {
1898  if (s->block_last_index[i] >= 0) {
1899  s->idsp.idct_add(dest, line_size, block);
1900  }
1901 }
1902 
1903 static inline void add_dequant_dct(MpegEncContext *s,
1904  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1905 {
1906  if (s->block_last_index[i] >= 0) {
1907  s->dct_unquantize_inter(s, block, i, qscale);
1908 
1909  s->idsp.idct_add(dest, line_size, block);
1910  }
1911 }
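/* [Annotation, not in the original source] The helpers above mirror the two
 * reconstruction paths: intra blocks carry the full signal, so put_dct()
 * overwrites dest[] with the inverse transform, while inter blocks carry only
 * a residual, so add_dct()/add_dequant_dct() add the inverse transform onto
 * the motion-compensated prediction already sitting in dest[]. */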
1912 
1913 /**
1914  * Clean dc, ac, coded_block for the current non-intra MB.
1915  */
1916 void ff_clean_intra_table_entries(MpegEncContext *s)
1917 {
1918  int wrap = s->b8_stride;
1919  int xy = s->block_index[0];
1920 
1921  s->dc_val[0][xy ] =
1922  s->dc_val[0][xy + 1 ] =
1923  s->dc_val[0][xy + wrap] =
1924  s->dc_val[0][xy + 1 + wrap] = 1024;
1925  /* ac pred */
1926  memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1927  memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1928  if (s->msmpeg4_version>=3) {
1929  s->coded_block[xy ] =
1930  s->coded_block[xy + 1 ] =
1931  s->coded_block[xy + wrap] =
1932  s->coded_block[xy + 1 + wrap] = 0;
1933  }
1934  /* chroma */
1935  wrap = s->mb_stride;
1936  xy = s->mb_x + s->mb_y * wrap;
1937  s->dc_val[1][xy] =
1938  s->dc_val[2][xy] = 1024;
1939  /* ac pred */
1940  memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1941  memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1942 
1943  s->mbintra_table[xy]= 0;
1944 }
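/* [Annotation, assumption] 1024 is the conventional reset value for the DC
 * predictors: the 8-bit mid-grey level 128 carried at the three extra bits
 * of precision used during DC prediction (128 << 3 == 1024). */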
1945 
1946 /* Generic function called after a macroblock has been parsed by the
1947  decoder or encoded by the encoder.
1948 
1949  Important variables used:
1950  s->mb_intra : true if intra macroblock
1951  s->mv_dir : motion vector direction
1952  s->mv_type : motion vector type
1953  s->mv : motion vector
1954  s->interlaced_dct : true if interlaced DCT is used (MPEG-2)
1955  */
1956 static av_always_inline
1957 void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
1958  int lowres_flag, int is_mpeg12)
1959 {
1960  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1961 
1962  if (CONFIG_XVMC &&
1963  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1964  s->avctx->hwaccel->decode_mb(s); // XvMC uses pblocks
1965  return;
1966  }
1967 
1968  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1969  /* print DCT coefficients */
1970  int i,j;
1971  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1972  for(i=0; i<6; i++){
1973  for(j=0; j<64; j++){
1974  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1975  block[i][s->idsp.idct_permutation[j]]);
1976  }
1977  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1978  }
1979  }
1980 
1981  s->current_picture.qscale_table[mb_xy] = s->qscale;
1982 
1983  /* update DC predictors for P macroblocks */
1984  if (!s->mb_intra) {
1985  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1986  if(s->mbintra_table[mb_xy])
1987  ff_clean_intra_table_entries(s);
1988  } else {
1989  s->last_dc[0] =
1990  s->last_dc[1] =
1991  s->last_dc[2] = 128 << s->intra_dc_precision;
1992  }
1993  }
1994  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1995  s->mbintra_table[mb_xy]=1;
1996 
1997  if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
1998  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
1999  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
2000  uint8_t *dest_y, *dest_cb, *dest_cr;
2001  int dct_linesize, dct_offset;
2002  op_pixels_func (*op_pix)[4];
2003  qpel_mc_func (*op_qpix)[16];
2004  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2005  const int uvlinesize = s->current_picture.f->linesize[1];
2006  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2007  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2008 
2009  /* avoid the copy if the macroblock was skipped in the last frame too */
2010  /* skip only during decoding, as encoding may partially trash the buffers */
2011  if(!s->encoding){
2012  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2013 
2014  if (s->mb_skipped) {
2015  s->mb_skipped= 0;
2016  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2017  *mbskip_ptr = 1;
2018  } else if(!s->current_picture.reference) {
2019  *mbskip_ptr = 1;
2020  } else{
2021  *mbskip_ptr = 0; /* not skipped */
2022  }
2023  }
2024 
2025  dct_linesize = linesize << s->interlaced_dct;
2026  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
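 /* [Annotation, not in the original source] Worked example: with line size L
  * and block_size 8, the progressive case uses dct_linesize = L and
  * dct_offset = 8 * L (the lower half of the MB), while interlaced DCT
  * deinterleaves the field lines with dct_linesize = 2 * L and
  * dct_offset = L (the odd lines). */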
2027 
2028  if(readable){
2029  dest_y= s->dest[0];
2030  dest_cb= s->dest[1];
2031  dest_cr= s->dest[2];
2032  }else{
2033  dest_y = s->sc.b_scratchpad;
2034  dest_cb= s->sc.b_scratchpad+16*linesize;
2035  dest_cr= s->sc.b_scratchpad+32*linesize;
2036  }
2037 
2038  if (!s->mb_intra) {
2039  /* motion handling */
2040  /* decoding or more than one mb_type (MC was already done otherwise) */
2041  if(!s->encoding){
2042 
2043  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2044  if (s->mv_dir & MV_DIR_FORWARD) {
2045  ff_thread_await_progress(&s->last_picture_ptr->tf,
2046  lowest_referenced_row(s, 0),
2047  0);
2048  }
2049  if (s->mv_dir & MV_DIR_BACKWARD) {
2050  ff_thread_await_progress(&s->next_picture_ptr->tf,
2051  lowest_referenced_row(s, 1),
2052  0);
2053  }
2054  }
2055 
2056  if(lowres_flag){
2057  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2058 
2059  if (s->mv_dir & MV_DIR_FORWARD) {
2060  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2061  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2062  }
2063  if (s->mv_dir & MV_DIR_BACKWARD) {
2064  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2065  }
2066  }else{
2067  op_qpix = s->me.qpel_put;
2068  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2069  op_pix = s->hdsp.put_pixels_tab;
2070  }else{
2071  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2072  }
2073  if (s->mv_dir & MV_DIR_FORWARD) {
2074  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2075  op_pix = s->hdsp.avg_pixels_tab;
2076  op_qpix= s->me.qpel_avg;
2077  }
2078  if (s->mv_dir & MV_DIR_BACKWARD) {
2079  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2080  }
2081  }
2082  }
2083 
2084  /* skip dequant/IDCT if we are really late ;) */
2085  if(s->avctx->skip_idct){
2086  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2087  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2088  || s->avctx->skip_idct >= AVDISCARD_ALL)
2089  goto skip_idct;
2090  }
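 /* [Annotation, not in the original source] The discard tiers nest:
  * AVDISCARD_NONREF skips the IDCT only for B-frames, AVDISCARD_NONKEY
  * for everything but I-frames, and AVDISCARD_ALL unconditionally. */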
2091 
2092  /* add dct residue */
2093  if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2094  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2095  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2096  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2097  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2098  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2099 
2100  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2101  if (s->chroma_y_shift){
2102  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2103  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2104  }else{
2105  dct_linesize >>= 1;
2106  dct_offset >>=1;
2107  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2108  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2109  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2110  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2111  }
2112  }
2113  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2114  add_dct(s, block[0], 0, dest_y , dct_linesize);
2115  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2116  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2117  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2118 
2119  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2120  if(s->chroma_y_shift){//Chroma420
2121  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2122  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2123  }else{
2124  //Chroma422
2125  dct_linesize = uvlinesize << s->interlaced_dct;
2126  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2127 
2128  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2129  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2130  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2131  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2132  if(!s->chroma_x_shift){//Chroma444
2133  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2134  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2135  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2136  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2137  }
2138  }
2139  }//fi gray
2140  }
2141  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2142  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2143  }
2144  } else {
2145  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
2146  TODO: Integrate 10-bit support into mpegvideo.c properly so that error resilience works. */
2147  if (s->avctx->bits_per_raw_sample > 8){
2148  const int act_block_size = block_size * 2;
2149 
2150  if(s->dpcm_direction == 0) {
2151  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
2152  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
2153  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
2154  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
2155 
2156  dct_linesize = uvlinesize << s->interlaced_dct;
2157  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2158 
2159  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
2160  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
2161  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
2162  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
2163  if(!s->chroma_x_shift){//Chroma444
2164  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
2165  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
2166  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
2167  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
2168  }
2169  } else if(s->dpcm_direction == 1) {
2170  int i, w, h;
2171  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2172  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2173  for(i = 0; i < 3; i++) {
2174  int idx = 0;
2175  int vsub = i ? s->chroma_y_shift : 0;
2176  int hsub = i ? s->chroma_x_shift : 0;
2177  for(h = 0; h < (16 >> vsub); h++){
2178  for(w = 0; w < (16 >> hsub); w++)
2179  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2180  dest_pcm[i] += linesize[i] / 2;
2181  }
2182  }
2183  } else if(s->dpcm_direction == -1) {
2184  int i, w, h;
2185  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2186  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2187  for(i = 0; i < 3; i++) {
2188  int idx = 0;
2189  int vsub = i ? s->chroma_y_shift : 0;
2190  int hsub = i ? s->chroma_x_shift : 0;
2191  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
2192  for(h = (16 >> vsub)-1; h >= 1; h--){
2193  for(w = (16 >> hsub)-1; w >= 1; w--)
2194  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2195  dest_pcm[i] -= linesize[i] / 2;
2196  }
2197  }
2198  }
2199  }
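 /* [Annotation, assumption] dpcm_direction selects the scan order of the
  * raw PCM macroblock copy: +1 walks the rows top-down, -1 walks them
  * bottom-up with the inner loop reversed, matching the direction in
  * which the DPCM data was coded. */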
2200  /* dct only in intra block */
2201  else if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2202  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2203  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2204  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2205  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2206 
2207  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2208  if(s->chroma_y_shift){
2209  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2210  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2211  }else{
2212  dct_offset >>=1;
2213  dct_linesize >>=1;
2214  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2215  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2216  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2217  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2218  }
2219  }
2220  }else{
2221  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2222  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2223  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2224  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2225 
2226  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2227  if(s->chroma_y_shift){
2228  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2229  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2230  }else{
2231 
2232  dct_linesize = uvlinesize << s->interlaced_dct;
2233  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2234 
2235  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2236  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2237  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2238  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2239  if(!s->chroma_x_shift){//Chroma444
2240  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2241  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2242  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2243  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2244  }
2245  }
2246  }//gray
2247  }
2248  }
2249 skip_idct:
2250  if(!readable){
2251  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2252  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2253  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2254  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2255  }
2256  }
2257  }
2258 }
2259 
2260 void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
2261 {
2262 #if !CONFIG_SMALL
2263  if(s->out_format == FMT_MPEG1) {
2264  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
2265  else mpv_reconstruct_mb_internal(s, block, 0, 1);
2266  } else
2267 #endif
2268  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
2269  else mpv_reconstruct_mb_internal(s, block, 0, 0);
2270 }
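/* [Annotation, not in the original source] lowres_flag and is_mpeg12 are
 * compile-time constants at each call above, so the av_always_inline worker
 * is specialized into separate variants; CONFIG_SMALL drops the dedicated
 * MPEG-1/2 variant to trade a little speed for code size. */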
2271 
2272 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
2273 {
2274  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
2275  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
2276  s->first_field, s->low_delay);
2277 }
2278 
2279 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2280  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2281  const int uvlinesize = s->current_picture.f->linesize[1];
2282  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
2283  const int height_of_mb = 4 - s->avctx->lowres;
2284 
2285  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2286  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2287  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2288  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2289  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2290  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2291  //block_index is not used by mpeg2, so it is not affected by chroma_format
2292 
2293  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
2294  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2295  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2296 
2297  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2298  {
2299  if(s->picture_structure==PICT_FRAME){
2300  s->dest[0] += s->mb_y * linesize << height_of_mb;
2301  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2302  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2303  }else{
2304  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
2305  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2306  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2307  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2308  }
2309  }
2310 }
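/* [Annotation, not in the original source] Worked example for the dest[]
 * arithmetic above: width_of_mb/height_of_mb are log2 macroblock sizes in
 * bytes/lines. For 8-bit video at lowres 0, width_of_mb == 4, so
 * (s->mb_x - 1U) << 4 advances dest[0] in 16-byte (16-pixel) steps; samples
 * above 8 bits add one bit for the 2-byte pixels, and each lowres step
 * halves the stride of the walk. */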
2311 
2312 void ff_mpeg_flush(AVCodecContext *avctx){
2313  int i;
2314  MpegEncContext *s = avctx->priv_data;
2315 
2316  if (!s || !s->picture)
2317  return;
2318 
2319  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2320  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2321  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2322 
2323  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2324  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2325  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2326 
2327  s->mb_x= s->mb_y= 0;
2328  s->closed_gop= 0;
2329 
2330  s->parse_context.state= -1;
2331  s->parse_context.frame_start_found= 0;
2332  s->parse_context.overread= 0;
2333  s->parse_context.overread_index= 0;
2334  s->parse_context.index= 0;
2335  s->parse_context.last_index= 0;
2336  s->bitstream_buffer_size=0;
2337  s->pp_time=0;
2338 }
2339 
2340 /**
2341  * Set qscale and update qscale-dependent variables.
2342  */
2343 void ff_set_qscale(MpegEncContext * s, int qscale)
2344 {
2345  if (qscale < 1)
2346  qscale = 1;
2347  else if (qscale > 31)
2348  qscale = 31;
2349 
2350  s->qscale = qscale;
2351  s->chroma_qscale= s->chroma_qscale_table[qscale];
2352 
2353  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2354  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2355 }
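/* [Annotation, not in the original source] A minimal usage sketch: callers
 * may pass an unclamped value, e.g.
 *
 *     ff_set_qscale(s, s->qscale + dquant);  // clamped to [1, 31] inside
 *
 * after which s->chroma_qscale, s->y_dc_scale and s->c_dc_scale are all
 * consistent with the new luma qscale. */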
2356 
2357 void ff_mpv_report_decode_progress(MpegEncContext *s)
2358 {
2359  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2360  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2361 }