FFmpeg
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/attributes.h"
31 #include "libavutil/avassert.h"
32 #include "libavutil/imgutils.h"
33 #include "libavutil/internal.h"
36 
37 #include "avcodec.h"
38 #include "blockdsp.h"
39 #include "h264chroma.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "mathops.h"
43 #include "mpeg_er.h"
44 #include "mpegutils.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "mjpegenc.h"
48 #include "msmpeg4.h"
49 #include "qpeldsp.h"
50 #include "thread.h"
51 #include "wmv2.h"
52 #include <limits.h>
53 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-1 intra dequantizer (C reference): rescales the coefficients of
     * 8x8 block 'n' in place, up to the last nonzero coefficient.
     * NOTE(review): the first line of the signature (function name and
     * context argument 's') was lost in extraction. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    /* DC coefficient is scaled separately: luma blocks (n < 4) use the luma
     * DC scale, the others the chroma DC scale. */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only MPEG-1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                /* force the magnitude odd (mismatch control) */
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
82 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-1 inter dequantizer (C reference):
     * level' = ((2*|level| + 1) * qscale * Q[j]) >> 4, magnitude forced odd,
     * sign restored; applied in place up to the last nonzero coefficient.
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        /* the intra scantable permutation is used for inter blocks as well */
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                /* force the magnitude odd (mismatch control) */
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
111 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-2 intra dequantizer (C reference). Unlike the MPEG-1 variant
     * there is no oddification here; the _bitexact variant below applies
     * mismatch control on block[63] instead.
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* map the 5-bit quantizer through the nonlinear table, or double it */
    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else qscale <<= 1;

    /* with alternate scan all 64 coefficients must be processed */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}
141 
                                   int16_t *block, int n, int qscale)
{
    /* Bit-exact MPEG-2 intra dequantizer: identical to
     * dct_unquantize_mpeg2_intra_c but additionally accumulates the
     * coefficient sum and applies mismatch control to block[63].
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* mismatch control: flip the LSB of the last coefficient depending on
     * the parity of the coefficient sum */
    block[63]^=sum&1;
}
175 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-2 inter dequantizer (C reference):
     * level' = ((2*|level| + 1) * qscale * Q[j]) >> 5, sign restored,
     * with mismatch control applied to block[63] at the end.
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else qscale <<= 1;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 5;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* mismatch control: flip the LSB of the last coefficient depending on
     * the parity of the coefficient sum */
    block[63]^=sum&1;
}
209 
                                  int16_t *block, int n, int qscale)
{
    /* H.263 intra dequantizer (C reference): level' = level*2*qscale ± qadd.
     * The block is indexed directly (raster order, no scan permutation).
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0 || s->h263_aic);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        /* normal mode: scale DC separately and use an odd rounding offset */
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        /* advanced intra coding: DC untouched, no rounding offset */
        qadd = 0;
    }
    /* with AC prediction all 63 AC coefficients may be nonzero */
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
243 
                                  int16_t *block, int n, int qscale)
{
    /* H.263 inter dequantizer (C reference): level' = level*2*qscale ± qadd,
     * including the DC coefficient (loop starts at 0). Raster order, no
     * scan permutation.
     * NOTE(review): the first line of the signature was lost in extraction. */
    int i, level, qmul, qadd;
    int nCoeffs;

    av_assert2(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
269 
270 
/* Debug replacement for the 16-pixel-wide pixel-copy routines (installed by
 * dct_init when FF_DEBUG_NOMC is set): instead of copying from 'src', paint
 * each of the 'h' destination rows mid-gray (128). 'src' is deliberately
 * unused; the signature must match the hpeldsp function pointers. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 16);
}
276 
/* 8-pixel-wide counterpart of gray16: fills each of the 'h' destination rows
 * with mid-gray (128) instead of copying pixels. Used only when motion
 * compensation output is disabled for debugging (FF_DEBUG_NOMC). */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    for (int row = 0; row < h; row++)
        memset(dst + row * linesize, 128, 8);
}
282 
283 /* init common dct for both encoder and decoder */
{
    /* Initialize the DSP contexts and install the C reference dequantizers;
     * architecture-specific overrides follow at the bottom.
     * NOTE(review): the signature line was lost in extraction. */
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        /* debug mode: replace all pixel-copy routines with flat-gray fills
         * so motion compensation output is visibly disabled */
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    s->dct_unquantize_h263_intra  = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter  = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* the bit-exact variant adds mismatch control on block[63] */
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* NOTE(review): the per-architecture init call that followed each of
     * these guards appears to have been dropped by the extraction. */
    if (HAVE_INTRINSICS_NEON)

    if (ARCH_ALPHA)
    if (ARCH_ARM)
    if (ARCH_PPC)
    if (ARCH_X86)
    if (ARCH_MIPS)

    return 0;
}
330 
{
    /* Initialize the IDCT context and build the four permuted scantables
     * from the chosen IDCT's coefficient permutation.
     * NOTE(review): the signature line was lost in extraction. */
    if (s->codec_id == AV_CODEC_ID_MPEG4)
        s->idsp.mpeg4_studio_profile = s->studio_profile;
    ff_idctdsp_init(&s->idsp, s->avctx);

    /* load & permutate scantables
     * note: only wmv uses different ones
     */
    if (s->alternate_scan) {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
    } else {
        ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
        ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
    }
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
}
350 
{
    /* Thin wrapper: allocate a Picture with this context's geometry and
     * record the resulting line sizes in s->linesize / s->uvlinesize.
     * NOTE(review): the signature line was lost in extraction. */
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
358 
{
    /* Allocate the per-slice-context buffers: motion-estimation maps
     * (encoder only), DCT block storage, and (for FMT_H263) AC prediction
     * values. Everything freed again in free_duplicate_context().
     * NOTE(review): the signature line was lost in extraction. */
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    if (s->encoding) {
        if (!FF_ALLOCZ_TYPED_ARRAY(s->me.map, ME_MAP_SIZE) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->me.score_map, ME_MAP_SIZE))
            return AVERROR(ENOMEM);

        if (s->noise_reduction) {
            if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_error_sum, 2))
                return AVERROR(ENOMEM);
        }
    }
    if (!FF_ALLOCZ_TYPED_ARRAY(s->blocks, 2))
        return AVERROR(ENOMEM);
    s->block = s->blocks[0];

    /* pblocks: per-block pointers, 12 to cover 4:2:2/4:4:4 layouts */
    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    if (!(s->block32 = av_mallocz(sizeof(*s->block32))) ||
        !(s->dpcm_macroblock = av_mallocz(sizeof(*s->dpcm_macroblock))))
        return AVERROR(ENOMEM);
    s->dpcm_direction = 0;

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->ac_val_base, yc_size))
            return AVERROR(ENOMEM);
        /* offset past the guard row/column so [-1] accesses stay in bounds */
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
}
408 
409 /**
410  * Initialize an MpegEncContext's thread contexts. Presumes that
411  * slice_context_count is already set and that all the fields
412  * that are freed/reset in free_duplicate_context() are NULL.
413  */
{
    /* See the doc comment above: allocate and initialize one context copy
     * per slice thread, then the original, splitting the macroblock rows
     * evenly (with rounding) across [start_mb_y, end_mb_y) ranges.
     * NOTE(review): the signature line was lost in extraction. */
    int nb_slices = s->slice_context_count, ret;

    /* We initialize the copies before the original so that
     * fields allocated in init_duplicate_context are NULL after
     * copying. This prevents double-frees upon allocation error. */
    for (int i = 1; i < nb_slices; i++) {
        s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
        if (!s->thread_context[i])
            return AVERROR(ENOMEM);
        if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
            return ret;
        s->thread_context[i]->start_mb_y =
            (s->mb_height * (i ) + nb_slices / 2) / nb_slices;
        s->thread_context[i]->end_mb_y =
            (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
    }
    /* the original context takes the first row range */
    s->start_mb_y = 0;
    s->end_mb_y   = nb_slices > 1 ? (s->mb_height + nb_slices / 2) / nb_slices
                                  : s->mb_height;
    return init_duplicate_context(s);
}
437 
{
    /* Free everything allocated by init_duplicate_context() plus the
     * lazily-allocated scratch buffers; NULL-safe.
     * NOTE(review): the signature line was lost in extraction. */
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    /* these point into me.scratchpad, so only clear the pointers */
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->dpcm_macroblock);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
459 
{
    /* Free all slice-thread context copies (index 0 is the original and is
     * not freed here).
     * NOTE(review): the signature line was lost in extraction, and a
     * trailing statement appears elided before the closing brace. */
    for (int i = 1; i < s->slice_context_count; i++) {
        free_duplicate_context(s->thread_context[i]);
        av_freep(&s->thread_context[i]);
    }
}
468 
{
    /* Save the per-slice-context fields of 'src' into 'bak' so that a
     * wholesale memcpy of the context can be undone for these fields
     * (see ff_update_duplicate_context below).
     * NOTE(review): the signature line was lost in extraction. */
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(dpcm_macroblock);
    COPY(dpcm_direction);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
498 
{
    /* Copy the shared state of 'src' into 'dst' while preserving dst's own
     * per-slice buffers (saved/restored via backup_duplicate_context), then
     * re-derive dst-local pointers and lazily allocate scratch buffers.
     * NOTE(review): the signature line was lost in extraction. */
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block storage, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    return 0;
}
523 
                                 const AVCodecContext *src)
{
    /* Frame-threading state transfer: copy decoding state from the source
     * thread's context into the destination thread's context.
     * NOTE(review): the first line of the signature was lost in extraction,
     * and several interior lines are elided below (marked). */
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        /* first use of this thread context: clone everything, then reset
         * the fields that must not be shared between threads */
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
//     s->picture_range_start  += MAX_PICTURE_COUNT;
//     s->picture_range_end    += MAX_PICTURE_COUNT;
            if((err = ff_mpv_common_init(s)) < 0){
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->height = s1->height;
        s->width  = s1->width;
        /* NOTE(review): the call assigning 'ret' (a resolution-change
         * reinit) appears to be elided here in this extraction; as shown,
         * 'ret' would be returned uninitialized. */
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->quarter_sample       = s1->quarter_sample;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

/* Re-reference (or re-derive the tables of) one of the special pictures
 * from the source context. */
#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    /* NOTE(review): an UPDATE_PICTURE line appears elided here. */
    UPDATE_PICTURE(next_picture);

/* Translate a picture pointer from the old context's picture array into
 * the equivalent slot of the new context's array (NULL if out of range). */
#define REBASE_PICTURE(pic, new_ctx, old_ctx) \
    ((pic && pic >= old_ctx->picture && \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
        /* NOTE(review): the final argument line of this memset (the padding
         * size) appears elided here in this extraction. */
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    return 0;
}
663 
664 /**
665  * Set the given MpegEncContext to common defaults
666  * (same for encoding and decoding).
667  * The changed fields will not depend upon the
668  * prior state of the MpegEncContext.
669  */
{
    /* See the doc comment above: reset the context to codec-independent
     * defaults shared by encoder and decoder.
     * NOTE(review): the signature line was lost in extraction. */
    s->y_dc_scale_table      =
    s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table   = ff_default_chroma_qscale_table;
    s->progressive_frame     = 1;
    s->progressive_sequence  = 1;
    s->picture_structure     = PICT_FRAME;

    s->coded_picture_number  = 0;
    s->picture_number        = 0;

    s->f_code                = 1;
    s->b_code                = 1;

    s->slice_context_count   = 1;
}
687 
688 /**
689  * Initialize the given MpegEncContext for decoding.
690  * the changed fields will not depend upon
691  * the prior state of the MpegEncContext.
692  */
{
    /* See the doc comment above: set up the context for decoding from the
     * codec context's coded dimensions and codec id.
     * NOTE(review): the signature line was lost in extraction, and a call
     * (presumably the common-defaults initializer) appears elided before
     * the first assignment. */

    s->avctx           = avctx;
    s->width           = avctx->coded_width;
    s->height          = avctx->coded_height;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    /* convert fourcc to upper case */
    s->codec_tag       = ff_toupper4(avctx->codec_tag);
}
706 
707 /**
708  * Initialize and allocates MpegEncContext fields dependent on the resolution.
709  */
{
    /* See the doc comment above: derive the macroblock geometry from
     * width/height (mb_height must already be set by the caller) and
     * allocate all resolution-dependent tables.
     * NOTE(review): the signature line was lost in extraction. */
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;
    s->b8_stride  = s->mb_width * 2 + 1;
    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    /* mapping from linear MB index to its (x + y*mb_stride) position */
    if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_index2xy, s->mb_num + 1))
        return AVERROR(ENOMEM);
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->p_mv_table_base,            mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_forw_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_back_mv_table_base,       mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_forw_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_bidir_back_mv_table_base, mv_table_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->b_direct_mv_table_base,     mv_table_size))
            return AVERROR(ENOMEM);
        /* offset past the guard row so [-stride] accesses stay in bounds */
        s->p_mv_table            = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base     + s->mb_stride + 1;

        /* Allocate MB type table */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->mb_type,      mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->lambda_table, mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->cplx_tab,     mb_array_size) ||
            !FF_ALLOC_TYPED_ARRAY (s->bits_tab,     mb_array_size))
            return AVERROR(ENOMEM);

#define ALLOCZ_ARRAYS(p, mult, numb) ((p) = av_calloc(numb, mult * sizeof(*(p))))
        if (s->codec_id == AV_CODEC_ID_MPEG4 ||
            (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
            /* field-MV encoding tables: one bulk allocation each, then
             * sliced into the [i][j][k] pointer grids below */
            int16_t (*tmp1)[2];
            uint8_t *tmp2;
            if (!(tmp1 = ALLOCZ_ARRAYS(s->b_field_mv_table_base, 8, mv_table_size)) ||
                !(tmp2 = ALLOCZ_ARRAYS(s->b_field_select_table[0][0], 2 * 4, mv_table_size)) ||
                !ALLOCZ_ARRAYS(s->p_field_select_table[0], 2 * 2, mv_table_size))
                return AVERROR(ENOMEM);

            s->p_field_select_table[1] = s->p_field_select_table[0] + 2 * mv_table_size;
            tmp1 += s->mb_stride + 1;

            for (int i = 0; i < 2; i++) {
                for (int j = 0; j < 2; j++) {
                    for (int k = 0; k < 2; k++) {
                        s->b_field_mv_table[i][j][k] = tmp1;
                        tmp1 += mv_table_size;
                    }
                    s->b_field_select_table[i][j] = tmp2;
                    tmp2 += 2 * mv_table_size;
                }
            }
        }
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        int16_t (*tmp)[2];
        /* interlaced direct mode decoding tables */
        if (!(tmp = ALLOCZ_ARRAYS(s->p_field_mv_table_base, 4, mv_table_size)))
            return AVERROR(ENOMEM);
        tmp += s->mb_stride + 1;
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                s->p_field_mv_table[i][j] = tmp;
                tmp += mv_table_size;
            }
        }
    }

    if (s->out_format == FMT_H263) {
        /* cbp values, cbp, ac_pred, pred_dir */
        if (!FF_ALLOCZ_TYPED_ARRAY(s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->cbp_table,        mb_array_size) ||
            !FF_ALLOCZ_TYPED_ARRAY(s->pred_dir_table,   mb_array_size))
            return AVERROR(ENOMEM);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        if (!FF_ALLOCZ_TYPED_ARRAY(s->dc_val_base, yc_size))
            return AVERROR(ENOMEM);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 = neutral DC prediction value */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block, init macroblock skip table */
    if (!FF_ALLOC_TYPED_ARRAY(s->mbintra_table, mb_array_size) ||
        // Note the + 1 is for a quicker MPEG-4 slice_end detection
        !FF_ALLOCZ_TYPED_ARRAY(s->mbskip_table, mb_array_size + 2))
        return AVERROR(ENOMEM);
    memset(s->mbintra_table, 1, mb_array_size);

    return ff_mpeg_er_init(s);
}
843 
{
    /* Reset every pointer/size field of the context so that a subsequent
     * teardown of a partially-initialized context is safe.
     * NOTE(review): the signature line was lost in extraction. */
    int i, j, k;

    memset(&s->next_picture,    0, sizeof(s->next_picture));
    memset(&s->last_picture,    0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture,     0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map                = NULL;
    s->me.score_map          = NULL;
    s->dct_error_sum         = NULL;
    s->block                 = NULL;
    s->blocks                = NULL;
    s->block32               = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->dpcm_direction        = 0;
    s->dpcm_macroblock       = NULL;
    s->ac_val_base           = NULL;
    s->ac_val[0]             =
    s->ac_val[1]             =
    s->ac_val[2]             = NULL;
    s->sc.edge_emu_buffer    = NULL;
    s->me.scratchpad         = NULL;
    s->me.temp               =
    s->sc.rd_scratchpad      =
    s->sc.b_scratchpad       =
    s->sc.obmc_scratchpad    = NULL;


    s->bitstream_buffer      = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture               = NULL;
    s->mb_type               = NULL;
    s->p_mv_table_base       = NULL;
    s->b_forw_mv_table_base  = NULL;
    s->b_back_mv_table_base  = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base = NULL;
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    s->b_field_mv_table_base = NULL;
    s->p_field_mv_table_base = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table[i][j][k] = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table[i][j]     = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base      = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table    = NULL;
    s->cbp_table        = NULL;
    s->pred_dir_table   = NULL;

    s->mbskip_table     = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer     = NULL;
    s->mb_index2xy           = NULL;
    s->lambda_table          = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}
921 
922 /**
923  * init common structure for both encoder and decoder.
924  * this assumes that some variables like width/height are already set
925  */
{
    /* See the doc comment above: full context initialization shared by
     * encoder and decoder (geometry, DSP init, picture array, per-frame
     * tables, slice thread contexts).
     * NOTE(review): the signature line was lost in extraction, and two
     * interior lines are elided below (marked). */
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 rounds mb_height up to a multiple of two MB rows */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return AVERROR(EINVAL);
    }

    /* clamp the slice count to the thread limit and the MB row count */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return AVERROR(EINVAL);

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    if (!FF_ALLOCZ_TYPED_ARRAY(s->picture, MAX_PICTURE_COUNT))
        return AVERROR(ENOMEM);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail_nomem;
    }

    if (!(s->next_picture.f    = av_frame_alloc()) ||
        !(s->last_picture.f    = av_frame_alloc()) ||
        !(s->current_picture.f = av_frame_alloc()) ||
        !(s->new_picture.f     = av_frame_alloc()))
        goto fail_nomem;

    if ((ret = init_context_frame(s)))
        goto fail;

#if FF_API_FLAG_TRUNCATED
    s->parse_context.state = -1;
#endif

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;
    s->slice_context_count = nb_slices;

//     if (s->width && s->height) {
    /* NOTE(review): the call assigning 'ret' (slice thread context setup)
     * appears elided here in this extraction. */
    if (ret < 0)
        goto fail;
//     }

    return 0;
 fail_nomem:
    ret = AVERROR(ENOMEM);
 fail:
    /* NOTE(review): the cleanup call on the failure path appears elided
     * here in this extraction. */
    return ret;
}
1012 
1013 /**
1014  * Frees and resets MpegEncContext fields depending on the resolution
1015  * as well as the slice thread contexts.
1016  * Is used during resolution changes to avoid a full reinitialization of the
1017  * codec.
1018  */
{
    /* See the doc comment above: free all resolution-dependent tables
     * allocated by init_context_frame and reset the derived pointers.
     * NOTE(review): the signature line was lost in extraction, and a call
     * appears elided before the first av_freep. */
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers point into the freed base arrays */
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    av_freep(&s->b_field_mv_table_base);
    av_freep(&s->b_field_select_table[0][0]);
    av_freep(&s->p_field_mv_table_base);
    av_freep(&s->p_field_select_table[0]);
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table[i][j][k] = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table[i][j]     = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    s->linesize = s->uvlinesize = 0;
}
1071 
{
    /* Reinitialize the resolution-dependent parts of an already-initialized
     * context (used on resolution changes instead of a full reinit).
     * NOTE(review): the signature line was lost in extraction, and teardown
     * calls appear elided at the marked points. */
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    /* NOTE(review): the per-resolution teardown call appears elided here. */

    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    /* set chroma shifts */
    err = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (err < 0)
        goto fail;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0] = s;

    if (s->width && s->height) {
        err = init_duplicate_contexts(s);
        if (err < 0)
            goto fail;
    }
    s->context_reinit = 0;

    return 0;
 fail:
    /* NOTE(review): the teardown call on the failure path appears elided
     * here; context_reinit is set so a later attempt retries the reinit. */
    s->context_reinit = 1;
    return err;
}
1126 
/* init common structure for both encoder and decoder */
{
    /* Free everything owned by the common MpegEncContext: parse/bitstream
     * buffers, the picture array and the last/next/current/new picture
     * wrappers, then reset the pointers so the context can be re-inited.
     * Safe to call on a partially initialized context; returns early when
     * s or s->avctx is NULL. */
    int i;

    if (!s)
        return;

    /* NOTE(review): an additional cleanup call (likely freeing the duplicate
     * slice contexts) appears elided here in this view — confirm upstream */
    if (s->slice_context_count > 1)
        s->slice_context_count = 1;

#if FF_API_FLAG_TRUNCATED
    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;
#endif

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (!s->avctx)
        return;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    s->context_initialized = 0;
    s->context_reinit      = 0;
    s->last_picture_ptr    =
    s->next_picture_ptr    =
    s->current_picture_ptr = NULL;
    s->linesize = s->uvlinesize = 0;
}
1178 
1179 
1180 static void gray_frame(AVFrame *frame)
1181 {
1182  int i, h_chroma_shift, v_chroma_shift;
1183 
1184  av_pix_fmt_get_chroma_sub_sample(frame->format, &h_chroma_shift, &v_chroma_shift);
1185 
1186  for(i=0; i<frame->height; i++)
1187  memset(frame->data[0] + frame->linesize[0]*i, 0x80, frame->width);
1188  for(i=0; i<AV_CEIL_RSHIFT(frame->height, v_chroma_shift); i++) {
1189  memset(frame->data[1] + frame->linesize[1]*i,
1190  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1191  memset(frame->data[2] + frame->linesize[2]*i,
1192  0x80, AV_CEIL_RSHIFT(frame->width, h_chroma_shift));
1193  }
1194 }
1195 
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 *
 * Releases stale reference pictures, picks/allocates the current picture,
 * allocates dummy last/next pictures when references are missing, applies
 * field-picture linesize adjustments, and selects the dequantizers.
 * Returns 0 on success, a negative value on failure.
 */
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-124 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    /* reference == 3 marks a picture referenced by both fields;
     * B frames and droppable frames are never references */
    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                  !s->progressive_sequence;
    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    /* shift the reference window: last <- next, next <- current */
    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr ? s->last_picture_ptr->f->data[0] : NULL,
            s->next_picture_ptr ? s->next_picture_ptr->f->data[0] : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    /* no usable last reference (stream starts on P/B, or it was lost):
     * fabricate a gray dummy so motion compensation has something to read */
    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference    = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            /* fill with mid-gray (0x80) */
            for(i=0; i<avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            /* FLV1/H.263 use luma 16 instead of 0x80 for the dummy frame */
            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
            }
        }

        /* the dummy is fully "decoded": unblock any waiting frame threads */
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference    = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                     s->last_picture_ptr->f->buf[0]));

    /* field pictures: point data at the selected field and double the
     * linesizes so each field is addressed as a frame */
    if (s->picture_structure!= PICT_FRAME) {
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}
1422 
/* called after a frame has been decoded. */
{
    /* clear x86 MMX state so float code after the decoder works */
    emms_c();

    /* signal waiting frame threads that this reference is fully decoded */
    if (s->current_picture.reference)
        ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
1431 
{
    /* Thin wrapper: dump per-MB debug info (mb types, qscale, motion
     * vectors, skip flags) for picture p onto the visual debug output. */
    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                         p->qscale_table, p->motion_val,
                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
1438 
{
    /* Export the per-macroblock quantizers of picture p as
     * AVVideoEncParams side data, when the user opted in via
     * AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS.
     * Returns 0 on success or when export is disabled, AVERROR(ENOMEM)
     * when the side data cannot be allocated. */
    AVVideoEncParams *par;
    /* FF_QSCALE_TYPE_MPEG1 qscale values are exported scaled by 2 */
    int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
    unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
    unsigned int x, y;

    if (!(s->avctx->export_side_data & AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS))
        return 0;

    /* NOTE(review): the allocation of 'par' (av_video_enc_params_create_side_data)
     * appears elided in this view of the file — confirm against full source */
    if (!par)
        return AVERROR(ENOMEM);

    for (y = 0; y < p->alloc_mb_height; y++)
        for (x = 0; x < p->alloc_mb_width; x++) {
            const unsigned int block_idx = y * p->alloc_mb_width + x;
            /* mb_xy uses the (padded) allocation stride, not the width */
            const unsigned int mb_xy = y * p->alloc_mb_stride + x;
            AVVideoBlockParams *b = av_video_enc_params_block(par, block_idx);

            /* each block covers one 16x16 macroblock */
            b->src_x = x * 16;
            b->src_y = y * 16;
            b->w     = 16;
            b->h     = 16;

            b->delta_qp = p->qscale_table[mb_xy] * mult;
        }

    return 0;
}
1469 
                               uint8_t *dest, uint8_t *src,
                               int field_based, int field_select,
                               int src_x, int src_y,
                               int width, int height, ptrdiff_t stride,
                               int h_edge_pos, int v_edge_pos,
                               int w, int h, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y)
{
    /* Half-pel motion compensation of one block for lowres decoding.
     * Returns 1 when the edge emulation buffer had to be used, else 0. */
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    /* subpel fraction mask at this lowres level */
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* note: '>> lowres + 1' parses as '>> (lowres + 1)' — intentional */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* block reads beyond the padded picture edge: emulate it */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
1515 
/* apply one mpeg motion vector to the three components */
                                uint8_t *dest_y,
                                uint8_t *dest_cb,
                                uint8_t *dest_cr,
                                int field_based,
                                int bottom_field,
                                int field_select,
                                uint8_t **ref_picture,
                                h264_chroma_mc_func *pix_op,
                                int motion_x, int motion_y,
                                int h, int mb_y)
{
    /* Motion-compensate one MPEG motion vector for Y, Cb and Cr in lowres
     * mode. Chroma source coordinates are derived per output format
     * (H.263 / H.261 / 420 / 422 / 444), and the picture edge is emulated
     * when the referenced area lies outside the padded frame. */
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;
    const int s_mask     = (2 << lowres) - 1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    /* note: '>> lowres + 1' parses as '>> (lowres + 1)' throughout */
    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* 4:2:0 — chroma subsampled in both directions */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422
                mx      = motion_x / 2;
                uvsx    = mx & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444
                uvsx    = motion_x & s_mask;
                uvsy    = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        /* referenced area crosses the picture border: emulate the edge for
         * luma, and for chroma too unless decoding gray-only */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
1655 
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    /* Chroma motion compensation for 8x8 (4MV) macroblocks in lowres mode:
     * the caller sums the four luma vectors into (mx, my), which is here
     * rounded to a single chroma vector and applied to Cb and Cr. */
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    /* note: '>> lowres + 1' parses as '>> (lowres + 1)' — intentional */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr shares the same offset, so it needs edge emulation iff Cb did */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
1712 
/**
 * motion compensation of a single macroblock
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole 16x16 macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (specially rounded) sum */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture referencing its own frame's opposite field */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: put the first prediction, then average the other */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
1854 
/**
 * find the lowest MB row referenced in the MVs
 *
 * Used for frame threading: the caller must wait until the reference
 * picture is decoded at least down to the returned row. Returns the last
 * macroblock row (conservatively, mb_height - 1 for field pictures, GMC
 * and unhandled MV types).
 */
{
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* find the extreme vertical displacement among the vectors */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* convert the worst-case displacement to a row offset, rounding up;
     * half-pel vectors are shifted to qpel units first */
    off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
unhandled:
    return s->mb_height-1;
}
1892 
1893 /* put block[] to dest[] */
1894 static inline void put_dct(MpegEncContext *s,
1895  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1896 {
1897  s->dct_unquantize_intra(s, block, i, qscale);
1898  s->idsp.idct_put(dest, line_size, block);
1899 }
1900 
1901 /* add block[] to dest[] */
1902 static inline void add_dct(MpegEncContext *s,
1903  int16_t *block, int i, uint8_t *dest, int line_size)
1904 {
1905  if (s->block_last_index[i] >= 0) {
1906  s->idsp.idct_add(dest, line_size, block);
1907  }
1908 }
1909 
1910 static inline void add_dequant_dct(MpegEncContext *s,
1911  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
1912 {
1913  if (s->block_last_index[i] >= 0) {
1914  s->dct_unquantize_inter(s, block, i, qscale);
1915 
1916  s->idsp.idct_add(dest, line_size, block);
1917  }
1918 }
1919 
/**
 * Clean dc, ac, coded_block for the current non-intra MB.
 *
 * Resets the DC predictors to their default (1024), clears the AC
 * prediction values and (for MSMPEG4 v3+) the coded_block flags, and
 * marks the macroblock as non-intra in mbintra_table.
 */
{
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* luma DC predictors: the four 8x8 blocks of this macroblock */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy + wrap    ] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy + wrap    ] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    s->mbintra_table[xy]= 0;
}
1952 
1953 /* generic function called after a macroblock has been parsed by the
1954  decoder or after it has been encoded by the encoder.
1955 
1956  Important variables used:
1957  s->mb_intra : true if intra macroblock
1958  s->mv_dir : motion vector direction
1959  s->mv_type : motion vector type
1960  s->mv : motion vector
1961  s->interlaced_dct : true if interlaced dct used (mpeg2)
1962  */
1963 static av_always_inline
1965  int lowres_flag, int is_mpeg12)
1966 {
1967  const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1968 
1969  if (CONFIG_XVMC &&
1970  s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
1971  s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
1972  return;
1973  }
1974 
1975  if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1976  /* print DCT coefficients */
1977  int i,j;
1978  av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1979  for(i=0; i<6; i++){
1980  for(j=0; j<64; j++){
1981  av_log(s->avctx, AV_LOG_DEBUG, "%5d",
1982  block[i][s->idsp.idct_permutation[j]]);
1983  }
1984  av_log(s->avctx, AV_LOG_DEBUG, "\n");
1985  }
1986  }
1987 
1988  s->current_picture.qscale_table[mb_xy] = s->qscale;
1989 
1990  /* update DC predictors for P macroblocks */
1991  if (!s->mb_intra) {
1992  if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1993  if(s->mbintra_table[mb_xy])
1995  } else {
1996  s->last_dc[0] =
1997  s->last_dc[1] =
1998  s->last_dc[2] = 128 << s->intra_dc_precision;
1999  }
2000  }
2001  else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2002  s->mbintra_table[mb_xy]=1;
2003 
2004  if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
2005  !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
2006  s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
2007  uint8_t *dest_y, *dest_cb, *dest_cr;
2008  int dct_linesize, dct_offset;
2009  op_pixels_func (*op_pix)[4];
2010  qpel_mc_func (*op_qpix)[16];
2011  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2012  const int uvlinesize = s->current_picture.f->linesize[1];
2013  const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2014  const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2015 
2016  /* avoid copy if macroblock skipped in last frame too */
2017  /* skip only during decoding as we might trash the buffers during encoding a bit */
2018  if(!s->encoding){
2019  uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2020 
2021  if (s->mb_skipped) {
2022  s->mb_skipped= 0;
2023  av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
2024  *mbskip_ptr = 1;
2025  } else if(!s->current_picture.reference) {
2026  *mbskip_ptr = 1;
2027  } else{
2028  *mbskip_ptr = 0; /* not skipped */
2029  }
2030  }
2031 
2032  dct_linesize = linesize << s->interlaced_dct;
2033  dct_offset = s->interlaced_dct ? linesize : linesize * block_size;
2034 
2035  if(readable){
2036  dest_y= s->dest[0];
2037  dest_cb= s->dest[1];
2038  dest_cr= s->dest[2];
2039  }else{
2040  dest_y = s->sc.b_scratchpad;
2041  dest_cb= s->sc.b_scratchpad+16*linesize;
2042  dest_cr= s->sc.b_scratchpad+32*linesize;
2043  }
2044 
2045  if (!s->mb_intra) {
2046  /* motion handling */
2047  /* decoding or more than one mb_type (MC was already done otherwise) */
2048  if(!s->encoding){
2049 
2050  if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2051  if (s->mv_dir & MV_DIR_FORWARD) {
2052  ff_thread_await_progress(&s->last_picture_ptr->tf,
2054  0);
2055  }
2056  if (s->mv_dir & MV_DIR_BACKWARD) {
2057  ff_thread_await_progress(&s->next_picture_ptr->tf,
2059  0);
2060  }
2061  }
2062 
2063  if(lowres_flag){
2064  h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
2065 
2066  if (s->mv_dir & MV_DIR_FORWARD) {
2067  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
2068  op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
2069  }
2070  if (s->mv_dir & MV_DIR_BACKWARD) {
2071  MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
2072  }
2073  }else{
2074  op_qpix = s->me.qpel_put;
2075  if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2076  op_pix = s->hdsp.put_pixels_tab;
2077  }else{
2078  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2079  }
2080  if (s->mv_dir & MV_DIR_FORWARD) {
2081  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
2082  op_pix = s->hdsp.avg_pixels_tab;
2083  op_qpix= s->me.qpel_avg;
2084  }
2085  if (s->mv_dir & MV_DIR_BACKWARD) {
2086  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
2087  }
2088  }
2089  }
2090 
2091  /* skip dequant / idct if we are really late ;) */
2092  if(s->avctx->skip_idct){
2093  if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2094  ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2095  || s->avctx->skip_idct >= AVDISCARD_ALL)
2096  goto skip_idct;
2097  }
2098 
2099  /* add dct residue */
2100  if(s->encoding || !( s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
2101  || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
2102  add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2103  add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2104  add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2105  add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2106 
2107  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2108  if (s->chroma_y_shift){
2109  add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2110  add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2111  }else{
2112  dct_linesize >>= 1;
2113  dct_offset >>=1;
2114  add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2115  add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2116  add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2117  add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2118  }
2119  }
2120  } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
2121  add_dct(s, block[0], 0, dest_y , dct_linesize);
2122  add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2123  add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2124  add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2125 
2126  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2127  if(s->chroma_y_shift){//Chroma420
2128  add_dct(s, block[4], 4, dest_cb, uvlinesize);
2129  add_dct(s, block[5], 5, dest_cr, uvlinesize);
2130  }else{
2131  //chroma422
2132  dct_linesize = uvlinesize << s->interlaced_dct;
2133  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2134 
2135  add_dct(s, block[4], 4, dest_cb, dct_linesize);
2136  add_dct(s, block[5], 5, dest_cr, dct_linesize);
2137  add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2138  add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2139  if(!s->chroma_x_shift){//Chroma444
2140  add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2141  add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2142  add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2143  add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2144  }
2145  }
2146  }//fi gray
2147  }
2148  else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2149  ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2150  }
2151  } else {
2152  /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
2153  TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
2154  if (s->avctx->bits_per_raw_sample > 8){
2155  const int act_block_size = block_size * 2;
2156 
2157  if(s->dpcm_direction == 0) {
2158  s->idsp.idct_put(dest_y, dct_linesize, (int16_t*)(*s->block32)[0]);
2159  s->idsp.idct_put(dest_y + act_block_size, dct_linesize, (int16_t*)(*s->block32)[1]);
2160  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, (int16_t*)(*s->block32)[2]);
2161  s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);
2162 
2163  dct_linesize = uvlinesize << s->interlaced_dct;
2164  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2165 
2166  s->idsp.idct_put(dest_cb, dct_linesize, (int16_t*)(*s->block32)[4]);
2167  s->idsp.idct_put(dest_cr, dct_linesize, (int16_t*)(*s->block32)[5]);
2168  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
2169  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
2170  if(!s->chroma_x_shift){//Chroma444
2171  s->idsp.idct_put(dest_cb + act_block_size, dct_linesize, (int16_t*)(*s->block32)[8]);
2172  s->idsp.idct_put(dest_cr + act_block_size, dct_linesize, (int16_t*)(*s->block32)[9]);
2173  s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
2174  s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
2175  }
2176  } else if(s->dpcm_direction == 1) {
2177  int i, w, h;
2178  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2179  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2180  for(i = 0; i < 3; i++) {
2181  int idx = 0;
2182  int vsub = i ? s->chroma_y_shift : 0;
2183  int hsub = i ? s->chroma_x_shift : 0;
2184  for(h = 0; h < (16 >> vsub); h++){
2185  for(w = 0; w < (16 >> hsub); w++)
2186  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2187  dest_pcm[i] += linesize[i] / 2;
2188  }
2189  }
2190  } else if(s->dpcm_direction == -1) {
2191  int i, w, h;
2192  uint16_t *dest_pcm[3] = {(uint16_t*)dest_y, (uint16_t*)dest_cb, (uint16_t*)dest_cr};
2193  int linesize[3] = {dct_linesize, uvlinesize, uvlinesize};
2194  for(i = 0; i < 3; i++) {
2195  int idx = 0;
2196  int vsub = i ? s->chroma_y_shift : 0;
2197  int hsub = i ? s->chroma_x_shift : 0;
2198  dest_pcm[i] += (linesize[i] / 2) * ((16 >> vsub) - 1);
2199  for(h = (16 >> vsub)-1; h >= 1; h--){
2200  for(w = (16 >> hsub)-1; w >= 1; w--)
2201  dest_pcm[i][w] = (*s->dpcm_macroblock)[i][idx++];
2202  dest_pcm[i] -= linesize[i] / 2;
2203  }
2204  }
2205  }
2206  }
2207  /* dct only in intra block */
2208  else if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
2209  put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2210  put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2211  put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2212  put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2213 
2214  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2215  if(s->chroma_y_shift){
2216  put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2217  put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2218  }else{
2219  dct_offset >>=1;
2220  dct_linesize >>=1;
2221  put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2222  put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2223  put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2224  put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2225  }
2226  }
2227  }else{
2228  s->idsp.idct_put(dest_y, dct_linesize, block[0]);
2229  s->idsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2230  s->idsp.idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2231  s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2232 
2233  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2234  if(s->chroma_y_shift){
2235  s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
2236  s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
2237  }else{
2238 
2239  dct_linesize = uvlinesize << s->interlaced_dct;
2240  dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
2241 
2242  s->idsp.idct_put(dest_cb, dct_linesize, block[4]);
2243  s->idsp.idct_put(dest_cr, dct_linesize, block[5]);
2244  s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2245  s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2246  if(!s->chroma_x_shift){//Chroma444
2247  s->idsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2248  s->idsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2249  s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2250  s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2251  }
2252  }
2253  }//gray
2254  }
2255  }
2256 skip_idct:
2257  if(!readable){
2258  s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2259  if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
2260  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2261  s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
2262  }
2263  }
2264  }
2265 }
2266 
2268 {
2269 #if !CONFIG_SMALL
2270  if(s->out_format == FMT_MPEG1) {
2271  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 1);
2272  else mpv_reconstruct_mb_internal(s, block, 0, 1);
2273  } else
2274 #endif
2275  if(s->avctx->lowres) mpv_reconstruct_mb_internal(s, block, 1, 0);
2276  else mpv_reconstruct_mb_internal(s, block, 0, 0);
2277 }
2278 
2280 {
2281  ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
2282  s->last_picture_ptr ? s->last_picture_ptr->f : NULL, y, h, s->picture_structure,
2283  s->first_field, s->low_delay);
2284 }
2285 
2286 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2287  const int linesize = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
2288  const int uvlinesize = s->current_picture.f->linesize[1];
2289  const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
2290  const int height_of_mb = 4 - s->avctx->lowres;
2291 
2292  s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2293  s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2294  s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2295  s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2296  s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2297  s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2298  //block_index is not used by mpeg2, so it is not affected by chroma_format
2299 
2300  s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) << width_of_mb);
2301  s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2302  s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
2303 
2304  if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2305  {
2306  if(s->picture_structure==PICT_FRAME){
2307  s->dest[0] += s->mb_y * linesize << height_of_mb;
2308  s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2309  s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
2310  }else{
2311  s->dest[0] += (s->mb_y>>1) * linesize << height_of_mb;
2312  s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2313  s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
2314  av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
2315  }
2316  }
2317 }
2318 
2320  int i;
2321  MpegEncContext *s = avctx->priv_data;
2322 
2323  if (!s || !s->picture)
2324  return;
2325 
2326  for (i = 0; i < MAX_PICTURE_COUNT; i++)
2327  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2328  s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2329 
2330  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
2331  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
2332  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
2333 
2334  s->mb_x= s->mb_y= 0;
2335 
2336 #if FF_API_FLAG_TRUNCATED
2337  s->parse_context.state= -1;
2338  s->parse_context.frame_start_found= 0;
2339  s->parse_context.overread= 0;
2340  s->parse_context.overread_index= 0;
2341  s->parse_context.index= 0;
2342  s->parse_context.last_index= 0;
2343 #endif
2344  s->bitstream_buffer_size=0;
2345  s->pp_time=0;
2346 }
2347 
2348 /**
2349  * set qscale and update qscale dependent variables.
2350  */
2351 void ff_set_qscale(MpegEncContext * s, int qscale)
2352 {
2353  if (qscale < 1)
2354  qscale = 1;
2355  else if (qscale > 31)
2356  qscale = 31;
2357 
2358  s->qscale = qscale;
2359  s->chroma_qscale= s->chroma_qscale_table[qscale];
2360 
2361  s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2362  s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2363 }
2364 
2366 {
2367  if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
2368  ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
2369 }
FF_ALLOCZ_TYPED_ARRAY
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:98
ff_h263_round_chroma
static int ff_h263_round_chroma(int x)
Definition: motion_est.h:101
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:926
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1359
stride
int stride
Definition: mace.c:144
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:255
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
ff_init_scantable
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:29
free_duplicate_contexts
static void free_duplicate_contexts(MpegEncContext *s)
Definition: mpegvideo.c:460
level
uint8_t level
Definition: svq3.c:204
av_clip
#define av_clip
Definition: common.h:96
ff_mpeg_framesize_alloc
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
Definition: mpegpicture.c:58
blockdsp.h
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:123
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::workaround_bugs
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
Definition: avcodec.h:1251
mpeg_motion_lowres
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
Definition: mpegvideo.c:1517
backup_duplicate_context
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
Definition: mpegvideo.c:469
ff_mpv_common_defaults
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:670
hpel_motion_lowres
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
Definition: mpegvideo.c:1470
ff_thread_can_start_frame
int ff_thread_can_start_frame(AVCodecContext *avctx)
Definition: pthread_frame.c:934
ff_mpeg1_dc_scale_table
const uint8_t ff_mpeg1_dc_scale_table[128]
Definition: mpegvideodata.c:33
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:257
AVFrame::coded_picture_number
int coded_picture_number
picture number in bitstream order
Definition: frame.h:444
Picture::alloc_mb_width
int alloc_mb_width
mb_width used to allocate tables
Definition: mpegpicture.h:70
lowest_referenced_row
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
Definition: mpegvideo.c:1858
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
ff_mpv_common_frame_size_change
int ff_mpv_common_frame_size_change(MpegEncContext *s)
Definition: mpegvideo.c:1072
w
uint8_t w
Definition: llviddspenc.c:38
internal.h
ff_free_picture_tables
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
last_picture
enum AVPictureType last_picture
Definition: movenc.c:69
b
#define b
Definition: input.c:40
ff_mpeg2_non_linear_qscale
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
REBASE_PICTURE
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
ff_clean_intra_table_entries
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1923
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
init_duplicate_context
static int init_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:359
ff_mpv_common_init_arm
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
Definition: mpegvideo_arm.c:44
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2286
mpegvideo.h
MpegEncContext::avctx
struct AVCodecContext * avctx
Definition: mpegvideo.h:88
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:247
Picture
Picture.
Definition: mpegpicture.h:45
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:281
mpegutils.h
thread.h
ff_thread_await_progress
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them. reget_buffer() and buffer age optimizations no longer work. *The contents of buffers must not be written to after ff_thread_report_progress() has been called on them. This includes draw_edges(). Porting codecs to frame threading
free_duplicate_context
static void free_duplicate_context(MpegEncContext *s)
Definition: mpegvideo.c:438
MV_DIR_BACKWARD
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:252
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:259
hsub
static void hsub(htype *dst, const htype *src, int bins)
Definition: vf_median.c:74
mpv_reconstruct_mb_internal
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
Definition: mpegvideo.c:1964
av_memdup
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
Definition: mem.c:311
chroma_4mv_motion_lowres
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
Definition: mpegvideo.c:1656
AV_VIDEO_ENC_PARAMS_MPEG2
@ AV_VIDEO_ENC_PARAMS_MPEG2
Definition: video_enc_params.h:65
dct_unquantize_mpeg1_inter_c
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:83
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
U
#define U(x)
Definition: vp56_arith.h:37
fail
#define fail()
Definition: checkasm.h:127
wrap
#define wrap(func)
Definition: neontest.h:65
h264_chroma_mc_func
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
Definition: h264chroma.h:25
MpegEncContext::linesize
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:124
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo.c:2279
MAX_THREADS
#define MAX_THREADS
Definition: frame_thread_encoder.c:35
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2688
ff_videodsp_init
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
Definition: videodsp.c:38
FF_ALLOC_TYPED_ARRAY
#define FF_ALLOC_TYPED_ARRAY(p, nelem)
Definition: internal.h:97
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:571
AVVideoEncParams
Video encoding parameters for a given frame.
Definition: video_enc_params.h:73
MAX_PICTURE_COUNT
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
dct_unquantize_mpeg1_intra_c
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:54
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:97
mult
static int16_t mult(Float11 *f1, Float11 *f2)
Definition: g726.c:56
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1128
MpegEncContext::pblocks
int16_t(*[12] pblocks)[64]
Definition: mpegvideo.h:499
avassert.h
gray16
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:271
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
ff_thread_report_progress
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
Definition: pthread_frame.c:588
motion_vector.h
width
#define width
add_dct
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
Definition: mpegvideo.c:1902
s
#define s(width, name)
Definition: cbs_vp9.c:257
ff_mpeg_unref_picture
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:51
UPDATE_PICTURE
#define UPDATE_PICTURE(pic)
s1
#define s1
Definition: regdef.h:38
ALLOCZ_ARRAYS
#define ALLOCZ_ARRAYS(p, mult, numb)
AV_CODEC_ID_WMV2
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:68
dct_unquantize_mpeg2_intra_bitexact
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:142
FMT_H261
@ FMT_H261
Definition: mpegutils.h:124
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
limits.h
ff_mpegvideodsp_init
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
Definition: mpegvideodsp.c:110
f
#define f(width, name)
Definition: cbs_vp9.c:255
ff_hpeldsp_init
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
Picture::reference
int reference
Definition: mpegpicture.h:88
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
ff_find_unused_picture
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:440
ff_print_debug_info2
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
Definition: mpegutils.c:103
NULL
#define NULL
Definition: coverity.c:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
me
#define me
Definition: vf_colormatrix.c:104
put_dct
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1894
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
src
#define src
Definition: vp8dsp.c:255
ff_set_qscale
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2351
mathops.h
FF_BUG_IEDGE
#define FF_BUG_IEDGE
Definition: avcodec.h:1266
lowres
static int lowres
Definition: ffplay.c:334
qpeldsp.h
ff_alternate_horizontal_scan
const uint8_t ff_alternate_horizontal_scan[64]
Definition: mpegvideodata.c:84
MpegEncContext::me
MotionEstContext me
Definition: mpegvideo.h:271
ff_alloc_picture
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:232
ME_MAP_SIZE
#define ME_MAP_SIZE
Definition: motion_est.h:38
op_pixels_func
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
wmv2.h
qpel_mc_func
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:51
dct_unquantize_mpeg2_intra_c
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:112
MV_TYPE_8X8
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:256
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
av_video_enc_params_create_side_data
AVVideoEncParams * av_video_enc_params_create_side_data(AVFrame *frame, enum AVVideoEncParamsType type, unsigned int nb_blocks)
Allocates memory for AVEncodeInfoFrame plus an array of.
Definition: video_enc_params.c:60
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1308
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:53
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:243
ff_mpeg_ref_picture
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:355
FF_QSCALE_TYPE_MPEG1
#define FF_QSCALE_TYPE_MPEG1
Definition: internal.h:96
ff_draw_horiz_band
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:51
ff_mpeg_er_init
int ff_mpeg_er_init(MpegEncContext *s)
Definition: mpeg_er.c:100
AV_CODEC_ID_H263
@ AV_CODEC_ID_H263
Definition: codec_id.h:54
ff_mpv_motion
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
Definition: mpegvideo_motion.c:974
alloc_picture
static int alloc_picture(MpegEncContext *s, Picture *pic)
Definition: mpegvideo.c:351
h264chroma.h
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1452
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo.c:2319
Picture::alloc_mb_height
int alloc_mb_height
mb_height used to allocate tables
Definition: mpegpicture.h:71
height
#define height
init_context_frame
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
Definition: mpegvideo.c:710
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo.c:524
mpegvideodata.h
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
attributes.h
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:258
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
Definition: mpegvideo.c:1439
ff_print_debug_info
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
Definition: mpegvideo.c:1432
Picture::motion_val
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
dct_unquantize_mpeg2_inter_c
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:176
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1451
clear_context
static void clear_context(MpegEncContext *s)
Definition: mpegvideo.c:844
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
FMT_H263
@ FMT_H263
Definition: mpegutils.h:125
dct_unquantize_h263_inter_c
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:244
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
ff_h264chroma_init
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
Definition: h264chroma.c:41
internal.h
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
ff_mpv_common_init_ppc
void ff_mpv_common_init_ppc(MpegEncContext *s)
Definition: mpegvideo_altivec.c:119
av_always_inline
#define av_always_inline
Definition: attributes.h:49
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
MpegEncContext::sc
ScratchpadContext sc
Definition: mpegvideo.h:192
ff_idctdsp_init
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
Definition: idctdsp.c:238
AVVideoBlockParams
Data structure for storing block-level encoding information.
Definition: video_enc_params.h:120
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
ff_wmv2_add_mb
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
Definition: wmv2.c:83
AVCodecContext::height
int height
Definition: avcodec.h:556
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo.c:1424
FF_DEBUG_NOMC
#define FF_DEBUG_NOMC
Definition: avcodec.h:1317
idctdsp.h
avcodec.h
msmpeg4.h
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ret
ret
Definition: filter_design.txt:187
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
Picture::qscale_table
int8_t * qscale_table
Definition: mpegpicture.h:50
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo.c:1200
free_context_frame
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution as well as the slice thread contex...
Definition: mpegvideo.c:1019
dct_init
static av_cold int dct_init(MpegEncContext *s)
Definition: mpegvideo.c:284
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
#define AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS
Decoding only.
Definition: avcodec.h:351
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
MpegEncContext::block
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:501
AVCodecContext
main external API structure.
Definition: avcodec.h:383
ScratchpadContext::edge_emu_buffer
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
Picture::mb_type
uint32_t * mb_type
types and macros are defined in mpegutils.h
Definition: mpegpicture.h:56
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
Picture::f
struct AVFrame * f
Definition: mpegpicture.h:46
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
dct_unquantize_h263_intra_c
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.c:210
ff_mpv_common_init_x86
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
Definition: mpegvideo.c:454
MPV_motion_lowres
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
Definition: mpegvideo.c:1724
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2267
ff_mpv_common_init_axp
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
Definition: mpegvideo_alpha.c:106
FF_MB_DECISION_RD
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:859
ff_mpv_common_init_mips
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
Definition: mpegvideo_init_mips.c:26
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:571
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
init_duplicate_contexts
static int init_duplicate_contexts(MpegEncContext *s)
Initialize an MpegEncContext's thread contexts.
Definition: mpegvideo.c:414
ff_default_chroma_qscale_table
const uint8_t ff_default_chroma_qscale_table[32]
Definition: mpegvideodata.c:21
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:272
COPY
#define COPY(a)
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:408
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:251
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:410
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:560
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
imgutils.h
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:362
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo.c:2365
add_dequant_dct
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Definition: mpegvideo.c:1910
av_video_enc_params_block
static av_always_inline AVVideoBlockParams * av_video_enc_params_block(AVVideoEncParams *par, unsigned int idx)
Definition: video_enc_params.h:143
h
h
Definition: vp9dsp_template.c:2038
av_image_check_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:318
ff_blockdsp_init
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
Definition: blockdsp.c:59
AV_CODEC_ID_FLV1
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:71
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo.c:693
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:50
int
int
Definition: ffmpeg_filter.c:153
mjpegenc.h
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
ff_mpv_common_init_neon
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
Definition: mpegvideo.c:127
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:71
gray8
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
Definition: mpegvideo.c:277
gray_frame
static void gray_frame(AVFrame *frame)
Definition: mpegvideo.c:1180
Picture::alloc_mb_stride
int alloc_mb_stride
mb_stride used to allocate tables
Definition: mpegpicture.h:72
video_enc_params.h