FFmpeg
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
mpegvideo.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /**
26  * @file
27  * The simplest mpeg encoder (well, it was the simplest!).
28  */
29 
30 #include "libavutil/imgutils.h"
31 #include "avcodec.h"
32 #include "dsputil.h"
33 #include "h264chroma.h"
34 #include "internal.h"
35 #include "mathops.h"
36 #include "mpegvideo.h"
37 #include "mjpegenc.h"
38 #include "msmpeg4.h"
39 #include "xvmc_internal.h"
40 #include "thread.h"
41 #include <limits.h>
42 
43 //#undef NDEBUG
44 //#include <assert.h>
45 
47  int16_t *block, int n, int qscale);
49  int16_t *block, int n, int qscale);
51  int16_t *block, int n, int qscale);
53  int16_t *block, int n, int qscale);
55  int16_t *block, int n, int qscale);
57  int16_t *block, int n, int qscale);
59  int16_t *block, int n, int qscale);
60 
61 
62 //#define DEBUG
63 
64 
66 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
67  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
68  16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
69 };
70 
72 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
73  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
74  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
75  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
80  8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
81 };
82 
// Constant intra-DC scale table: every qscale index (0..127) maps to a
// divisor of 4. Presumably selected via ff_mpeg2_dc_scale_table[] by the
// MPEG-2 intra_dc_precision field (the entries of that selector array are
// elided in this view) -- TODO confirm against the full source.
83 static const uint8_t mpeg2_dc_scale_table1[128] = {
84 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
85  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
88  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
89  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
90  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
91  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
92  4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
93 };
94 
// Constant intra-DC scale table: every qscale index (0..127) maps to a
// divisor of 2. Companion to mpeg2_dc_scale_table1/3; presumably indexed by
// the MPEG-2 intra_dc_precision field via ff_mpeg2_dc_scale_table[] -- the
// selector array's entries are elided in this view, so confirm in full source.
95 static const uint8_t mpeg2_dc_scale_table2[128] = {
96 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
97  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
98  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
99  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
100  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
101  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
102  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
103  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
104  2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
105 };
106 
// Constant intra-DC scale table: every qscale index (0..127) maps to a
// divisor of 1 (i.e. no DC scaling). Companion to mpeg2_dc_scale_table1/2;
// presumably the highest intra_dc_precision entry of ff_mpeg2_dc_scale_table[]
// -- confirm against the full source, the selector's entries are elided here.
107 static const uint8_t mpeg2_dc_scale_table3[128] = {
108 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
109  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
110  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
111  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
112  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
113  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
114  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
115  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
116  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
117 };
118 
119 const uint8_t *const ff_mpeg2_dc_scale_table[4] = {
124 };
125 
129 };
130 
// Error-resilience callback: reconstruct one macroblock. Loads the supplied
// motion/coding parameters into the context, computes the destination
// pointers for the current MB, then runs the regular MB decode path.
// NOTE(review): two original source lines (145-146) are elided in this
// extracted view, between the memcpy and clear_blocks calls.
131 static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
132  int (*mv)[2][4][2],
133  int mb_x, int mb_y, int mb_intra, int mb_skipped)
134 {
135  MpegEncContext *s = opaque;
136 
// Install the caller-provided MB parameters into the decoding context.
137  s->mv_dir = mv_dir;
138  s->mv_type = mv_type;
139  s->mb_intra = mb_intra;
140  s->mb_skipped = mb_skipped;
141  s->mb_x = mb_x;
142  s->mb_y = mb_y;
143  memcpy(s->mv, mv, sizeof(*mv));
144 
147 
148  s->dsp.clear_blocks(s->block[0]);
149 
// Point dest[] at the current 16x16 luma MB and the corresponding chroma
// blocks; chroma offsets are scaled by the chroma_x/y_shift subsampling.
150  s->dest[0] = s->current_picture.f.data[0] + (s->mb_y * 16 * s->linesize) + s->mb_x * 16;
151  s->dest[1] = s->current_picture.f.data[1] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
152  s->dest[2] = s->current_picture.f.data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
153 
// Only reference index 0 is supported by this callback.
154  assert(ref == 0);
155  ff_MPV_decode_mb(s, s->block);
156 }
157 
159  const uint8_t *end,
160  uint32_t *av_restrict state)
161 {
// Scan [p, end) for a 4-byte start code, carrying the rolling 32-bit *state
// across calls so codes split over buffer boundaries are still found.
// Returns a pointer just past the start code (or end if none was found).
// NOTE(review): the first line of the signature (orig. line 158) is elided
// in this extracted view.
162  int i;
163 
164  assert(p <= end);
165  if (p >= end)
166  return end;
167 
// Prime the rolling state with up to 3 bytes; a state of the form
// ...00 00 01 (tmp == 0x100 after the shift) means a code just completed.
168  for (i = 0; i < 3; i++) {
169  uint32_t tmp = *state << 8;
170  *state = tmp + *(p++);
171  if (tmp == 0x100 || p == end)
172  return p;
173  }
174 
// Main scan: examine p[-3..-1] and skip as many bytes as the byte values
// allow -- if p[-1] > 1 no 00 00 01 prefix can end within the next 2 bytes.
175  while (p < end) {
176  if (p[-1] > 1 ) p += 3;
177  else if (p[-2] ) p += 2;
178  else if (p[-3]|(p[-1]-1)) p++;
179  else {
180  p++;
181  break;
182  }
183  }
184 
// Reload *state from the last 4 bytes scanned so the next call can resume.
185  p = FFMIN(p, end) - 4;
186  *state = AV_RB32(p);
187 
188  return p + 4;
189 }
190 
191 /* init common dct for both encoder and decoder */
193 {
194  ff_dsputil_init(&s->dsp, s->avctx);
195  ff_h264chroma_init(&s->h264chroma, 8); //for lowres
197 
203  if (s->flags & CODEC_FLAG_BITEXACT)
206 
207 #if ARCH_X86
209 #elif ARCH_ALPHA
211 #elif ARCH_ARM
213 #elif HAVE_ALTIVEC
215 #elif ARCH_BFIN
217 #endif
218 
219  /* load & permutate scantables
220  * note: only wmv uses different ones
221  */
222  if (s->alternate_scan) {
225  } else {
228  }
231 
232  return 0;
233 }
234 
236 {
237  *dst = *src;
238  dst->f.type = FF_BUFFER_TYPE_COPY;
239 }
240 
241 /**
242  * Release a frame buffer
243  */
245 {
246  pic->period_since_free = 0;
247  /* WM Image / Screen codecs allocate internal buffers with different
248  * dimensions / colorspaces; ignore user-defined callbacks for these. */
249  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
252  ff_thread_release_buffer(s->avctx, &pic->f);
253  else
256 }
257 
259 {
260  int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
261 
262  // edge emu needs blocksize + filter length - 1
263  // (= 17x17 for halfpel / 21x21 for h264)
264  // VC1 computes luma and chroma simultaneously and needs 19X19 + 9x9
265  // at uvlinesize. It supports only YUV420 so 24x24 is enough
266  // linesize * interlaced * MBsize
267  FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 24,
268  fail);
269 
270  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 4 * 16 * 2,
271  fail)
272  s->me.temp = s->me.scratchpad;
273  s->rd_scratchpad = s->me.scratchpad;
274  s->b_scratchpad = s->me.scratchpad;
275  s->obmc_scratchpad = s->me.scratchpad + 16;
276 
277  return 0;
278 fail:
280  return AVERROR(ENOMEM);
281 }
282 
283 /**
284  * Allocate a frame buffer
285  */
287 {
288  int r, ret;
289 
290  if (s->avctx->hwaccel) {
291  assert(!pic->f.hwaccel_picture_private);
292  if (s->avctx->hwaccel->priv_data_size) {
294  if (!pic->f.hwaccel_picture_private) {
295  av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
296  return -1;
297  }
298  }
299  }
300 
301  if (s->codec_id != AV_CODEC_ID_WMV3IMAGE &&
304  r = ff_thread_get_buffer(s->avctx, &pic->f);
305  else
306  r = avcodec_default_get_buffer(s->avctx, &pic->f);
307 
308  if (r < 0 || !pic->f.type || !pic->f.data[0]) {
309  av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %p)\n",
310  r, pic->f.type, pic->f.data[0]);
312  return -1;
313  }
314 
315  if (s->linesize && (s->linesize != pic->f.linesize[0] ||
316  s->uvlinesize != pic->f.linesize[1])) {
318  "get_buffer() failed (stride changed)\n");
319  free_frame_buffer(s, pic);
320  return -1;
321  }
322 
323  if (pic->f.linesize[1] != pic->f.linesize[2]) {
325  "get_buffer() failed (uv stride mismatch)\n");
326  free_frame_buffer(s, pic);
327  return -1;
328  }
329 
330  if (!s->edge_emu_buffer &&
331  (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
333  "get_buffer() failed to allocate context scratch buffers.\n");
334  free_frame_buffer(s, pic);
335  return ret;
336  }
337 
338  return 0;
339 }
340 
341 /**
342  * Allocate a Picture.
343  * The pixels are allocated/set by calling get_buffer() if shared = 0
344  */
345 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared)
346 {
347  const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
348 
349  // the + 1 is needed so memset(,,stride*height) does not sig11
350 
351  const int mb_array_size = s->mb_stride * s->mb_height;
352  const int b8_array_size = s->b8_stride * s->mb_height * 2;
353  const int b4_array_size = s->b4_stride * s->mb_height * 4;
354  int i;
355  int r = -1;
356 
357  if (shared) {
358  assert(pic->f.data[0]);
359  assert(pic->f.type == 0 || pic->f.type == FF_BUFFER_TYPE_SHARED);
361  } else {
362  assert(!pic->f.data[0]);
363 
364  if (alloc_frame_buffer(s, pic) < 0)
365  return -1;
366 
367  s->linesize = pic->f.linesize[0];
368  s->uvlinesize = pic->f.linesize[1];
369  }
370 
371  if (pic->f.qscale_table == NULL) {
372  if (s->encoding) {
373  FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var,
374  mb_array_size * sizeof(int16_t), fail)
376  mb_array_size * sizeof(int16_t), fail)
378  mb_array_size * sizeof(int8_t ), fail)
379  }
380 
382  mb_array_size * sizeof(uint8_t) + 2, fail)// the + 2 is for the slice end check
384  (big_mb_num + s->mb_stride) * sizeof(uint8_t),
385  fail)
387  (big_mb_num + s->mb_stride) * sizeof(uint32_t),
388  fail)
389  pic->f.mb_type = pic->mb_type_base + 2 * s->mb_stride + 1;
390  pic->f.qscale_table = pic->qscale_table_base + 2 * s->mb_stride + 1;
391  if (s->out_format == FMT_H264) {
392  for (i = 0; i < 2; i++) {
394  2 * (b4_array_size + 4) * sizeof(int16_t),
395  fail)
396  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
397  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
398  4 * mb_array_size * sizeof(uint8_t), fail)
399  }
400  pic->f.motion_subsample_log2 = 2;
401  } else if (s->out_format == FMT_H263 || s->encoding ||
402  (s->avctx->debug & FF_DEBUG_MV) || s->avctx->debug_mv) {
403  for (i = 0; i < 2; i++) {
405  2 * (b8_array_size + 4) * sizeof(int16_t),
406  fail)
407  pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
408  FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i],
409  4 * mb_array_size * sizeof(uint8_t), fail)
410  }
411  pic->f.motion_subsample_log2 = 3;
412  }
413  if (s->avctx->debug&FF_DEBUG_DCT_COEFF) {
415  64 * mb_array_size * sizeof(int16_t) * 6, fail)
416  }
417  pic->f.qstride = s->mb_stride;
419  1 * sizeof(AVPanScan), fail)
420  }
421 
422  pic->owner2 = s;
423 
424  return 0;
425 fail: // for the FF_ALLOCZ_OR_GOTO macro
426  if (r >= 0)
427  free_frame_buffer(s, pic);
428  return -1;
429 }
430 
431 /**
432  * Deallocate a picture.
433  */
434 static void free_picture(MpegEncContext *s, Picture *pic)
435 {
436  int i;
437 
// Release the underlying frame buffer unless it is externally shared.
438  if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
439  free_frame_buffer(s, pic);
440  }
441 
// Free the per-picture side tables and null the aliasing AVFrame pointers
// that pointed into them.
// NOTE(review): one original source line (446) is elided in this extracted
// view between the mbskip_table free and the qscale_table reset.
442  av_freep(&pic->mb_var);
443  av_freep(&pic->mc_mb_var);
444  av_freep(&pic->mb_mean);
445  av_freep(&pic->f.mbskip_table);
447  pic->f.qscale_table = NULL;
448  av_freep(&pic->mb_type_base);
449  pic->f.mb_type = NULL;
450  av_freep(&pic->f.dct_coeff);
451  av_freep(&pic->f.pan_scan);
// NOTE(review): duplicated assignment -- f.mb_type was already set to NULL
// above (orig. line 449); this second reset is redundant (harmless).
452  pic->f.mb_type = NULL;
453  for (i = 0; i < 2; i++) {
454  av_freep(&pic->motion_val_base[i]);
455  av_freep(&pic->f.ref_index[i]);
456  pic->f.motion_val[i] = NULL;
457  }
458 
// For shared buffers we never owned the pixels: just drop the pointers.
459  if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
460  for (i = 0; i < 4; i++) {
461  pic->f.base[i] =
462  pic->f.data[i] = NULL;
463  }
464  pic->f.type = 0;
465  }
466 }
467 
469 {
470  int y_size = s->b8_stride * (2 * s->mb_height + 1);
471  int c_size = s->mb_stride * (s->mb_height + 1);
472  int yc_size = y_size + 2 * c_size;
473  int i;
474 
475  s->edge_emu_buffer =
476  s->me.scratchpad =
477  s->me.temp =
478  s->rd_scratchpad =
479  s->b_scratchpad =
480  s->obmc_scratchpad = NULL;
481 
482  if (s->encoding) {
483  FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
484  ME_MAP_SIZE * sizeof(uint32_t), fail)
486  ME_MAP_SIZE * sizeof(uint32_t), fail)
487  if (s->avctx->noise_reduction) {
489  2 * 64 * sizeof(int), fail)
490  }
491  }
492  FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
493  s->block = s->blocks[0];
494 
495  for (i = 0; i < 12; i++) {
496  s->pblocks[i] = &s->block[i];
497  }
498 
499  if (s->out_format == FMT_H263) {
500  /* ac values */
502  yc_size * sizeof(int16_t) * 16, fail);
503  s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
504  s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
505  s->ac_val[2] = s->ac_val[1] + c_size;
506  }
507 
508  return 0;
509 fail:
510  return -1; // free() through ff_MPV_common_end()
511 }
512 
514 {
515  if (s == NULL)
516  return;
517 
519  av_freep(&s->me.scratchpad);
520  s->me.temp =
521  s->rd_scratchpad =
522  s->b_scratchpad =
523  s->obmc_scratchpad = NULL;
524 
525  av_freep(&s->dct_error_sum);
526  av_freep(&s->me.map);
527  av_freep(&s->me.score_map);
528  av_freep(&s->blocks);
529  av_freep(&s->ac_val_base);
530  s->block = NULL;
531 }
532 
534 {
// Snapshot the per-slice-context scratch pointers and bitstream state from
// *src into *bak. Used around the whole-struct memcpy in the duplicate-
// context update path (see ff_update_duplicate_context below) so these
// thread-local fields survive the copy.
// NOTE(review): the signature line (orig. 533) is elided in this view.
535 #define COPY(a) bak->a = src->a
536  COPY(edge_emu_buffer);
537  COPY(me.scratchpad);
538  COPY(me.temp);
539  COPY(rd_scratchpad);
540  COPY(b_scratchpad);
541  COPY(obmc_scratchpad);
542  COPY(me.map);
543  COPY(me.score_map);
544  COPY(blocks);
545  COPY(block);
546  COPY(start_mb_y);
547  COPY(end_mb_y);
548  COPY(me.map_generation);
549  COPY(pb);
550  COPY(dct_error_sum);
551  COPY(dct_count[0]);
552  COPY(dct_count[1]);
553  COPY(ac_val_base);
554  COPY(ac_val[0]);
555  COPY(ac_val[1]);
556  COPY(ac_val[2]);
557 #undef COPY
558 }
559 
561 {
562  MpegEncContext bak;
563  int i, ret;
564  // FIXME copy only needed parts
565  // START_TIMER
566  backup_duplicate_context(&bak, dst);
567  memcpy(dst, src, sizeof(MpegEncContext));
568  backup_duplicate_context(dst, &bak);
569  for (i = 0; i < 12; i++) {
570  dst->pblocks[i] = &dst->block[i];
571  }
572  if (!dst->edge_emu_buffer &&
573  (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
574  av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
575  "scratch buffers.\n");
576  return ret;
577  }
578  // STOP_TIMER("update_duplicate_context")
579  // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
580  return 0;
581 }
582 
584  const AVCodecContext *src)
585 {
586  int i;
587  int err;
588  MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
589 
590  if (dst == src)
591  return 0;
592 
593  av_assert0(s != s1);
594 
595  // FIXME can parameters change on I-frames?
596  // in that case dst may need a reinit
597  if (!s->context_initialized) {
598  memcpy(s, s1, sizeof(MpegEncContext));
599 
600  s->avctx = dst;
601  s->bitstream_buffer = NULL;
603 
604  if (s1->context_initialized){
607  if((err = ff_MPV_common_init(s)) < 0){
608  memset(s, 0, sizeof(MpegEncContext));
609  s->avctx = dst;
610  return err;
611  }
612  }
613  }
614 
615  if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
616  s->context_reinit = 0;
617  s->height = s1->height;
618  s->width = s1->width;
619  if ((err = ff_MPV_common_frame_size_change(s)) < 0)
620  return err;
621  }
622 
623  s->avctx->coded_height = s1->avctx->coded_height;
624  s->avctx->coded_width = s1->avctx->coded_width;
625  s->avctx->width = s1->avctx->width;
626  s->avctx->height = s1->avctx->height;
627 
628  s->coded_picture_number = s1->coded_picture_number;
629  s->picture_number = s1->picture_number;
630  s->input_picture_number = s1->input_picture_number;
631 
632  av_assert0(!s->picture || s->picture != s1->picture);
633  memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
634  memcpy(&s->last_picture, &s1->last_picture,
635  (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
636 
637  // reset s->picture[].f.extended_data to s->picture[].f.data
638  for (i = 0; i < s->picture_count; i++) {
639  s->picture[i].f.extended_data = s->picture[i].f.data;
640  s->picture[i].period_since_free ++;
641  }
642 
643  s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
644  s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
645  s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
646 
647  // Error/bug resilience
648  s->next_p_frame_damaged = s1->next_p_frame_damaged;
649  s->workaround_bugs = s1->workaround_bugs;
650  s->padding_bug_score = s1->padding_bug_score;
651 
652  // MPEG4 timing info
653  memcpy(&s->time_increment_bits, &s1->time_increment_bits,
654  (char *) &s1->shape - (char *) &s1->time_increment_bits);
655 
656  // B-frame info
657  s->max_b_frames = s1->max_b_frames;
658  s->low_delay = s1->low_delay;
659  s->droppable = s1->droppable;
660 
661  // DivX handling (doesn't work)
662  s->divx_packed = s1->divx_packed;
663 
664  if (s1->bitstream_buffer) {
665  if (s1->bitstream_buffer_size +
669  s1->allocated_bitstream_buffer_size);
670  s->bitstream_buffer_size = s1->bitstream_buffer_size;
671  memcpy(s->bitstream_buffer, s1->bitstream_buffer,
672  s1->bitstream_buffer_size);
673  memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
675  }
676 
677  // linesize dependend scratch buffer allocation
678  if (!s->edge_emu_buffer)
679  if (s1->linesize) {
680  if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
681  av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
682  "scratch buffers.\n");
683  return AVERROR(ENOMEM);
684  }
685  } else {
686  av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
687  "be allocated due to unknown size.\n");
688  }
689 
690  // MPEG2/interlacing info
691  memcpy(&s->progressive_sequence, &s1->progressive_sequence,
692  (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
693 
694  if (!s1->first_field) {
695  s->last_pict_type = s1->pict_type;
696  if (s1->current_picture_ptr)
697  s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
698 
699  if (s1->pict_type != AV_PICTURE_TYPE_B) {
700  s->last_non_b_pict_type = s1->pict_type;
701  }
702  }
703 
704  return 0;
705 }
706 
707 /**
708  * Set the given MpegEncContext to common defaults
709  * (same for encoding and decoding).
710  * The changed fields will not depend upon the
711  * prior state of the MpegEncContext.
712  */
714 {
715  s->y_dc_scale_table =
718  s->progressive_frame = 1;
719  s->progressive_sequence = 1;
721 
722  s->coded_picture_number = 0;
723  s->picture_number = 0;
724  s->input_picture_number = 0;
725 
726  s->picture_in_gop_number = 0;
727 
728  s->f_code = 1;
729  s->b_code = 1;
730 
731  s->picture_range_start = 0;
733 
734  s->slice_context_count = 1;
735 }
736 
737 /**
738  * Set the given MpegEncContext to defaults for decoding.
739  * the changed fields will not depend upon
740  * the prior state of the MpegEncContext.
741  */
743 {
745 }
746 
747 static int init_er(MpegEncContext *s)
748 {
749  ERContext *er = &s->er;
750  int mb_array_size = s->mb_height * s->mb_stride;
751  int i;
752 
753  er->avctx = s->avctx;
754  er->dsp = &s->dsp;
755 
756  er->mb_index2xy = s->mb_index2xy;
757  er->mb_num = s->mb_num;
758  er->mb_width = s->mb_width;
759  er->mb_height = s->mb_height;
760  er->mb_stride = s->mb_stride;
761  er->b8_stride = s->b8_stride;
762 
764  er->error_status_table = av_mallocz(mb_array_size);
765  if (!er->er_temp_buffer || !er->error_status_table)
766  goto fail;
767 
768  er->mbskip_table = s->mbskip_table;
769  er->mbintra_table = s->mbintra_table;
770 
771  for (i = 0; i < FF_ARRAY_ELEMS(s->dc_val); i++)
772  er->dc_val[i] = s->dc_val[i];
773 
775  er->opaque = s;
776 
777  return 0;
778 fail:
779  av_freep(&er->er_temp_buffer);
781  return AVERROR(ENOMEM);
782 }
783 
784 /**
785  * Initialize and allocates MpegEncContext fields dependent on the resolution.
786  */
788 {
789  int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
790 
791  s->mb_width = (s->width + 15) / 16;
792  s->mb_stride = s->mb_width + 1;
793  s->b8_stride = s->mb_width * 2 + 1;
794  s->b4_stride = s->mb_width * 4 + 1;
795  mb_array_size = s->mb_height * s->mb_stride;
796  mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;
797 
798  /* set default edge pos, will be overriden
799  * in decode_header if needed */
800  s->h_edge_pos = s->mb_width * 16;
801  s->v_edge_pos = s->mb_height * 16;
802 
803  s->mb_num = s->mb_width * s->mb_height;
804 
805  s->block_wrap[0] =
806  s->block_wrap[1] =
807  s->block_wrap[2] =
808  s->block_wrap[3] = s->b8_stride;
809  s->block_wrap[4] =
810  s->block_wrap[5] = s->mb_stride;
811 
812  y_size = s->b8_stride * (2 * s->mb_height + 1);
813  c_size = s->mb_stride * (s->mb_height + 1);
814  yc_size = y_size + 2 * c_size;
815 
816  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int), fail); // error ressilience code looks cleaner with this
817  for (y = 0; y < s->mb_height; y++)
818  for (x = 0; x < s->mb_width; x++)
819  s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;
820 
821  s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?
822 
823  if (s->encoding) {
824  /* Allocate MV tables */
825  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
826  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
827  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
828  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
829  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
830  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
831  s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
837 
838  /* Allocate MB type table */
839  FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding
840 
841  FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
842 
844  mb_array_size * sizeof(float), fail);
846  mb_array_size * sizeof(float), fail);
847 
848  }
849 
850  if (s->codec_id == AV_CODEC_ID_MPEG4 ||
852  /* interlaced direct mode decoding tables */
853  for (i = 0; i < 2; i++) {
854  int j, k;
855  for (j = 0; j < 2; j++) {
856  for (k = 0; k < 2; k++) {
858  s->b_field_mv_table_base[i][j][k],
859  mv_table_size * 2 * sizeof(int16_t),
860  fail);
861  s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
862  s->mb_stride + 1;
863  }
864  FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
865  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
866  s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
867  }
868  FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
869  }
870  }
871  if (s->out_format == FMT_H263) {
872  /* cbp values */
873  FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
874  s->coded_block = s->coded_block_base + s->b8_stride + 1;
875 
876  /* cbp, ac_pred, pred_dir */
877  FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail);
878  FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
879  }
880 
881  if (s->h263_pred || s->h263_plus || !s->encoding) {
882  /* dc values */
883  // MN: we need these for error resilience of intra-frames
884  FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
885  s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
886  s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
887  s->dc_val[2] = s->dc_val[1] + c_size;
888  for (i = 0; i < yc_size; i++)
889  s->dc_val_base[i] = 1024;
890  }
891 
892  /* which mb is a intra block */
893  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
894  memset(s->mbintra_table, 1, mb_array_size);
895 
896  /* init macroblock skip table */
897  FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
898  // Note the + 1 is for a quicker mpeg4 slice_end detection
899 
900  return init_er(s);
901 fail:
902  return AVERROR(ENOMEM);
903 }
904 
905 /**
906  * init common structure for both encoder and decoder.
907  * this assumes that some variables like width/height are already set
908  */
910 {
911  int i;
912  int nb_slices = (HAVE_THREADS &&
914  s->avctx->thread_count : 1;
915 
916  if (s->encoding && s->avctx->slices)
917  nb_slices = s->avctx->slices;
918 
920  s->mb_height = (s->height + 31) / 32 * 2;
921  else if (s->codec_id != AV_CODEC_ID_H264)
922  s->mb_height = (s->height + 15) / 16;
923 
924  if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
926  "decoding to AV_PIX_FMT_NONE is not supported.\n");
927  return -1;
928  }
929 
930  if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
931  int max_slices;
932  if (s->mb_height)
933  max_slices = FFMIN(MAX_THREADS, s->mb_height);
934  else
935  max_slices = MAX_THREADS;
936  av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
937  " reducing to %d\n", nb_slices, max_slices);
938  nb_slices = max_slices;
939  }
940 
941  if ((s->width || s->height) &&
942  av_image_check_size(s->width, s->height, 0, s->avctx))
943  return -1;
944 
946 
947  s->flags = s->avctx->flags;
948  s->flags2 = s->avctx->flags2;
949 
950  /* set chroma shifts */
952 
953  /* convert fourcc to upper case */
956 
958 
959  if (s->encoding) {
960  if (s->msmpeg4_version) {
962  2 * 2 * (MAX_LEVEL + 1) *
963  (MAX_RUN + 1) * 2 * sizeof(int), fail);
964  }
965  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
966 
967  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail)
968  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail)
969  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail)
970  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
971  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
972  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail)
975 
976  if (s->avctx->noise_reduction) {
977  FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail);
978  }
979  }
980 
983  s->picture_count * sizeof(Picture), fail);
984  for (i = 0; i < s->picture_count; i++) {
986  }
987 
988  if (init_context_frame(s))
989  goto fail;
990 
991  s->parse_context.state = -1;
992 
993  s->context_initialized = 1;
994  s->thread_context[0] = s;
995 
996 // if (s->width && s->height) {
997  if (nb_slices > 1) {
998  for (i = 1; i < nb_slices; i++) {
999  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1000  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1001  }
1002 
1003  for (i = 0; i < nb_slices; i++) {
1004  if (init_duplicate_context(s->thread_context[i]) < 0)
1005  goto fail;
1006  s->thread_context[i]->start_mb_y =
1007  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1008  s->thread_context[i]->end_mb_y =
1009  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1010  }
1011  } else {
1012  if (init_duplicate_context(s) < 0)
1013  goto fail;
1014  s->start_mb_y = 0;
1015  s->end_mb_y = s->mb_height;
1016  }
1017  s->slice_context_count = nb_slices;
1018 // }
1019 
1020  return 0;
1021  fail:
1022  ff_MPV_common_end(s);
1023  return -1;
1024 }
1025 
1026 /**
1027  * Frees and resets MpegEncContext fields depending on the resolution.
1028  * Is used during resolution changes to avoid a full reinitialization of the
1029  * codec.
1030  */
1032 {
1033  int i, j, k;
1034 
1035  av_freep(&s->mb_type);
1042  s->p_mv_table = NULL;
1043  s->b_forw_mv_table = NULL;
1044  s->b_back_mv_table = NULL;
1047  s->b_direct_mv_table = NULL;
1048  for (i = 0; i < 2; i++) {
1049  for (j = 0; j < 2; j++) {
1050  for (k = 0; k < 2; k++) {
1051  av_freep(&s->b_field_mv_table_base[i][j][k]);
1052  s->b_field_mv_table[i][j][k] = NULL;
1053  }
1054  av_freep(&s->b_field_select_table[i][j]);
1055  av_freep(&s->p_field_mv_table_base[i][j]);
1056  s->p_field_mv_table[i][j] = NULL;
1057  }
1059  }
1060 
1061  av_freep(&s->dc_val_base);
1063  av_freep(&s->mbintra_table);
1064  av_freep(&s->cbp_table);
1065  av_freep(&s->pred_dir_table);
1066 
1067  av_freep(&s->mbskip_table);
1068 
1070  av_freep(&s->er.er_temp_buffer);
1071  av_freep(&s->mb_index2xy);
1072  av_freep(&s->lambda_table);
1073 
1074  av_freep(&s->cplx_tab);
1075  av_freep(&s->bits_tab);
1076 
1077  s->linesize = s->uvlinesize = 0;
1078 
1079  for (i = 0; i < 3; i++)
1081 
1082  return 0;
1083 }
1084 
1086 {
1087  int i, err = 0;
1088 
1089  if (s->slice_context_count > 1) {
1090  for (i = 0; i < s->slice_context_count; i++) {
1092  }
1093  for (i = 1; i < s->slice_context_count; i++) {
1094  av_freep(&s->thread_context[i]);
1095  }
1096  } else
1098 
1099  free_context_frame(s);
1100 
1101  if (s->picture)
1102  for (i = 0; i < s->picture_count; i++) {
1103  s->picture[i].needs_realloc = 1;
1104  }
1105 
1106  s->last_picture_ptr =
1107  s->next_picture_ptr =
1109 
1110  // init
1112  s->mb_height = (s->height + 31) / 32 * 2;
1113  else if (s->codec_id != AV_CODEC_ID_H264)
1114  s->mb_height = (s->height + 15) / 16;
1115 
1116  if ((s->width || s->height) &&
1117  av_image_check_size(s->width, s->height, 0, s->avctx))
1118  return AVERROR_INVALIDDATA;
1119 
1120  if ((err = init_context_frame(s)))
1121  goto fail;
1122 
1123  s->thread_context[0] = s;
1124 
1125  if (s->width && s->height) {
1126  int nb_slices = s->slice_context_count;
1127  if (nb_slices > 1) {
1128  for (i = 1; i < nb_slices; i++) {
1129  s->thread_context[i] = av_malloc(sizeof(MpegEncContext));
1130  memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
1131  }
1132 
1133  for (i = 0; i < nb_slices; i++) {
1134  if (init_duplicate_context(s->thread_context[i]) < 0)
1135  goto fail;
1136  s->thread_context[i]->start_mb_y =
1137  (s->mb_height * (i) + nb_slices / 2) / nb_slices;
1138  s->thread_context[i]->end_mb_y =
1139  (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1140  }
1141  } else {
1142  if (init_duplicate_context(s) < 0)
1143  goto fail;
1144  s->start_mb_y = 0;
1145  s->end_mb_y = s->mb_height;
1146  }
1147  s->slice_context_count = nb_slices;
1148  }
1149 
1150  return 0;
1151  fail:
1152  ff_MPV_common_end(s);
1153  return err;
1154 }
1155 
1156 /* init common structure for both encoder and decoder */
1158 {
1159  int i;
1160 
1161  if (s->slice_context_count > 1) {
1162  for (i = 0; i < s->slice_context_count; i++) {
1164  }
1165  for (i = 1; i < s->slice_context_count; i++) {
1166  av_freep(&s->thread_context[i]);
1167  }
1168  s->slice_context_count = 1;
1169  } else free_duplicate_context(s);
1170 
1172  s->parse_context.buffer_size = 0;
1173 
1176 
1177  av_freep(&s->avctx->stats_out);
1178  av_freep(&s->ac_stats);
1179 
1184  av_freep(&s->q_intra_matrix);
1185  av_freep(&s->q_inter_matrix);
1188  av_freep(&s->input_picture);
1190  av_freep(&s->dct_offset);
1191 
1192  if (s->picture && !s->avctx->internal->is_copy) {
1193  for (i = 0; i < s->picture_count; i++) {
1194  free_picture(s, &s->picture[i]);
1195  }
1196  }
1197  av_freep(&s->picture);
1198 
1199  free_context_frame(s);
1200 
1203 
1204  s->context_initialized = 0;
1205  s->last_picture_ptr =
1206  s->next_picture_ptr =
1208  s->linesize = s->uvlinesize = 0;
1209 }
1210 
1212  uint8_t static_store[2][2 * MAX_RUN + MAX_LEVEL + 3])
1213 {
1214  int8_t max_level[MAX_RUN + 1], max_run[MAX_LEVEL + 1];
1215  uint8_t index_run[MAX_RUN + 1];
1216  int last, run, level, start, end, i;
1217 
1218  /* If table is static, we can quit if rl->max_level[0] is not NULL */
1219  if (static_store && rl->max_level[0])
1220  return;
1221 
1222  /* compute max_level[], max_run[] and index_run[] */
1223  for (last = 0; last < 2; last++) {
1224  if (last == 0) {
1225  start = 0;
1226  end = rl->last;
1227  } else {
1228  start = rl->last;
1229  end = rl->n;
1230  }
1231 
1232  memset(max_level, 0, MAX_RUN + 1);
1233  memset(max_run, 0, MAX_LEVEL + 1);
1234  memset(index_run, rl->n, MAX_RUN + 1);
1235  for (i = start; i < end; i++) {
1236  run = rl->table_run[i];
1237  level = rl->table_level[i];
1238  if (index_run[run] == rl->n)
1239  index_run[run] = i;
1240  if (level > max_level[run])
1241  max_level[run] = level;
1242  if (run > max_run[level])
1243  max_run[level] = run;
1244  }
1245  if (static_store)
1246  rl->max_level[last] = static_store[last];
1247  else
1248  rl->max_level[last] = av_malloc(MAX_RUN + 1);
1249  memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
1250  if (static_store)
1251  rl->max_run[last] = static_store[last] + MAX_RUN + 1;
1252  else
1253  rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
1254  memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
1255  if (static_store)
1256  rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
1257  else
1258  rl->index_run[last] = av_malloc(MAX_RUN + 1);
1259  memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
1260  }
1261 }
1262 
1264 {
1265  int i, q;
1266 
1267  for (q = 0; q < 32; q++) {
1268  int qmul = q * 2;
1269  int qadd = (q - 1) | 1;
1270 
1271  if (q == 0) {
1272  qmul = 1;
1273  qadd = 0;
1274  }
1275  for (i = 0; i < rl->vlc.table_size; i++) {
1276  int code = rl->vlc.table[i][0];
1277  int len = rl->vlc.table[i][1];
1278  int level, run;
1279 
1280  if (len == 0) { // illegal code
1281  run = 66;
1282  level = MAX_LEVEL;
1283  } else if (len < 0) { // more bits needed
1284  run = 0;
1285  level = code;
1286  } else {
1287  if (code == rl->n) { // esc
1288  run = 66;
1289  level = 0;
1290  } else {
1291  run = rl->table_run[code] + 1;
1292  level = rl->table_level[code] * qmul + qadd;
1293  if (code >= rl->last) run += 192;
1294  }
1295  }
1296  rl->rl_vlc[q][i].len = len;
1297  rl->rl_vlc[q][i].level = level;
1298  rl->rl_vlc[q][i].run = run;
1299  }
1300  }
1301 }
1302 
1303 void ff_release_unused_pictures(MpegEncContext*s, int remove_current)
1304 {
1305  int i;
1306 
1307  /* release non reference frames */
1308  for (i = 0; i < s->picture_count; i++) {
1309  if (s->picture[i].f.data[0] && !s->picture[i].f.reference &&
1310  (!s->picture[i].owner2 || s->picture[i].owner2 == s) &&
1311  (remove_current || &s->picture[i] != s->current_picture_ptr)
1312  /* && s->picture[i].type!= FF_BUFFER_TYPE_SHARED */) {
1313  free_frame_buffer(s, &s->picture[i]);
1314  }
1315  }
1316 }
1317 
1318 static inline int pic_is_unused(MpegEncContext *s, Picture *pic)
1319 {
1321  && pic->f.qscale_table //check if the frame has anything allocated
1322  && pic->period_since_free < s->avctx->thread_count)
1323  return 0;
1324  if (pic->f.data[0] == NULL)
1325  return 1;
1326  if (pic->needs_realloc && !(pic->f.reference & DELAYED_PIC_REF))
1327  if (!pic->owner2 || pic->owner2 == s)
1328  return 1;
1329  return 0;
1330 }
1331 
1332 static int find_unused_picture(MpegEncContext *s, int shared)
1333 {
1334  int i;
1335 
1336  if (shared) {
1337  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1338  if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
1339  return i;
1340  }
1341  } else {
1342  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1343  if (pic_is_unused(s, &s->picture[i]) && s->picture[i].f.type != 0)
1344  return i; // FIXME
1345  }
1346  for (i = s->picture_range_start; i < s->picture_range_end; i++) {
1347  if (pic_is_unused(s, &s->picture[i]))
1348  return i;
1349  }
1350  }
1351 
1353  "Internal error, picture buffer overflow\n");
1354  /* We could return -1, but the codec would crash trying to draw into a
1355  * non-existing frame anyway. This is safer than waiting for a random crash.
1356  * Also the return of this is never useful, an encoder must only allocate
1357  * as much as allowed in the specification. This has no relationship to how
1358  * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
1359  * enough for such valid streams).
1360  * Plus, a decoder has to check stream validity and remove frames if too
1361  * many reference frames are around. Waiting for "OOM" is not correct at
1362  * all. Similarly, missing reference frames have to be replaced by
1363  * interpolated/MC frames, anything else is a bug in the codec ...
1364  */
1365  abort();
1366  return -1;
1367 }
1368 
1370 {
1371  int ret = find_unused_picture(s, shared);
1372 
1373  if (ret >= 0 && ret < s->picture_range_end) {
1374  if (s->picture[ret].needs_realloc) {
1375  s->picture[ret].needs_realloc = 0;
1376  free_picture(s, &s->picture[ret]);
1378  }
1379  }
1380  return ret;
1381 }
1382 
1384 {
1385  int intra, i;
1386 
1387  for (intra = 0; intra < 2; intra++) {
1388  if (s->dct_count[intra] > (1 << 16)) {
1389  for (i = 0; i < 64; i++) {
1390  s->dct_error_sum[intra][i] >>= 1;
1391  }
1392  s->dct_count[intra] >>= 1;
1393  }
1394 
1395  for (i = 0; i < 64; i++) {
1396  s->dct_offset[intra][i] = (s->avctx->noise_reduction *
1397  s->dct_count[intra] +
1398  s->dct_error_sum[intra][i] / 2) /
1399  (s->dct_error_sum[intra][i] + 1);
1400  }
1401  }
1402 }
1403 
1404 /**
1405  * generic function for encode/decode called after coding/decoding
1406  * the header and before a frame is coded/decoded.
1407  */
1409 {
1410  int i;
1411  Picture *pic;
1412  s->mb_skipped = 0;
1413 
1414  if (!ff_thread_can_start_frame(avctx)) {
1415  av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
1416  return -1;
1417  }
1418 
1419  /* mark & release old frames */
1420  if (s->out_format != FMT_H264 || s->codec_id == AV_CODEC_ID_SVQ3) {
1421  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1423  s->last_picture_ptr->f.data[0]) {
1424  if (s->last_picture_ptr->owner2 == s)
1426  }
1427 
1428  /* release forgotten pictures */
1429  /* if (mpeg124/h263) */
1430  if (!s->encoding) {
1431  for (i = 0; i < s->picture_count; i++) {
1432  if (s->picture[i].owner2 == s && s->picture[i].f.data[0] &&
1433  &s->picture[i] != s->last_picture_ptr &&
1434  &s->picture[i] != s->next_picture_ptr &&
1435  s->picture[i].f.reference && !s->picture[i].needs_realloc) {
1436  if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1437  av_log(avctx, AV_LOG_ERROR,
1438  "releasing zombie picture\n");
1439  free_frame_buffer(s, &s->picture[i]);
1440  }
1441  }
1442  }
1443  }
1444 
1445  if (!s->encoding) {
1447 
1448  if (s->current_picture_ptr &&
1449  s->current_picture_ptr->f.data[0] == NULL) {
1450  // we already have a unused image
1451  // (maybe it was set before reading the header)
1452  pic = s->current_picture_ptr;
1453  } else {
1454  i = ff_find_unused_picture(s, 0);
1455  if (i < 0) {
1456  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1457  return i;
1458  }
1459  pic = &s->picture[i];
1460  }
1461 
1462  pic->f.reference = 0;
1463  if (!s->droppable) {
1464  if (s->codec_id == AV_CODEC_ID_H264)
1465  pic->f.reference = s->picture_structure;
1466  else if (s->pict_type != AV_PICTURE_TYPE_B)
1467  pic->f.reference = 3;
1468  }
1469 
1471 
1472  if (ff_alloc_picture(s, pic, 0) < 0)
1473  return -1;
1474 
1475  s->current_picture_ptr = pic;
1476  // FIXME use only the vars from current_pic
1478  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
1480  if (s->picture_structure != PICT_FRAME)
1483  }
1487  }
1488 
1490  // if (s->flags && CODEC_FLAG_QSCALE)
1491  // s->current_picture_ptr->quality = s->new_picture_ptr->quality;
1493 
1495 
1496  if (s->pict_type != AV_PICTURE_TYPE_B) {
1498  if (!s->droppable)
1500  }
1501  av_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1506  s->pict_type, s->droppable);
1507 
1508  if (s->codec_id != AV_CODEC_ID_H264) {
1509  if ((s->last_picture_ptr == NULL ||
1510  s->last_picture_ptr->f.data[0] == NULL) &&
1511  (s->pict_type != AV_PICTURE_TYPE_I ||
1512  s->picture_structure != PICT_FRAME)) {
1513  int h_chroma_shift, v_chroma_shift;
1515  &h_chroma_shift, &v_chroma_shift);
1516  if (s->pict_type != AV_PICTURE_TYPE_I)
1517  av_log(avctx, AV_LOG_ERROR,
1518  "warning: first frame is no keyframe\n");
1519  else if (s->picture_structure != PICT_FRAME)
1520  av_log(avctx, AV_LOG_INFO,
1521  "allocate dummy last picture for field based first keyframe\n");
1522 
1523  /* Allocate a dummy frame */
1524  i = ff_find_unused_picture(s, 0);
1525  if (i < 0) {
1526  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1527  return i;
1528  }
1529  s->last_picture_ptr = &s->picture[i];
1530  s->last_picture_ptr->f.key_frame = 0;
1531  if (ff_alloc_picture(s, s->last_picture_ptr, 0) < 0) {
1532  s->last_picture_ptr = NULL;
1533  return -1;
1534  }
1535 
1536  memset(s->last_picture_ptr->f.data[0], 0x80,
1537  avctx->height * s->last_picture_ptr->f.linesize[0]);
1538  memset(s->last_picture_ptr->f.data[1], 0x80,
1539  (avctx->height >> v_chroma_shift) *
1540  s->last_picture_ptr->f.linesize[1]);
1541  memset(s->last_picture_ptr->f.data[2], 0x80,
1542  (avctx->height >> v_chroma_shift) *
1543  s->last_picture_ptr->f.linesize[2]);
1544 
1546  for(i=0; i<avctx->height; i++)
1547  memset(s->last_picture_ptr->f.data[0] + s->last_picture_ptr->f.linesize[0]*i, 16, avctx->width);
1548  }
1549 
1550  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 0);
1551  ff_thread_report_progress(&s->last_picture_ptr->f, INT_MAX, 1);
1552  s->last_picture_ptr->f.reference = 3;
1553  }
1554  if ((s->next_picture_ptr == NULL ||
1555  s->next_picture_ptr->f.data[0] == NULL) &&
1556  s->pict_type == AV_PICTURE_TYPE_B) {
1557  /* Allocate a dummy frame */
1558  i = ff_find_unused_picture(s, 0);
1559  if (i < 0) {
1560  av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
1561  return i;
1562  }
1563  s->next_picture_ptr = &s->picture[i];
1564  s->next_picture_ptr->f.key_frame = 0;
1565  if (ff_alloc_picture(s, s->next_picture_ptr, 0) < 0) {
1566  s->next_picture_ptr = NULL;
1567  return -1;
1568  }
1569  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 0);
1570  ff_thread_report_progress(&s->next_picture_ptr->f, INT_MAX, 1);
1571  s->next_picture_ptr->f.reference = 3;
1572  }
1573  }
1574 
1575  memset(s->last_picture.f.data, 0, sizeof(s->last_picture.f.data));
1576  memset(s->next_picture.f.data, 0, sizeof(s->next_picture.f.data));
1577  if (s->last_picture_ptr)
1579  if (s->next_picture_ptr)
1581 
1582  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME)) {
1583  if (s->next_picture_ptr)
1584  s->next_picture_ptr->owner2 = s;
1585  if (s->last_picture_ptr)
1586  s->last_picture_ptr->owner2 = s;
1587  }
1588 
1589  assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
1590  s->last_picture_ptr->f.data[0]));
1591 
1592  if (s->picture_structure!= PICT_FRAME && s->out_format != FMT_H264) {
1593  int i;
1594  for (i = 0; i < 4; i++) {
1596  s->current_picture.f.data[i] +=
1597  s->current_picture.f.linesize[i];
1598  }
1599  s->current_picture.f.linesize[i] *= 2;
1600  s->last_picture.f.linesize[i] *= 2;
1601  s->next_picture.f.linesize[i] *= 2;
1602  }
1603  }
1604 
1605  s->err_recognition = avctx->err_recognition;
1606 
1607  /* set dequantizer, we can't do it during init as
1608  * it might change for mpeg4 and we can't do it in the header
1609  * decode as init is not called for mpeg4 there yet */
1610  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1613  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1616  } else {
1619  }
1620 
1621  if (s->dct_error_sum) {
1622  assert(s->avctx->noise_reduction && s->encoding);
1624  }
1625 
1626  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1627  return ff_xvmc_field_start(s, avctx);
1628 
1629  return 0;
1630 }
1631 
1632 /* generic function for encode/decode called after a
1633  * frame has been coded/decoded. */
1635 {
1636  int i;
1637  /* redraw edges for the frame if decoding didn't complete */
1638  // just to make sure that all data is rendered.
1639  if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration) {
1640  ff_xvmc_field_end(s);
1641  } else if ((s->er.error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND)) &&
1642  !s->avctx->hwaccel &&
1644  s->unrestricted_mv &&
1646  !s->intra_only &&
1647  !(s->flags & CODEC_FLAG_EMU_EDGE) &&
1648  !s->avctx->lowres
1649  ) {
1651  int hshift = desc->log2_chroma_w;
1652  int vshift = desc->log2_chroma_h;
1654  s->h_edge_pos, s->v_edge_pos,
1656  EDGE_TOP | EDGE_BOTTOM);
1658  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1659  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1660  EDGE_TOP | EDGE_BOTTOM);
1662  s->h_edge_pos >> hshift, s->v_edge_pos >> vshift,
1663  EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift,
1664  EDGE_TOP | EDGE_BOTTOM);
1665  }
1666 
1667  emms_c();
1668 
1669  s->last_pict_type = s->pict_type;
1671  if (s->pict_type!= AV_PICTURE_TYPE_B) {
1673  }
1674 #if 0
1675  /* copy back current_picture variables */
1676  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
1677  if (s->picture[i].f.data[0] == s->current_picture.f.data[0]) {
1678  s->picture[i] = s->current_picture;
1679  break;
1680  }
1681  }
1682  assert(i < MAX_PICTURE_COUNT);
1683 #endif
1684 
1685  if (s->encoding) {
1686  /* release non-reference frames */
1687  for (i = 0; i < s->picture_count; i++) {
1688  if (s->picture[i].f.data[0] && !s->picture[i].f.reference
1689  /* && s->picture[i].type != FF_BUFFER_TYPE_SHARED */) {
1690  free_frame_buffer(s, &s->picture[i]);
1691  }
1692  }
1693  }
1694  // clear copies, to avoid confusion
1695 #if 0
1696  memset(&s->last_picture, 0, sizeof(Picture));
1697  memset(&s->next_picture, 0, sizeof(Picture));
1698  memset(&s->current_picture, 0, sizeof(Picture));
1699 #endif
1701 
1704  }
1705 }
1706 
1707 /**
1708  * Draw a line from (ex, ey) -> (sx, sy).
1709  * @param w width of the image
1710  * @param h height of the image
1711  * @param stride stride/linesize of the image
1712  * @param color color of the arrow
1713  */
1714 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey,
1715  int w, int h, int stride, int color)
1716 {
1717  int x, y, fr, f;
1718 
1719  sx = av_clip(sx, 0, w - 1);
1720  sy = av_clip(sy, 0, h - 1);
1721  ex = av_clip(ex, 0, w - 1);
1722  ey = av_clip(ey, 0, h - 1);
1723 
1724  buf[sy * stride + sx] += color;
1725 
1726  if (FFABS(ex - sx) > FFABS(ey - sy)) {
1727  if (sx > ex) {
1728  FFSWAP(int, sx, ex);
1729  FFSWAP(int, sy, ey);
1730  }
1731  buf += sx + sy * stride;
1732  ex -= sx;
1733  f = ((ey - sy) << 16) / ex;
1734  for (x = 0; x <= ex; x++) {
1735  y = (x * f) >> 16;
1736  fr = (x * f) & 0xFFFF;
1737  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1738  if(fr) buf[(y + 1) * stride + x] += (color * fr ) >> 16;
1739  }
1740  } else {
1741  if (sy > ey) {
1742  FFSWAP(int, sx, ex);
1743  FFSWAP(int, sy, ey);
1744  }
1745  buf += sx + sy * stride;
1746  ey -= sy;
1747  if (ey)
1748  f = ((ex - sx) << 16) / ey;
1749  else
1750  f = 0;
1751  for(y= 0; y <= ey; y++){
1752  x = (y*f) >> 16;
1753  fr = (y*f) & 0xFFFF;
1754  buf[y * stride + x] += (color * (0x10000 - fr)) >> 16;
1755  if(fr) buf[y * stride + x + 1] += (color * fr ) >> 16;
1756  }
1757  }
1758 }
1759 
1760 /**
1761  * Draw an arrow from (ex, ey) -> (sx, sy).
1762  * @param w width of the image
1763  * @param h height of the image
1764  * @param stride stride/linesize of the image
1765  * @param color color of the arrow
1766  */
/**
 * Draw an arrow from (ex, ey) -> (sx, sy): a shaft plus, when the vector
 * is long enough, two short head strokes at the (sx, sy) end.
 *
 * @param w      width of the image
 * @param h      height of the image
 * @param stride stride/linesize of the image
 * @param color  color of the arrow
 */
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
                       int ey, int w, int h, int stride, int color)
{
    int dx, dy;

    /* allow endpoints slightly outside the image; draw_line clips hard */
    sx = av_clip(sx, -100, w + 100);
    sy = av_clip(sy, -100, h + 100);
    ex = av_clip(ex, -100, w + 100);
    ey = av_clip(ey, -100, h + 100);

    dx = ex - sx;
    dy = ey - sy;

    if (dx * dx + dy * dy > 3 * 3) {
        /* head strokes: the vector rotated +/-45 degrees, scaled to ~3px */
        int rx  = dx + dy;
        int ry  = -dx + dy;
        int len = ff_sqrt((rx * rx + ry * ry) << 8);

        // FIXME subpixel accuracy
        rx = ROUNDED_DIV(rx * 3 << 4, len);
        ry = ROUNDED_DIV(ry * 3 << 4, len);

        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
    }
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
}
1794 
1795 /**
1796  * Print debugging info for the given picture.
1797  */
1798 void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
1799  uint8_t *visualization_buffer[3], int *low_delay,
1800  int mb_width, int mb_height, int mb_stride, int quarter_sample)
1801 {
1802  if ( avctx->hwaccel || !pict || !pict->mb_type
1804  return;
1805 
1806 
1807  if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
1808  int x,y;
1809 
1810  av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1812  for (y = 0; y < mb_height; y++) {
1813  for (x = 0; x < mb_width; x++) {
1814  if (avctx->debug & FF_DEBUG_SKIP) {
1815  int count = mbskip_table[x + y * mb_stride];
1816  if (count > 9)
1817  count = 9;
1818  av_log(avctx, AV_LOG_DEBUG, "%1d", count);
1819  }
1820  if (avctx->debug & FF_DEBUG_QP) {
1821  av_log(avctx, AV_LOG_DEBUG, "%2d",
1822  pict->qscale_table[x + y * mb_stride]);
1823  }
1824  if (avctx->debug & FF_DEBUG_MB_TYPE) {
1825  int mb_type = pict->mb_type[x + y * mb_stride];
1826  // Type & MV direction
1827  if (IS_PCM(mb_type))
1828  av_log(avctx, AV_LOG_DEBUG, "P");
1829  else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1830  av_log(avctx, AV_LOG_DEBUG, "A");
1831  else if (IS_INTRA4x4(mb_type))
1832  av_log(avctx, AV_LOG_DEBUG, "i");
1833  else if (IS_INTRA16x16(mb_type))
1834  av_log(avctx, AV_LOG_DEBUG, "I");
1835  else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1836  av_log(avctx, AV_LOG_DEBUG, "d");
1837  else if (IS_DIRECT(mb_type))
1838  av_log(avctx, AV_LOG_DEBUG, "D");
1839  else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
1840  av_log(avctx, AV_LOG_DEBUG, "g");
1841  else if (IS_GMC(mb_type))
1842  av_log(avctx, AV_LOG_DEBUG, "G");
1843  else if (IS_SKIP(mb_type))
1844  av_log(avctx, AV_LOG_DEBUG, "S");
1845  else if (!USES_LIST(mb_type, 1))
1846  av_log(avctx, AV_LOG_DEBUG, ">");
1847  else if (!USES_LIST(mb_type, 0))
1848  av_log(avctx, AV_LOG_DEBUG, "<");
1849  else {
1850  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1851  av_log(avctx, AV_LOG_DEBUG, "X");
1852  }
1853 
1854  // segmentation
1855  if (IS_8X8(mb_type))
1856  av_log(avctx, AV_LOG_DEBUG, "+");
1857  else if (IS_16X8(mb_type))
1858  av_log(avctx, AV_LOG_DEBUG, "-");
1859  else if (IS_8X16(mb_type))
1860  av_log(avctx, AV_LOG_DEBUG, "|");
1861  else if (IS_INTRA(mb_type) || IS_16X16(mb_type))
1862  av_log(avctx, AV_LOG_DEBUG, " ");
1863  else
1864  av_log(avctx, AV_LOG_DEBUG, "?");
1865 
1866 
1867  if (IS_INTERLACED(mb_type))
1868  av_log(avctx, AV_LOG_DEBUG, "=");
1869  else
1870  av_log(avctx, AV_LOG_DEBUG, " ");
1871  }
1872  }
1873  av_log(avctx, AV_LOG_DEBUG, "\n");
1874  }
1875  }
1876 
1877  if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
1878  (avctx->debug_mv)) {
1879  const int shift = 1 + quarter_sample;
1880  int mb_y;
1881  uint8_t *ptr;
1882  int i;
1883  int h_chroma_shift, v_chroma_shift, block_height;
1884  const int width = avctx->width;
1885  const int height = avctx->height;
1886  const int mv_sample_log2 = 4 - pict->motion_subsample_log2;
1887  const int mv_stride = (mb_width << mv_sample_log2) +
1888  (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
1889  *low_delay = 0; // needed to see the vectors without trashing the buffers
1890 
1891  avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1892 
1893  for (i = 0; i < 3; i++) {
1894  size_t size= (i == 0) ? pict->linesize[i] * FFALIGN(height, 16):
1895  pict->linesize[i] * FFALIGN(height, 16) >> v_chroma_shift;
1896  visualization_buffer[i]= av_realloc(visualization_buffer[i], size);
1897  memcpy(visualization_buffer[i], pict->data[i], size);
1898  pict->data[i] = visualization_buffer[i];
1899  }
1900  pict->type = FF_BUFFER_TYPE_COPY;
1901  pict->opaque= NULL;
1902  ptr = pict->data[0];
1903  block_height = 16 >> v_chroma_shift;
1904 
1905  for (mb_y = 0; mb_y < mb_height; mb_y++) {
1906  int mb_x;
1907  for (mb_x = 0; mb_x < mb_width; mb_x++) {
1908  const int mb_index = mb_x + mb_y * mb_stride;
1909  if ((avctx->debug_mv) && pict->motion_val[0]) {
1910  int type;
1911  for (type = 0; type < 3; type++) {
1912  int direction = 0;
1913  switch (type) {
1914  case 0:
1915  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
1916  (pict->pict_type!= AV_PICTURE_TYPE_P))
1917  continue;
1918  direction = 0;
1919  break;
1920  case 1:
1921  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
1922  (pict->pict_type!= AV_PICTURE_TYPE_B))
1923  continue;
1924  direction = 0;
1925  break;
1926  case 2:
1927  if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
1928  (pict->pict_type!= AV_PICTURE_TYPE_B))
1929  continue;
1930  direction = 1;
1931  break;
1932  }
1933  if (!USES_LIST(pict->mb_type[mb_index], direction))
1934  continue;
1935 
1936  if (IS_8X8(pict->mb_type[mb_index])) {
1937  int i;
1938  for (i = 0; i < 4; i++) {
1939  int sx = mb_x * 16 + 4 + 8 * (i & 1);
1940  int sy = mb_y * 16 + 4 + 8 * (i >> 1);
1941  int xy = (mb_x * 2 + (i & 1) +
1942  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
1943  int mx = (pict->motion_val[direction][xy][0] >> shift) + sx;
1944  int my = (pict->motion_val[direction][xy][1] >> shift) + sy;
1945  draw_arrow(ptr, sx, sy, mx, my, width,
1946  height, pict->linesize[0], 100);
1947  }
1948  } else if (IS_16X8(pict->mb_type[mb_index])) {
1949  int i;
1950  for (i = 0; i < 2; i++) {
1951  int sx = mb_x * 16 + 8;
1952  int sy = mb_y * 16 + 4 + 8 * i;
1953  int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
1954  int mx = (pict->motion_val[direction][xy][0] >> shift);
1955  int my = (pict->motion_val[direction][xy][1] >> shift);
1956 
1957  if (IS_INTERLACED(pict->mb_type[mb_index]))
1958  my *= 2;
1959 
1960  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1961  height, pict->linesize[0], 100);
1962  }
1963  } else if (IS_8X16(pict->mb_type[mb_index])) {
1964  int i;
1965  for (i = 0; i < 2; i++) {
1966  int sx = mb_x * 16 + 4 + 8 * i;
1967  int sy = mb_y * 16 + 8;
1968  int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
1969  int mx = pict->motion_val[direction][xy][0] >> shift;
1970  int my = pict->motion_val[direction][xy][1] >> shift;
1971 
1972  if (IS_INTERLACED(pict->mb_type[mb_index]))
1973  my *= 2;
1974 
1975  draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
1976  height, pict->linesize[0], 100);
1977  }
1978  } else {
1979  int sx= mb_x * 16 + 8;
1980  int sy= mb_y * 16 + 8;
1981  int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
1982  int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1983  int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1984  draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100);
1985  }
1986  }
1987  }
1988  if ((avctx->debug & FF_DEBUG_VIS_QP)) {
1989  uint64_t c = (pict->qscale_table[mb_index] * 128 / 31) *
1990  0x0101010101010101ULL;
1991  int y;
1992  for (y = 0; y < block_height; y++) {
1993  *(uint64_t *)(pict->data[1] + 8 * mb_x +
1994  (block_height * mb_y + y) *
1995  pict->linesize[1]) = c;
1996  *(uint64_t *)(pict->data[2] + 8 * mb_x +
1997  (block_height * mb_y + y) *
1998  pict->linesize[2]) = c;
1999  }
2000  }
2001  if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
2002  pict->motion_val[0]) {
2003  int mb_type = pict->mb_type[mb_index];
2004  uint64_t u,v;
2005  int y;
2006 #define COLOR(theta, r) \
2007  u = (int)(128 + r * cos(theta * 3.141592 / 180)); \
2008  v = (int)(128 + r * sin(theta * 3.141592 / 180));
2009 
2010 
2011  u = v = 128;
2012  if (IS_PCM(mb_type)) {
2013  COLOR(120, 48)
2014  } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
2015  IS_INTRA16x16(mb_type)) {
2016  COLOR(30, 48)
2017  } else if (IS_INTRA4x4(mb_type)) {
2018  COLOR(90, 48)
2019  } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
2020  // COLOR(120, 48)
2021  } else if (IS_DIRECT(mb_type)) {
2022  COLOR(150, 48)
2023  } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
2024  COLOR(170, 48)
2025  } else if (IS_GMC(mb_type)) {
2026  COLOR(190, 48)
2027  } else if (IS_SKIP(mb_type)) {
2028  // COLOR(180, 48)
2029  } else if (!USES_LIST(mb_type, 1)) {
2030  COLOR(240, 48)
2031  } else if (!USES_LIST(mb_type, 0)) {
2032  COLOR(0, 48)
2033  } else {
2034  av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
2035  COLOR(300,48)
2036  }
2037 
2038  u *= 0x0101010101010101ULL;
2039  v *= 0x0101010101010101ULL;
2040  for (y = 0; y < block_height; y++) {
2041  *(uint64_t *)(pict->data[1] + 8 * mb_x +
2042  (block_height * mb_y + y) * pict->linesize[1]) = u;
2043  *(uint64_t *)(pict->data[2] + 8 * mb_x +
2044  (block_height * mb_y + y) * pict->linesize[2]) = v;
2045  }
2046 
2047  // segmentation
2048  if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
2049  *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
2050  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2051  *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
2052  (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
2053  }
2054  if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
2055  for (y = 0; y < 16; y++)
2056  pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
2057  pict->linesize[0]] ^= 0x80;
2058  }
2059  if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
2060  int dm = 1 << (mv_sample_log2 - 2);
2061  for (i = 0; i < 4; i++) {
2062  int sx = mb_x * 16 + 8 * (i & 1);
2063  int sy = mb_y * 16 + 8 * (i >> 1);
2064  int xy = (mb_x * 2 + (i & 1) +
2065  (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
2066  // FIXME bidir
2067  int32_t *mv = (int32_t *) &pict->motion_val[0][xy];
2068  if (mv[0] != mv[dm] ||
2069  mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
2070  for (y = 0; y < 8; y++)
2071  pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
2072  if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
2073  *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
2074  pict->linesize[0]) ^= 0x8080808080808080ULL;
2075  }
2076  }
2077 
2078  if (IS_INTERLACED(mb_type) &&
2079  avctx->codec->id == AV_CODEC_ID_H264) {
2080  // hmm
2081  }
2082  }
2083  mbskip_table[mb_index] = 0;
2084  }
2085  }
2086  }
2087 }
2088 
2090 {
2092  s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
2093 }
2094 
2095 static inline int hpel_motion_lowres(MpegEncContext *s,
2096  uint8_t *dest, uint8_t *src,
2097  int field_based, int field_select,
2098  int src_x, int src_y,
2099  int width, int height, int stride,
2100  int h_edge_pos, int v_edge_pos,
2101  int w, int h, h264_chroma_mc_func *pix_op,
2102  int motion_x, int motion_y)
2103 {
2104  const int lowres = s->avctx->lowres;
2105  const int op_index = FFMIN(lowres, 2);
2106  const int s_mask = (2 << lowres) - 1;
2107  int emu = 0;
2108  int sx, sy;
2109 
2110  if (s->quarter_sample) {
2111  motion_x /= 2;
2112  motion_y /= 2;
2113  }
2114 
2115  sx = motion_x & s_mask;
2116  sy = motion_y & s_mask;
2117  src_x += motion_x >> lowres + 1;
2118  src_y += motion_y >> lowres + 1;
2119 
2120  src += src_y * stride + src_x;
2121 
2122  if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
2123  (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2124  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
2125  (h + 1) << field_based, src_x,
2126  src_y << field_based,
2127  h_edge_pos,
2128  v_edge_pos);
2129  src = s->edge_emu_buffer;
2130  emu = 1;
2131  }
2132 
2133  sx = (sx << 2) >> lowres;
2134  sy = (sy << 2) >> lowres;
2135  if (field_select)
2136  src += s->linesize;
2137  pix_op[op_index](dest, src, stride, h, sx, sy);
2138  return emu;
2139 }
2140 
2141 /* apply one mpeg motion vector to the three components */
2143  uint8_t *dest_y,
2144  uint8_t *dest_cb,
2145  uint8_t *dest_cr,
2146  int field_based,
2147  int bottom_field,
2148  int field_select,
2149  uint8_t **ref_picture,
2150  h264_chroma_mc_func *pix_op,
2151  int motion_x, int motion_y,
2152  int h, int mb_y)
2153 {
2154  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2155  int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
2156  uvsx, uvsy;
2157  const int lowres = s->avctx->lowres;
2158  const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
2159  const int block_s = 8>>lowres;
2160  const int s_mask = (2 << lowres) - 1;
2161  const int h_edge_pos = s->h_edge_pos >> lowres;
2162  const int v_edge_pos = s->v_edge_pos >> lowres;
2163  linesize = s->current_picture.f.linesize[0] << field_based;
2164  uvlinesize = s->current_picture.f.linesize[1] << field_based;
2165 
2166  // FIXME obviously not perfect but qpel will not work in lowres anyway
2167  if (s->quarter_sample) {
2168  motion_x /= 2;
2169  motion_y /= 2;
2170  }
2171 
2172  if(field_based){
2173  motion_y += (bottom_field - field_select)*((1 << lowres)-1);
2174  }
2175 
2176  sx = motion_x & s_mask;
2177  sy = motion_y & s_mask;
2178  src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
2179  src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
2180 
2181  if (s->out_format == FMT_H263) {
2182  uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
2183  uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
2184  uvsrc_x = src_x >> 1;
2185  uvsrc_y = src_y >> 1;
2186  } else if (s->out_format == FMT_H261) {
2187  // even chroma mv's are full pel in H261
2188  mx = motion_x / 4;
2189  my = motion_y / 4;
2190  uvsx = (2 * mx) & s_mask;
2191  uvsy = (2 * my) & s_mask;
2192  uvsrc_x = s->mb_x * block_s + (mx >> lowres);
2193  uvsrc_y = mb_y * block_s + (my >> lowres);
2194  } else {
2195  if(s->chroma_y_shift){
2196  mx = motion_x / 2;
2197  my = motion_y / 2;
2198  uvsx = mx & s_mask;
2199  uvsy = my & s_mask;
2200  uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
2201  uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
2202  } else {
2203  if(s->chroma_x_shift){
2204  //Chroma422
2205  mx = motion_x / 2;
2206  uvsx = mx & s_mask;
2207  uvsy = motion_y & s_mask;
2208  uvsrc_y = src_y;
2209  uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
2210  } else {
2211  //Chroma444
2212  uvsx = motion_x & s_mask;
2213  uvsy = motion_y & s_mask;
2214  uvsrc_x = src_x;
2215  uvsrc_y = src_y;
2216  }
2217  }
2218  }
2219 
2220  ptr_y = ref_picture[0] + src_y * linesize + src_x;
2221  ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
2222  ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
2223 
2224  if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
2225  (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
2226  s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
2227  linesize >> field_based, 17, 17 + field_based,
2228  src_x, src_y << field_based, h_edge_pos,
2229  v_edge_pos);
2230  ptr_y = s->edge_emu_buffer;
2231  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2232  uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
2233  s->vdsp.emulated_edge_mc(uvbuf , ptr_cb, uvlinesize >> field_based, 9,
2234  9 + field_based,
2235  uvsrc_x, uvsrc_y << field_based,
2236  h_edge_pos >> 1, v_edge_pos >> 1);
2237  s->vdsp.emulated_edge_mc(uvbuf + 16, ptr_cr, uvlinesize >> field_based, 9,
2238  9 + field_based,
2239  uvsrc_x, uvsrc_y << field_based,
2240  h_edge_pos >> 1, v_edge_pos >> 1);
2241  ptr_cb = uvbuf;
2242  ptr_cr = uvbuf + 16;
2243  }
2244  }
2245 
2246  // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
2247  if (bottom_field) {
2248  dest_y += s->linesize;
2249  dest_cb += s->uvlinesize;
2250  dest_cr += s->uvlinesize;
2251  }
2252 
2253  if (field_select) {
2254  ptr_y += s->linesize;
2255  ptr_cb += s->uvlinesize;
2256  ptr_cr += s->uvlinesize;
2257  }
2258 
2259  sx = (sx << 2) >> lowres;
2260  sy = (sy << 2) >> lowres;
2261  pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
2262 
2263  if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
2264  int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
2265  uvsx = (uvsx << 2) >> lowres;
2266  uvsy = (uvsy << 2) >> lowres;
2267  if (hc) {
2268  pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
2269  pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
2270  }
2271  }
2272  // FIXME h261 lowres loop filter
2273 }
2274 
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func * pix_op,
                                     int mx, int my)
{
    /* Chroma motion compensation for 4MV macroblocks when decoding at
     * reduced resolution (lowres): a single chroma vector (mx, my),
     * derived from the four luma vectors, covers the whole macroblock. */
    const int lowres = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int block_s = 8 >> lowres;                     /* chroma block size at this lowres level */
    const int s_mask = (2 << lowres) - 1;                /* mask of the sub-pel fraction bits */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;  /* note: shifts by (lowres + 1); '+' binds tighter than '>>' */
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    /* quarter-pel vectors are first reduced to half-pel precision */
    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    /* when the reference block sticks out of the picture, copy it through
     * the edge-emulation buffer first */
    if (s->flags & CODEC_FLAG_EMU_EDGE) {
        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
                                     9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr = s->edge_emu_buffer;
            emu = 1;
        }
    }
    /* rescale the sub-pel fraction to the precision pix_op expects */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    /* Cr lies at the same offset in its plane; redo edge emulation if
     * Cb needed it */
    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
2328 
2329 /**
2330  * motion compensation of a single macroblock
2331  * @param s context
2332  * @param dest_y luma destination pointer
2333  * @param dest_cb chroma cb/u destination pointer
2334  * @param dest_cr chroma cr/v destination pointer
2335  * @param dir direction (0->forward, 1->backward)
2336  * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2337  * @param pix_op halfpel motion compensation function (average or put normally)
2338  * the motion vectors are taken from s->mv and the MV type from s->mv_type
2339  */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres = s->avctx->lowres;
    const int block_s = 8 >>lowres;  /* 8x8 block size scaled by the lowres factor */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma uses their (specially rounded) sum,
         * see chroma_4mv_motion_lowres() */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* field picture: a same-parity reference of a non-B picture
             * that is not the first field lives in the current frame */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f.data;

            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, one per 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f.data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual prime: predictions from both field parities are combined */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f.data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
2470 
2471 /**
2472  * find the lowest MB row referenced in the MVs
2473  */
{
    /* Return the lowest (largest-y) macroblock row referenced by this MB's
     * motion vectors, so frame-threaded decoding knows how much of the
     * reference frame must be available before reconstructing this MB. */
    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
    int my, off, i, mvs;

    if (s->picture_structure != PICT_FRAME || s->mcsel)
        goto unhandled;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        goto unhandled;
    }

    /* track the extreme vertical displacements, normalized to qpel units */
    for (i = 0; i < mvs; i++) {
        my = s->mv[dir][i][1]<<qpel_shift;
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* worst-case displacement in MB rows (qpel units / 64, rounded up) */
    off = (FFMAX(-my_min, my_max) + 63) >> 6;

    return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
unhandled:
    /* conservative fallback: the whole reference frame may be needed */
    return s->mb_height-1;
}
2508 
2509 /* put block[] to dest[] */
2510 static inline void put_dct(MpegEncContext *s,
2511  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2512 {
2513  s->dct_unquantize_intra(s, block, i, qscale);
2514  s->dsp.idct_put (dest, line_size, block);
2515 }
2516 
2517 /* add block[] to dest[] */
2518 static inline void add_dct(MpegEncContext *s,
2519  int16_t *block, int i, uint8_t *dest, int line_size)
2520 {
2521  if (s->block_last_index[i] >= 0) {
2522  s->dsp.idct_add (dest, line_size, block);
2523  }
2524 }
2525 
2526 static inline void add_dequant_dct(MpegEncContext *s,
2527  int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
2528 {
2529  if (s->block_last_index[i] >= 0) {
2530  s->dct_unquantize_inter(s, block, i, qscale);
2531 
2532  s->dsp.idct_add (dest, line_size, block);
2533  }
2534 }
2535 
2536 /**
2537  * Clean dc, ac, coded_block for the current non-intra MB.
2538  */
{
    /* Reset DC/AC prediction state and coded-block flags for the current
     * macroblock (used once the MB turns out to be non-intra). */
    int wrap = s->b8_stride;
    int xy = s->block_index[0];

    /* luma DC predictors back to the mid-range reset value (1024) */
    s->dc_val[0][xy           ] =
    s->dc_val[0][xy + 1       ] =
    s->dc_val[0][xy     + wrap] =
    s->dc_val[0][xy + 1 + wrap] = 1024;
    /* ac pred */
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
    if (s->msmpeg4_version>=3) {
        s->coded_block[xy           ] =
        s->coded_block[xy + 1       ] =
        s->coded_block[xy     + wrap] =
        s->coded_block[xy + 1 + wrap] = 0;
    }
    /* chroma */
    wrap = s->mb_stride;
    xy = s->mb_x + s->mb_y * wrap;
    s->dc_val[1][xy] =
    s->dc_val[2][xy] = 1024;
    /* ac pred */
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));

    /* mark the MB as non-intra in the prediction table */
    s->mbintra_table[xy]= 0;
}
2568 
2569 /* generic function called after a macroblock has been parsed by the
2570  decoder or after it has been encoded by the encoder.
2571 
2572  Important variables used:
2573  s->mb_intra : true if intra macroblock
2574  s->mv_dir : motion vector direction
2575  s->mv_type : motion vector type
2576  s->mv : motion vector
2577  s->interlaced_dct : true if interlaced dct used (mpeg2)
2578  */
static av_always_inline
                               int lowres_flag, int is_mpeg12
{
    /* Reconstruct one macroblock: motion compensation (if inter), then
     * dequantization + IDCT of the residual/intra blocks into the
     * destination planes.  lowres_flag and is_mpeg12 are compile-time
     * constants at the call sites so dead branches are eliminated. */
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    /* XvMC does its own reconstruction from pblocks */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
        /* save DCT coefficients */
        int i,j;
        int16_t *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
        av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
        for(i=0; i<6; i++){
            for(j=0; j<64; j++){
                *dct++ = block[i][s->dsp.idct_permutation[j]];
                av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    s->current_picture.f.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f.linesize[1];
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                *mbskip_ptr = 1;
            } else if(!s->current_picture.f.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset = s->interlaced_dct ? linesize : linesize * block_size;

        /* B-frames without draw_horiz_band can be reconstructed into a
         * scratchpad since nobody reads them back */
        if(readable){
            dest_y= s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){

                /* with frame threading, wait until the referenced rows of
                 * the reference frames have been decoded */
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    if (s->mv_dir & MV_DIR_FORWARD) {
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                                                 0);
                    }
                }

                if(lowres_flag){

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    /* after a forward put, backward prediction averages in */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
               || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* blocks were already dequantized during parsing */
                add_dct(s, block[0], 0, dest_y , dct_linesize);
                add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
                put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* blocks are already dequantized; only the IDCT is needed */
                s->dsp.idct_put(dest_y , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;

                        s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* copy the scratchpad reconstruction back if it is needed after all */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
2822 
2823 void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
2824 #if !CONFIG_SMALL
2825  if(s->out_format == FMT_MPEG1) {
2826  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2827  else MPV_decode_mb_internal(s, block, 0, 1);
2828  } else
2829 #endif
2830  if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2831  else MPV_decode_mb_internal(s, block, 0, 0);
2832 }
2833 
2834 /**
2835  * @param h is the normal height, this will be reduced automatically if needed for the last row
2836  */
                        Picture *last, int y, int h, int picture_structure,
                        int first_field, int draw_edges, int low_delay,
                        int v_edge_pos, int h_edge_pos)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int hshift = desc->log2_chroma_w;
    int vshift = desc->log2_chroma_h;
    const int field_pic = picture_structure != PICT_FRAME;
    /* field pictures cover every second line; convert to frame coordinates */
    if(field_pic){
        h <<= 1;
        y <<= 1;
    }

    /* pad the picture borders so motion compensation may read outside the
     * visible area of reference frames */
    if (!avctx->hwaccel &&
        draw_edges &&
        cur->f.reference &&
        !(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int *linesize = cur->f.linesize;
        int sides = 0, edge_h;
        if (y==0) sides |= EDGE_TOP;
        if (y + h >= v_edge_pos)
            sides |= EDGE_BOTTOM;

        edge_h= FFMIN(h, v_edge_pos - y);

        dsp->draw_edges(cur->f.data[0] + y * linesize[0],
                        linesize[0], h_edge_pos, edge_h,
                        EDGE_WIDTH, EDGE_WIDTH, sides);
        dsp->draw_edges(cur->f.data[1] + (y >> vshift) * linesize[1],
                        linesize[1], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
        dsp->draw_edges(cur->f.data[2] + (y >> vshift) * linesize[2],
                        linesize[2], h_edge_pos >> hshift, edge_h >> vshift,
                        EDGE_WIDTH >> hshift, EDGE_WIDTH >> vshift, sides);
    }

    /* clip the band height at the bottom of the visible picture */
    h = FFMIN(h, avctx->height - y);

    if(field_pic && first_field && !(avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;

    if (avctx->draw_horiz_band) {
        AVFrame *src;
        int i;

        /* B-frames / low-delay streams are output immediately; otherwise
         * the band of the previous (complete) picture is shown */
        if(cur->f.pict_type == AV_PICTURE_TYPE_B || low_delay ||
            src = &cur->f;
        else if (last)
            src = &last->f;
        else
            return;

        if (cur->f.pict_type == AV_PICTURE_TYPE_B &&
            picture_structure == PICT_FRAME &&
            avctx->codec_id != AV_CODEC_ID_H264 &&
            avctx->codec_id != AV_CODEC_ID_SVQ3) {
            for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }else{
            offset[0]= y * src->linesize[0];
            offset[1]=
            offset[2]= (y >> vshift) * src->linesize[1];
            for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
                offset[i] = 0;
        }

        /* leave the FPU in a sane state before calling user code */
        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, picture_structure, h);
    }
}
2912 
{
    /* MpegEncContext wrapper around the generic band drawing; edge
     * padding is only needed for unrestricted-MV, non-intra-only streams */
    int draw_edges = s->unrestricted_mv && !s->intra_only;
                       &s->last_picture, y, h, s->picture_structure,
                       s->first_field, draw_edges, s->low_delay,
                       s->v_edge_pos, s->h_edge_pos);
}
2921 
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    /* Initialize per-MB 8x8 block indices and the destination-plane
     * pointers for the macroblock one column left of (mb_x, mb_y). */
    const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f.linesize[1];
    const int mb_size= 4 - s->avctx->lowres;  /* log2 of the MB size in pixels */

    /* indices of the four luma 8x8 blocks, then the two chroma blocks */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
    s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    {
        /* field pictures use every second MB row of the frame buffer */
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y * linesize << mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        }else{
            s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
        }
    }
}
2953 
2954 /**
2955  * Permute an 8x8 block.
2956  * @param block the block which will be permuted according to the given permutation vector
2957  * @param permutation the permutation vector
2958  * @param last the last non zero coefficient in scantable order, used to speed the permutation up
2959  * @param scantable the used scantable, this is only used to speed the permutation up, the block is not
2960  * (inverse) permutated to scantable order!
2961  */
/**
 * Permute an 8x8 block in place.
 * Only the coefficients reachable through the first (last+1) scantable
 * positions are moved; each one ends up at permutation[pos].
 * @param block       coefficients to permute
 * @param permutation maps each raster position to its permuted position
 * @param scantable   scan order, used only to limit the work to coded coefficients
 * @param last        index (in scan order) of the last nonzero coefficient;
 *                    nothing is done when last <= 0
 */
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
{
    int16_t saved[64];
    int idx;

    if (last <= 0)
        return;

    /* lift the coded coefficients out of the block, clearing their slots */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* drop each one back in at its permuted position */
    for (idx = 0; idx <= last; idx++) {
        const int pos = scantable[idx];
        block[permutation[pos]] = saved[pos];
    }
}
2982 
    int i;
    MpegEncContext *s = avctx->priv_data;

    if(s==NULL || s->picture==NULL)
        return;

    /* release every internally or user allocated picture buffer */
    for(i=0; i<s->picture_count; i++){
        if (s->picture[i].f.data[0] &&
            (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
             s->picture[i].f.type == FF_BUFFER_TYPE_USER))
            free_frame_buffer(s, &s->picture[i]);
    }

    s->mb_x= s->mb_y= 0;
    s->closed_gop= 0;

    /* reset the bitstream parser state */
    s->parse_context.state= -1;
    s->parse_context.overread= 0;
    s->parse_context.index= 0;
    s->parse_context.last_index= 0;
    s->bitstream_buffer_size=0;
    s->pp_time=0;
}
3010 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-1 intra dequantization: level * qscale * matrix / 8, with the
     * result forced to an odd magnitude (mismatch control). */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    /* DC uses the luma/chroma DC scale, not the quant matrix */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    /* XXX: only mpeg1 */
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                /* (level - 1) | 1 rounds the magnitude down to odd */
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
3039 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-1 inter dequantization: (2*level + 1) * qscale * matrix / 16,
     * with the result forced to an odd magnitude (mismatch control). */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    /* inter blocks have no separately scaled DC; start at coefficient 0 */
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                /* (level - 1) | 1 rounds the magnitude down to odd */
                level = (level - 1) | 1;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = (level - 1) | 1;
            }
            block[j] = level;
        }
    }
}
3068 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-2 intra dequantization: level * qscale * matrix / 8
     * (no oddification; MPEG-2 mismatch control is handled elsewhere). */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* with alternate scan, process all 63 AC coefficients */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* DC uses the luma/chroma DC scale, not the quant matrix */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
        }
    }
}
3095 
                                   int16_t *block, int n, int qscale)
{
    /* Bitexact MPEG-2 intra dequantization: same arithmetic as the plain
     * variant, plus spec-conforming mismatch control on the last coefficient. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;  /* running coefficient sum for mismatch control */

    /* with alternate scan, process all 63 AC coefficients */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    /* DC uses the luma/chroma DC scale, not the quant matrix */
    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for(i=1;i<=nCoeffs;i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* mismatch control: toggle the LSB of coefficient 63 so the sum is odd */
    block[63]^=sum&1;
}
3126 
                                   int16_t *block, int n, int qscale)
{
    /* MPEG-2 inter dequantization: (2*level + 1) * qscale * matrix / 16,
     * with mismatch control applied to the last coefficient. */
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum=-1;  /* running coefficient sum for mismatch control */

    /* with alternate scan, process all 63 AC coefficients */
    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    quant_matrix = s->inter_matrix;
    for(i=0; i<=nCoeffs; i++) {
        int j= s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
                level = -level;
            } else {
                level = (((level << 1) + 1) * qscale *
                         ((int) (quant_matrix[j]))) >> 4;
            }
            block[j] = level;
            sum+=level;
        }
    }
    /* mismatch control: toggle the LSB of coefficient 63 so the sum is odd */
    block[63]^=sum&1;
}
3157 
                                  int16_t *block, int n, int qscale)
{
    /* H.263-style intra dequantization: level * 2*qscale +/- qadd,
     * sign-preserving, with qadd acting as the dead-zone offset. */
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n]>=0);

    qmul = qscale << 1;

    if (!s->h263_aic) {
        /* DC uses the luma/chroma DC scale, not qmul/qadd */
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }else{
        /* advanced intra coding: no dead-zone offset */
        qadd = 0;
    }
    /* with AC prediction all 63 AC coefficients may be nonzero */
    if(s->ac_pred)
        nCoeffs=63;
    else
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=1; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
3191 
                                  int16_t *block, int n, int qscale)
{
    /* H.263-style inter dequantization: level * 2*qscale +/- qadd for
     * every coded coefficient, including DC. */
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;  /* dead-zone offset, always odd */
    qmul = qscale << 1;

    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];

    for(i=0; i<=nCoeffs; i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
            block[i] = level;
        }
    }
}
3217 
3218 /**
3219  * set qscale and update qscale dependent variables.
3220  */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    /* clamp to the legal quantizer range [1, 31] */
    if (qscale < 1)
        qscale = 1;
    else if (qscale > 31)
        qscale = 31;

    s->qscale = qscale;
    s->chroma_qscale= s->chroma_qscale_table[qscale];

    /* refresh the qscale-dependent DC scale */
    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
}
3234 
3236 {
3239 }
3240 
{
    /* Copy the state the error resilience code needs out of the
     * MpegEncContext, then start ER tracking for the current frame. */
    ERContext *er = &s->er;

    er->cur_pic  = s->current_picture_ptr;
    er->last_pic = s->last_picture_ptr;
    er->next_pic = s->next_picture_ptr;

    er->pp_time = s->pp_time;
    er->pb_time = s->pb_time;
    er->quarter_sample = s->quarter_sample;

    ff_er_frame_start(er);
}