mpeg12dec.c
1 /*
2  * MPEG-1/2 decoder
3  * Copyright (c) 2000, 2001 Fabrice Bellard
4  * Copyright (c) 2002-2013 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 /**
24  * @file
25  * MPEG-1/2 decoder
26  */
27 
28 #include "config_components.h"
29 
30 #define UNCHECKED_BITSTREAM_READER 1
31 #include <inttypes.h>
32 
33 #include "libavutil/attributes.h"
34 #include "libavutil/imgutils.h"
35 #include "libavutil/internal.h"
36 #include "libavutil/mem_internal.h"
37 #include "libavutil/reverse.h"
38 #include "libavutil/stereo3d.h"
39 #include "libavutil/timecode.h"
40 
41 #include "avcodec.h"
42 #include "codec_internal.h"
43 #include "decode.h"
44 #include "error_resilience.h"
45 #include "hwconfig.h"
46 #include "idctdsp.h"
47 #include "internal.h"
48 #include "mpeg_er.h"
49 #include "mpeg12.h"
50 #include "mpeg12data.h"
51 #include "mpeg12dec.h"
52 #include "mpegutils.h"
53 #include "mpegvideo.h"
54 #include "mpegvideodata.h"
55 #include "mpegvideodec.h"
56 #include "profiles.h"
57 #include "startcode.h"
58 #include "thread.h"
59 
60 #define A53_MAX_CC_COUNT 2000
61 
62 typedef struct Mpeg1Context {
63  MpegEncContext mpeg_enc_ctx;
64  int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
65  int repeat_field; /* true if we must repeat the field */
66  AVPanScan pan_scan; /* some temporary storage for the panscan */
67  AVStereo3D stereo3d;
68  int has_stereo3d;
69  AVBufferRef *a53_buf_ref;
70  uint8_t afd;
71  int has_afd;
72  int slice_count;
73  unsigned aspect_ratio_info;
74  AVRational save_aspect;
75  int save_width, save_height, save_progressive_seq;
76  int rc_buffer_size;
77  AVRational frame_rate_ext; /* MPEG-2 specific framerate modifier */
78  unsigned frame_rate_index;
79  int sync; /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
80  int closed_gop;
81  int tmpgexs;
82  int first_slice;
83  int extradata_decoded;
84  int64_t timecode_frame_start; /*< GOP timecode frame start number, in non-drop-frame format */
85 } Mpeg1Context;
86 
87 #define MB_TYPE_ZERO_MV 0x20000000
88 
89 static const uint32_t ptype2mb_type[7] = {
90  MB_TYPE_INTRA,
91  MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
92  MB_TYPE_L0,
93  MB_TYPE_L0 | MB_TYPE_CBP,
94  MB_TYPE_QUANT | MB_TYPE_INTRA,
95  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
96  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
97 };
98 
99 static const uint32_t btype2mb_type[11] = {
100  MB_TYPE_INTRA,
101  MB_TYPE_L1,
102  MB_TYPE_L1 | MB_TYPE_CBP,
103  MB_TYPE_L0,
104  MB_TYPE_L0 | MB_TYPE_CBP,
105  MB_TYPE_L0L1,
106  MB_TYPE_L0L1 | MB_TYPE_CBP,
107  MB_TYPE_QUANT | MB_TYPE_INTRA,
108  MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
109  MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
110  MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
111 };
112 
113 /* as H.263, but only 17 codes */
114 static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
115 {
116  int code, sign, val, shift;
117 
118  code = get_vlc2(&s->gb, ff_mv_vlc.table, MV_VLC_BITS, 2);
119  if (code == 0)
120  return pred;
121  if (code < 0)
122  return 0xffff;
123 
124  sign = get_bits1(&s->gb);
125  shift = fcode - 1;
126  val = code;
127  if (shift) {
128  val = (val - 1) << shift;
129  val |= get_bits(&s->gb, shift);
130  val++;
131  }
132  if (sign)
133  val = -val;
134  val += pred;
135 
136  /* modulo decoding */
137  return sign_extend(val, 5 + shift);
138 }
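/* mpeg_decode_motion() above applies the MPEG-1/2 "modulo" rule for motion
 * vectors: the prediction plus the coded difference is wrapped into
 * [-16 << (f_code - 1), (16 << (f_code - 1)) - 1] by sign_extend(val, 5 + shift).
 * E.g. with f_code = 2 (shift = 1) the range is [-32, 31], so a prediction of
 * 30 plus a decoded difference of +5 wraps around to -29. */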
139 
140 #define MAX_INDEX (64 - 1)
141 #define check_scantable_index(ctx, x) \
142  do { \
143  if ((x) > MAX_INDEX) { \
144  av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n", \
145  ctx->mb_x, ctx->mb_y); \
146  return AVERROR_INVALIDDATA; \
147  } \
148  } while (0)
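/* An index above 63 means the (run, level) pairs have overrun the 8x8 block,
 * which only happens on a damaged bitstream; the non-"fast" decoders below
 * bail out with AVERROR_INVALIDDATA in that case. */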
149 
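/* MPEG-1 inter-block reconstruction, used by the two functions below:
 * each (run, level) pair is dequantized as
 * ((2 * level + 1) * qscale * quant_matrix[j]) >> 5 and then forced odd with
 * (level - 1) | 1, MPEG-1's mismatch-control rule that keeps encoder and
 * decoder IDCTs from drifting apart. */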
150 static inline int mpeg1_decode_block_inter(MpegEncContext *s,
151  int16_t *block, int n)
152 {
153  int level, i, j, run;
154  RLTable *rl = &ff_rl_mpeg1;
155  uint8_t *const scantable = s->intra_scantable.permutated;
156  const uint16_t *quant_matrix = s->inter_matrix;
157  const int qscale = s->qscale;
158 
159  {
160  OPEN_READER(re, &s->gb);
161  i = -1;
162  // special case for first coefficient, no need to add second VLC table
163  UPDATE_CACHE(re, &s->gb);
164  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
165  level = (3 * qscale * quant_matrix[0]) >> 5;
166  level = (level - 1) | 1;
167  if (GET_CACHE(re, &s->gb) & 0x40000000)
168  level = -level;
169  block[0] = level;
170  i++;
171  SKIP_BITS(re, &s->gb, 2);
172  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
173  goto end;
174  }
175  /* now dequantize & decode the AC coefficients */
176  for (;;) {
177  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
178  TEX_VLC_BITS, 2, 0);
179 
180  if (level != 0) {
181  i += run;
182  if (i > MAX_INDEX)
183  break;
184  j = scantable[i];
185  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
186  level = (level - 1) | 1;
187  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
188  SHOW_SBITS(re, &s->gb, 1);
189  SKIP_BITS(re, &s->gb, 1);
190  } else {
191  /* escape */
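/* MPEG-1 escape coding: a 6-bit run is followed by an 8-bit signed level;
 * the marker values -128 and 0 mean the real level is carried in one more
 * byte, extending the usable level range to roughly +/-255. */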
192  run = SHOW_UBITS(re, &s->gb, 6) + 1;
193  LAST_SKIP_BITS(re, &s->gb, 6);
194  UPDATE_CACHE(re, &s->gb);
195  level = SHOW_SBITS(re, &s->gb, 8);
196  SKIP_BITS(re, &s->gb, 8);
197  if (level == -128) {
198  level = SHOW_UBITS(re, &s->gb, 8) - 256;
199  SKIP_BITS(re, &s->gb, 8);
200  } else if (level == 0) {
201  level = SHOW_UBITS(re, &s->gb, 8);
202  SKIP_BITS(re, &s->gb, 8);
203  }
204  i += run;
205  if (i > MAX_INDEX)
206  break;
207  j = scantable[i];
208  if (level < 0) {
209  level = -level;
210  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
211  level = (level - 1) | 1;
212  level = -level;
213  } else {
214  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
215  level = (level - 1) | 1;
216  }
217  }
218 
219  block[j] = level;
220  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
221  break;
222  UPDATE_CACHE(re, &s->gb);
223  }
224 end:
225  LAST_SKIP_BITS(re, &s->gb, 2);
226  CLOSE_READER(re, &s->gb);
227  }
228 
229  check_scantable_index(s, i);
230 
231  s->block_last_index[n] = i;
232  return 0;
233 }
234 
235 /**
236  * Changing this would eat up any speed benefits it has.
237  * Do not use the "fast" flag if you need the code to be robust.
238  */
239 static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s,
240  int16_t *block, int n)
241 {
242  int level, i, j, run;
243  RLTable *rl = &ff_rl_mpeg1;
244  uint8_t *const scantable = s->intra_scantable.permutated;
245  const int qscale = s->qscale;
246 
247  {
248  OPEN_READER(re, &s->gb);
249  i = -1;
250  // Special case for first coefficient, no need to add second VLC table.
251  UPDATE_CACHE(re, &s->gb);
252  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
253  level = (3 * qscale) >> 1;
254  level = (level - 1) | 1;
255  if (GET_CACHE(re, &s->gb) & 0x40000000)
256  level = -level;
257  block[0] = level;
258  i++;
259  SKIP_BITS(re, &s->gb, 2);
260  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
261  goto end;
262  }
263 
264  /* now dequantize & decode the AC coefficients */
265  for (;;) {
266  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
267  TEX_VLC_BITS, 2, 0);
268 
269  if (level != 0) {
270  i += run;
271  if (i > MAX_INDEX)
272  break;
273  j = scantable[i];
274  level = ((level * 2 + 1) * qscale) >> 1;
275  level = (level - 1) | 1;
276  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
277  SHOW_SBITS(re, &s->gb, 1);
278  SKIP_BITS(re, &s->gb, 1);
279  } else {
280  /* escape */
281  run = SHOW_UBITS(re, &s->gb, 6) + 1;
282  LAST_SKIP_BITS(re, &s->gb, 6);
283  UPDATE_CACHE(re, &s->gb);
284  level = SHOW_SBITS(re, &s->gb, 8);
285  SKIP_BITS(re, &s->gb, 8);
286  if (level == -128) {
287  level = SHOW_UBITS(re, &s->gb, 8) - 256;
288  SKIP_BITS(re, &s->gb, 8);
289  } else if (level == 0) {
290  level = SHOW_UBITS(re, &s->gb, 8);
291  SKIP_BITS(re, &s->gb, 8);
292  }
293  i += run;
294  if (i > MAX_INDEX)
295  break;
296  j = scantable[i];
297  if (level < 0) {
298  level = -level;
299  level = ((level * 2 + 1) * qscale) >> 1;
300  level = (level - 1) | 1;
301  level = -level;
302  } else {
303  level = ((level * 2 + 1) * qscale) >> 1;
304  level = (level - 1) | 1;
305  }
306  }
307 
308  block[j] = level;
309  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
310  break;
311  UPDATE_CACHE(re, &s->gb);
312  }
313 end:
314  LAST_SKIP_BITS(re, &s->gb, 2);
315  CLOSE_READER(re, &s->gb);
316  }
317 
318  check_scantable_index(s, i);
319 
320  s->block_last_index[n] = i;
321  return 0;
322 }
323 
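/* The MPEG-2 non-intra path below differs from MPEG-1 in two ways: the
 * escape level is a plain signed 12-bit value (no double-byte trick, no
 * oddification), and "mismatch control" is used instead: the XOR of all
 * reconstructed levels is folded into the LSB of block[63] so the
 * coefficient sum ends up odd, as ISO/IEC 13818-2 requires. */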
324 static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
325  int16_t *block, int n)
326 {
327  int level, i, j, run;
328  RLTable *rl = &ff_rl_mpeg1;
329  uint8_t *const scantable = s->intra_scantable.permutated;
330  const uint16_t *quant_matrix;
331  const int qscale = s->qscale;
332  int mismatch;
333 
334  mismatch = 1;
335 
336  {
337  OPEN_READER(re, &s->gb);
338  i = -1;
339  if (n < 4)
340  quant_matrix = s->inter_matrix;
341  else
342  quant_matrix = s->chroma_inter_matrix;
343 
344  // Special case for first coefficient, no need to add second VLC table.
345  UPDATE_CACHE(re, &s->gb);
346  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
347  level = (3 * qscale * quant_matrix[0]) >> 5;
348  if (GET_CACHE(re, &s->gb) & 0x40000000)
349  level = -level;
350  block[0] = level;
351  mismatch ^= level;
352  i++;
353  SKIP_BITS(re, &s->gb, 2);
354  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
355  goto end;
356  }
357 
358  /* now dequantize & decode the AC coefficients */
359  for (;;) {
360  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
361  TEX_VLC_BITS, 2, 0);
362 
363  if (level != 0) {
364  i += run;
365  if (i > MAX_INDEX)
366  break;
367  j = scantable[i];
368  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
369  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
370  SHOW_SBITS(re, &s->gb, 1);
371  SKIP_BITS(re, &s->gb, 1);
372  } else {
373  /* escape */
374  run = SHOW_UBITS(re, &s->gb, 6) + 1;
375  LAST_SKIP_BITS(re, &s->gb, 6);
376  UPDATE_CACHE(re, &s->gb);
377  level = SHOW_SBITS(re, &s->gb, 12);
378  SKIP_BITS(re, &s->gb, 12);
379 
380  i += run;
381  if (i > MAX_INDEX)
382  break;
383  j = scantable[i];
384  if (level < 0) {
385  level = ((-level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
386  level = -level;
387  } else {
388  level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
389  }
390  }
391 
392  mismatch ^= level;
393  block[j] = level;
394  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
395  break;
396  UPDATE_CACHE(re, &s->gb);
397  }
398 end:
399  LAST_SKIP_BITS(re, &s->gb, 2);
400  CLOSE_READER(re, &s->gb);
401  }
402  block[63] ^= (mismatch & 1);
403 
404  check_scantable_index(s, i);
405 
406  s->block_last_index[n] = i;
407  return 0;
408 }
409 
410 /**
411  * Changing this would eat up any speed benefits it has.
412  * Do not use the "fast" flag if you need the code to be robust.
413  */
414 static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
415  int16_t *block, int n)
416 {
417  int level, i, j, run;
418  RLTable *rl = &ff_rl_mpeg1;
419  uint8_t *const scantable = s->intra_scantable.permutated;
420  const int qscale = s->qscale;
421  OPEN_READER(re, &s->gb);
422  i = -1;
423 
424  // special case for first coefficient, no need to add second VLC table
425  UPDATE_CACHE(re, &s->gb);
426  if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
427  level = (3 * qscale) >> 1;
428  if (GET_CACHE(re, &s->gb) & 0x40000000)
429  level = -level;
430  block[0] = level;
431  i++;
432  SKIP_BITS(re, &s->gb, 2);
433  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
434  goto end;
435  }
436 
437  /* now dequantize & decode the AC coefficients */
438  for (;;) {
439  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0], TEX_VLC_BITS, 2, 0);
440 
441  if (level != 0) {
442  i += run;
443  if (i > MAX_INDEX)
444  break;
445  j = scantable[i];
446  level = ((level * 2 + 1) * qscale) >> 1;
447  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
448  SHOW_SBITS(re, &s->gb, 1);
449  SKIP_BITS(re, &s->gb, 1);
450  } else {
451  /* escape */
452  run = SHOW_UBITS(re, &s->gb, 6) + 1;
453  LAST_SKIP_BITS(re, &s->gb, 6);
454  UPDATE_CACHE(re, &s->gb);
455  level = SHOW_SBITS(re, &s->gb, 12);
456  SKIP_BITS(re, &s->gb, 12);
457 
458  i += run;
459  if (i > MAX_INDEX)
460  break;
461  j = scantable[i];
462  if (level < 0) {
463  level = ((-level * 2 + 1) * qscale) >> 1;
464  level = -level;
465  } else {
466  level = ((level * 2 + 1) * qscale) >> 1;
467  }
468  }
469 
470  block[j] = level;
471  if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF || i > 63)
472  break;
473 
474  UPDATE_CACHE(re, &s->gb);
475  }
476 end:
477  LAST_SKIP_BITS(re, &s->gb, 2);
478  CLOSE_READER(re, &s->gb);
479 
480  check_scantable_index(s, i);
481 
482  s->block_last_index[n] = i;
483  return 0;
484 }
485 
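/* Intra blocks predict their DC from the previous block of the same
 * component (s->last_dc[component]) and scale it by
 * 1 << (3 - intra_dc_precision), so 8- to 11-bit DC precision all map onto
 * the same coefficient scale. intra_vlc_format selects between the MPEG-1
 * AC table (ff_rl_mpeg1) and the alternative MPEG-2 intra table (ff_rl_mpeg2). */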
486 static inline int mpeg2_decode_block_intra(MpegEncContext *s,
487  int16_t *block, int n)
488 {
489  int level, dc, diff, i, j, run;
490  int component;
491  RLTable *rl;
492  uint8_t *const scantable = s->intra_scantable.permutated;
493  const uint16_t *quant_matrix;
494  const int qscale = s->qscale;
495  int mismatch;
496 
497  /* DC coefficient */
498  if (n < 4) {
499  quant_matrix = s->intra_matrix;
500  component = 0;
501  } else {
502  quant_matrix = s->chroma_intra_matrix;
503  component = (n & 1) + 1;
504  }
505  diff = decode_dc(&s->gb, component);
506  dc = s->last_dc[component];
507  dc += diff;
508  s->last_dc[component] = dc;
509  block[0] = dc * (1 << (3 - s->intra_dc_precision));
510  ff_tlog(s->avctx, "dc=%d\n", block[0]);
511  mismatch = block[0] ^ 1;
512  i = 0;
513  if (s->intra_vlc_format)
514  rl = &ff_rl_mpeg2;
515  else
516  rl = &ff_rl_mpeg1;
517 
518  {
519  OPEN_READER(re, &s->gb);
520  /* now dequantize & decode the AC coefficients */
521  for (;;) {
522  UPDATE_CACHE(re, &s->gb);
523  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
524  TEX_VLC_BITS, 2, 0);
525 
526  if (level == 127) {
527  break;
528  } else if (level != 0) {
529  i += run;
530  if (i > MAX_INDEX)
531  break;
532  j = scantable[i];
533  level = (level * qscale * quant_matrix[j]) >> 4;
534  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
535  SHOW_SBITS(re, &s->gb, 1);
536  LAST_SKIP_BITS(re, &s->gb, 1);
537  } else {
538  /* escape */
539  run = SHOW_UBITS(re, &s->gb, 6) + 1;
540  SKIP_BITS(re, &s->gb, 6);
541  level = SHOW_SBITS(re, &s->gb, 12);
542  LAST_SKIP_BITS(re, &s->gb, 12);
543  i += run;
544  if (i > MAX_INDEX)
545  break;
546  j = scantable[i];
547  if (level < 0) {
548  level = (-level * qscale * quant_matrix[j]) >> 4;
549  level = -level;
550  } else {
551  level = (level * qscale * quant_matrix[j]) >> 4;
552  }
553  }
554 
555  mismatch ^= level;
556  block[j] = level;
557  }
558  CLOSE_READER(re, &s->gb);
559  }
560  block[63] ^= mismatch & 1;
561 
562  check_scantable_index(s, i);
563 
564  s->block_last_index[n] = i;
565  return 0;
566 }
567 
568 /**
569  * Changing this would eat up any speed benefits it has.
570  * Do not use the "fast" flag if you need the code to be robust.
571  */
572 static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
573  int16_t *block, int n)
574 {
575  int level, dc, diff, i, j, run;
576  int component;
577  RLTable *rl;
578  uint8_t *const scantable = s->intra_scantable.permutated;
579  const uint16_t *quant_matrix;
580  const int qscale = s->qscale;
581 
582  /* DC coefficient */
583  if (n < 4) {
584  quant_matrix = s->intra_matrix;
585  component = 0;
586  } else {
587  quant_matrix = s->chroma_intra_matrix;
588  component = (n & 1) + 1;
589  }
590  diff = decode_dc(&s->gb, component);
591  dc = s->last_dc[component];
592  dc += diff;
593  s->last_dc[component] = dc;
594  block[0] = dc * (1 << (3 - s->intra_dc_precision));
595  i = 0;
596  if (s->intra_vlc_format)
597  rl = &ff_rl_mpeg2;
598  else
599  rl = &ff_rl_mpeg1;
600 
601  {
602  OPEN_READER(re, &s->gb);
603  /* now dequantize & decode the AC coefficients */
604  for (;;) {
605  UPDATE_CACHE(re, &s->gb);
606  GET_RL_VLC(level, run, re, &s->gb, rl->rl_vlc[0],
607  TEX_VLC_BITS, 2, 0);
608 
609  if (level >= 64 || i > 63) {
610  break;
611  } else if (level != 0) {
612  i += run;
613  j = scantable[i];
614  level = (level * qscale * quant_matrix[j]) >> 4;
615  level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
616  SHOW_SBITS(re, &s->gb, 1);
617  LAST_SKIP_BITS(re, &s->gb, 1);
618  } else {
619  /* escape */
620  run = SHOW_UBITS(re, &s->gb, 6) + 1;
621  SKIP_BITS(re, &s->gb, 6);
622  level = SHOW_SBITS(re, &s->gb, 12);
623  LAST_SKIP_BITS(re, &s->gb, 12);
624  i += run;
625  j = scantable[i];
626  if (level < 0) {
627  level = (-level * qscale * quant_matrix[j]) >> 4;
628  level = -level;
629  } else {
630  level = (level * qscale * quant_matrix[j]) >> 4;
631  }
632  }
633 
634  block[j] = level;
635  }
636  CLOSE_READER(re, &s->gb);
637  }
638 
639  check_scantable_index(s, i);
640 
641  s->block_last_index[n] = i;
642  return 0;
643 }
644 
645 /******************************************/
646 /* decoding */
647 
648 static inline int get_dmv(MpegEncContext *s)
649 {
650  if (get_bits1(&s->gb))
651  return 1 - (get_bits1(&s->gb) << 1);
652  else
653  return 0;
654 }
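/* get_dmv() reads a dual-prime differential: '0' decodes to 0, '10' to +1
 * and '11' to -1. These corrections are applied to the scaled vector in the
 * MT_DMV case of mpeg_decode_mb() below. */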
655 
656 /* motion type (for MPEG-2) */
657 #define MT_FIELD 1
658 #define MT_FRAME 2
659 #define MT_16X8 2
660 #define MT_DMV 3
661 
662 static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
663 {
664  int i, j, k, cbp, val, mb_type, motion_type;
665  const int mb_block_count = 4 + (1 << s->chroma_format);
666  int ret;
667 
668  ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
669 
670  av_assert2(s->mb_skipped == 0);
671 
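/* mb_skip_run counts macroblocks that are not coded at all: in P pictures a
 * skipped MB is predicted from the forward reference with a zero motion
 * vector, in B pictures it reuses the previous MB's direction and vectors,
 * and a skip directly after an intra MB is invalid. */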
672  if (s->mb_skip_run-- != 0) {
673  if (s->pict_type == AV_PICTURE_TYPE_P) {
674  s->mb_skipped = 1;
675  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
676  MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_16x16;
677  } else {
678  int mb_type;
679 
680  if (s->mb_x)
681  mb_type = s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
682  else
683  // FIXME not sure if this is allowed in MPEG at all
684  mb_type = s->current_picture.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
685  if (IS_INTRA(mb_type)) {
686  av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
687  return AVERROR_INVALIDDATA;
688  }
689  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
690  mb_type | MB_TYPE_SKIP;
691 
692  if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
693  s->mb_skipped = 1;
694  }
695 
696  return 0;
697  }
698 
699  switch (s->pict_type) {
700  default:
701  case AV_PICTURE_TYPE_I:
702  if (get_bits1(&s->gb) == 0) {
703  if (get_bits1(&s->gb) == 0) {
704  av_log(s->avctx, AV_LOG_ERROR,
705  "Invalid mb type in I-frame at %d %d\n",
706  s->mb_x, s->mb_y);
707  return AVERROR_INVALIDDATA;
708  }
709  mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
710  } else {
711  mb_type = MB_TYPE_INTRA;
712  }
713  break;
714  case AV_PICTURE_TYPE_P:
715  mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc.table, MB_PTYPE_VLC_BITS, 1);
716  if (mb_type < 0) {
717  av_log(s->avctx, AV_LOG_ERROR,
718  "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
719  return AVERROR_INVALIDDATA;
720  }
721  mb_type = ptype2mb_type[mb_type];
722  break;
723  case AV_PICTURE_TYPE_B:
724  mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc.table, MB_BTYPE_VLC_BITS, 1);
725  if (mb_type < 0) {
726  av_log(s->avctx, AV_LOG_ERROR,
727  "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
728  return AVERROR_INVALIDDATA;
729  }
730  mb_type = btype2mb_type[mb_type];
731  break;
732  }
733  ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
734 // motion_type = 0; /* avoid warning */
735  if (IS_INTRA(mb_type)) {
736  s->bdsp.clear_blocks(s->block[0]);
737 
738  if (!s->chroma_y_shift)
739  s->bdsp.clear_blocks(s->block[6]);
740 
741  /* compute DCT type */
742  // FIXME: add an interlaced_dct coded var?
743  if (s->picture_structure == PICT_FRAME &&
744  !s->frame_pred_frame_dct)
745  s->interlaced_dct = get_bits1(&s->gb);
746 
747  if (IS_QUANT(mb_type))
748  s->qscale = mpeg_get_qscale(s);
749 
750  if (s->concealment_motion_vectors) {
751  /* just parse them */
752  if (s->picture_structure != PICT_FRAME)
753  skip_bits1(&s->gb); /* field select */
754 
755  s->mv[0][0][0] =
756  s->last_mv[0][0][0] =
757  s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
758  s->last_mv[0][0][0]);
759  s->mv[0][0][1] =
760  s->last_mv[0][0][1] =
761  s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
762  s->last_mv[0][0][1]);
763 
764  check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
765  } else {
766  /* reset mv prediction */
767  memset(s->last_mv, 0, sizeof(s->last_mv));
768  }
769  s->mb_intra = 1;
770 
771  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
772  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
773  for (i = 0; i < 6; i++)
774  mpeg2_fast_decode_block_intra(s, *s->pblocks[i], i);
775  } else {
776  for (i = 0; i < mb_block_count; i++)
777  if ((ret = mpeg2_decode_block_intra(s, *s->pblocks[i], i)) < 0)
778  return ret;
779  }
780  } else {
781  for (i = 0; i < 6; i++) {
782  ret = ff_mpeg1_decode_block_intra(&s->gb,
783  s->intra_matrix,
784  s->intra_scantable.permutated,
785  s->last_dc, *s->pblocks[i],
786  i, s->qscale);
787  if (ret < 0) {
788  av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
789  s->mb_x, s->mb_y);
790  return ret;
791  }
792 
793  s->block_last_index[i] = ret;
794  }
795  }
796  } else {
797  if (mb_type & MB_TYPE_ZERO_MV) {
798  av_assert2(mb_type & MB_TYPE_CBP);
799 
800  s->mv_dir = MV_DIR_FORWARD;
801  if (s->picture_structure == PICT_FRAME) {
802  if (s->picture_structure == PICT_FRAME
803  && !s->frame_pred_frame_dct)
804  s->interlaced_dct = get_bits1(&s->gb);
805  s->mv_type = MV_TYPE_16X16;
806  } else {
807  s->mv_type = MV_TYPE_FIELD;
808  mb_type |= MB_TYPE_INTERLACED;
809  s->field_select[0][0] = s->picture_structure - 1;
810  }
811 
812  if (IS_QUANT(mb_type))
813  s->qscale = mpeg_get_qscale(s);
814 
815  s->last_mv[0][0][0] = 0;
816  s->last_mv[0][0][1] = 0;
817  s->last_mv[0][1][0] = 0;
818  s->last_mv[0][1][1] = 0;
819  s->mv[0][0][0] = 0;
820  s->mv[0][0][1] = 0;
821  } else {
822  av_assert2(mb_type & MB_TYPE_L0L1);
823  // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
824  /* get additional motion vector type */
825  if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
826  motion_type = MT_FRAME;
827  } else {
828  motion_type = get_bits(&s->gb, 2);
829  if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
830  s->interlaced_dct = get_bits1(&s->gb);
831  }
832 
833  if (IS_QUANT(mb_type))
834  s->qscale = mpeg_get_qscale(s);
835 
836  /* motion vectors */
837  s->mv_dir = (mb_type >> 13) & 3;
838  ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
839  switch (motion_type) {
840  case MT_FRAME: /* or MT_16X8 */
841  if (s->picture_structure == PICT_FRAME) {
842  mb_type |= MB_TYPE_16x16;
843  s->mv_type = MV_TYPE_16X16;
844  for (i = 0; i < 2; i++) {
845  if (USES_LIST(mb_type, i)) {
846  /* MT_FRAME */
847  s->mv[i][0][0] =
848  s->last_mv[i][0][0] =
849  s->last_mv[i][1][0] =
850  mpeg_decode_motion(s, s->mpeg_f_code[i][0],
851  s->last_mv[i][0][0]);
852  s->mv[i][0][1] =
853  s->last_mv[i][0][1] =
854  s->last_mv[i][1][1] =
855  mpeg_decode_motion(s, s->mpeg_f_code[i][1],
856  s->last_mv[i][0][1]);
857  /* full_pel: only for MPEG-1 */
858  if (s->full_pel[i]) {
859  s->mv[i][0][0] *= 2;
860  s->mv[i][0][1] *= 2;
861  }
862  }
863  }
864  } else {
865  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
866  s->mv_type = MV_TYPE_16X8;
867  for (i = 0; i < 2; i++) {
868  if (USES_LIST(mb_type, i)) {
869  /* MT_16X8 */
870  for (j = 0; j < 2; j++) {
871  s->field_select[i][j] = get_bits1(&s->gb);
872  for (k = 0; k < 2; k++) {
873  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
874  s->last_mv[i][j][k]);
875  s->last_mv[i][j][k] = val;
876  s->mv[i][j][k] = val;
877  }
878  }
879  }
880  }
881  }
882  break;
883  case MT_FIELD:
884  s->mv_type = MV_TYPE_FIELD;
885  if (s->picture_structure == PICT_FRAME) {
886  mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
887  for (i = 0; i < 2; i++) {
888  if (USES_LIST(mb_type, i)) {
889  for (j = 0; j < 2; j++) {
890  s->field_select[i][j] = get_bits1(&s->gb);
891  val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
892  s->last_mv[i][j][0]);
893  s->last_mv[i][j][0] = val;
894  s->mv[i][j][0] = val;
895  ff_tlog(s->avctx, "fmx=%d\n", val);
896  val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
897  s->last_mv[i][j][1] >> 1);
898  s->last_mv[i][j][1] = 2 * val;
899  s->mv[i][j][1] = val;
900  ff_tlog(s->avctx, "fmy=%d\n", val);
901  }
902  }
903  }
904  } else {
905  av_assert0(!s->progressive_sequence);
906  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
907  for (i = 0; i < 2; i++) {
908  if (USES_LIST(mb_type, i)) {
909  s->field_select[i][0] = get_bits1(&s->gb);
910  for (k = 0; k < 2; k++) {
911  val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
912  s->last_mv[i][0][k]);
913  s->last_mv[i][0][k] = val;
914  s->last_mv[i][1][k] = val;
915  s->mv[i][0][k] = val;
916  }
917  }
918  }
919  }
920  break;
921  case MT_DMV:
922  if (s->progressive_sequence){
923  av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
924  return AVERROR_INVALIDDATA;
925  }
926  s->mv_type = MV_TYPE_DMV;
927  for (i = 0; i < 2; i++) {
928  if (USES_LIST(mb_type, i)) {
929  int dmx, dmy, mx, my, m;
930  const int my_shift = s->picture_structure == PICT_FRAME;
931 
932  mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
933  s->last_mv[i][0][0]);
934  s->last_mv[i][0][0] = mx;
935  s->last_mv[i][1][0] = mx;
936  dmx = get_dmv(s);
937  my = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
938  s->last_mv[i][0][1] >> my_shift);
939  dmy = get_dmv(s);
940 
941 
942  s->last_mv[i][0][1] = my * (1 << my_shift);
943  s->last_mv[i][1][1] = my * (1 << my_shift);
944 
945  s->mv[i][0][0] = mx;
946  s->mv[i][0][1] = my;
947  s->mv[i][1][0] = mx; // not used
948  s->mv[i][1][1] = my; // not used
949 
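/* Dual prime: only one vector (mx, my) plus the small differentials
 * (dmx, dmy) is transmitted per field pair; the opposite-parity prediction
 * is derived by scaling the vector by the relative temporal distance
 * (m and 4 - m in frame pictures) and correcting the vertical component by
 * +/-1 for the field-parity offset. */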
950  if (s->picture_structure == PICT_FRAME) {
951  mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
952 
953  // m = 1 + 2 * s->top_field_first;
954  m = s->top_field_first ? 1 : 3;
955 
956  /* top -> top pred */
957  s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
958  s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
959  m = 4 - m;
960  s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
961  s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
962  } else {
963  mb_type |= MB_TYPE_16x16;
964 
965  s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
966  s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
967  if (s->picture_structure == PICT_TOP_FIELD)
968  s->mv[i][2][1]--;
969  else
970  s->mv[i][2][1]++;
971  }
972  }
973  }
974  break;
975  default:
976  av_log(s->avctx, AV_LOG_ERROR,
977  "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
978  return AVERROR_INVALIDDATA;
979  }
980  }
981 
982  s->mb_intra = 0;
983  if (HAS_CBP(mb_type)) {
984  s->bdsp.clear_blocks(s->block[0]);
985 
986  cbp = get_vlc2(&s->gb, ff_mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
987  if (mb_block_count > 6) {
988  cbp *= 1 << mb_block_count - 6;
989  cbp |= get_bits(&s->gb, mb_block_count - 6);
990  s->bdsp.clear_blocks(s->block[6]);
991  }
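/* coded_block_pattern: the VLC covers the six 4:2:0 blocks; 4:2:2 and 4:4:4
 * macroblocks append extra raw bits for the additional chroma blocks. In the
 * loops below "cbp += cbp" shifts the pattern left by one so the same bit
 * (bit 5, or bit 11 after the MPEG-2 realignment) tests each successive block. */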
992  if (cbp <= 0) {
993  av_log(s->avctx, AV_LOG_ERROR,
994  "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
995  return AVERROR_INVALIDDATA;
996  }
997 
998  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
999  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1000  for (i = 0; i < 6; i++) {
1001  if (cbp & 32)
1002  mpeg2_fast_decode_block_non_intra(s, *s->pblocks[i], i);
1003  else
1004  s->block_last_index[i] = -1;
1005  cbp += cbp;
1006  }
1007  } else {
1008  cbp <<= 12 - mb_block_count;
1009 
1010  for (i = 0; i < mb_block_count; i++) {
1011  if (cbp & (1 << 11)) {
1012  if ((ret = mpeg2_decode_block_non_intra(s, *s->pblocks[i], i)) < 0)
1013  return ret;
1014  } else {
1015  s->block_last_index[i] = -1;
1016  }
1017  cbp += cbp;
1018  }
1019  }
1020  } else {
1021  if (s->avctx->flags2 & AV_CODEC_FLAG2_FAST) {
1022  for (i = 0; i < 6; i++) {
1023  if (cbp & 32)
1024  mpeg1_fast_decode_block_inter(s, *s->pblocks[i], i);
1025  else
1026  s->block_last_index[i] = -1;
1027  cbp += cbp;
1028  }
1029  } else {
1030  for (i = 0; i < 6; i++) {
1031  if (cbp & 32) {
1032  if ((ret = mpeg1_decode_block_inter(s, *s->pblocks[i], i)) < 0)
1033  return ret;
1034  } else {
1035  s->block_last_index[i] = -1;
1036  }
1037  cbp += cbp;
1038  }
1039  }
1040  }
1041  } else {
1042  for (i = 0; i < 12; i++)
1043  s->block_last_index[i] = -1;
1044  }
1045  }
1046 
1047  s->current_picture.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
1048 
1049  return 0;
1050 }
1051 
1052 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
1053 {
1054  Mpeg1Context *s = avctx->priv_data;
1055  MpegEncContext *s2 = &s->mpeg_enc_ctx;
1056 
1057  if ( avctx->codec_tag != AV_RL32("VCR2")
1058  && avctx->codec_tag != AV_RL32("BW10"))
1059  avctx->coded_width = avctx->coded_height = 0; // do not trust dimensions from input
1060  ff_mpv_decode_init(s2, avctx);
1061 
1062  /* we need some permutation to store matrices,
1063  * until the decoder sets the real permutation. */
1064  ff_mpv_idct_init(s2);
1065  ff_mpeg12_common_init(&s->mpeg_enc_ctx);
1066  ff_mpeg12_init_vlcs();
1067 
1068  s2->chroma_format = 1;
1069  s->mpeg_enc_ctx_allocated = 0;
1070  s->repeat_field = 0;
1071  avctx->color_range = AVCOL_RANGE_MPEG;
1072  return 0;
1073 }
1074 
1075 #if HAVE_THREADS
1076 static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
1077  const AVCodecContext *avctx_from)
1078 {
1079  Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
1080  MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
1081  int err;
1082 
1083  if (avctx == avctx_from ||
1084  !ctx_from->mpeg_enc_ctx_allocated ||
1085  !s1->context_initialized)
1086  return 0;
1087 
1088  err = ff_mpeg_update_thread_context(avctx, avctx_from);
1089  if (err)
1090  return err;
1091 
1092  if (!ctx->mpeg_enc_ctx_allocated)
1093  memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
1094 
1095  return 0;
1096 }
1097 #endif
1098 
1099 static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm,
1100  const uint8_t *new_perm)
1101 {
1102  uint16_t temp_matrix[64];
1103  int i;
1104 
1105  memcpy(temp_matrix, matrix, 64 * sizeof(uint16_t));
1106 
1107  for (i = 0; i < 64; i++)
1108  matrix[new_perm[i]] = temp_matrix[old_perm[i]];
1109 }
1110 
1111 static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = {
1112 #if CONFIG_MPEG1_NVDEC_HWACCEL
1113  AV_PIX_FMT_CUDA,
1114 #endif
1115 #if CONFIG_MPEG1_VDPAU_HWACCEL
1116  AV_PIX_FMT_VDPAU,
1117 #endif
1118  AV_PIX_FMT_YUV420P,
1119  AV_PIX_FMT_NONE
1120 };
1121 
1122 static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
1123 #if CONFIG_MPEG2_NVDEC_HWACCEL
1124  AV_PIX_FMT_CUDA,
1125 #endif
1126 #if CONFIG_MPEG2_VDPAU_HWACCEL
1127  AV_PIX_FMT_VDPAU,
1128 #endif
1129 #if CONFIG_MPEG2_DXVA2_HWACCEL
1130  AV_PIX_FMT_DXVA2_VLD,
1131 #endif
1132 #if CONFIG_MPEG2_D3D11VA_HWACCEL
1133  AV_PIX_FMT_D3D11VA_VLD,
1134  AV_PIX_FMT_D3D11,
1135 #endif
1136 #if CONFIG_MPEG2_VAAPI_HWACCEL
1137  AV_PIX_FMT_VAAPI,
1138 #endif
1139 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
1140  AV_PIX_FMT_VIDEOTOOLBOX,
1141 #endif
1142  AV_PIX_FMT_YUV420P,
1143  AV_PIX_FMT_NONE
1144 };
1145 
1146 static const enum AVPixelFormat mpeg12_pixfmt_list_422[] = {
1147  AV_PIX_FMT_YUV422P,
1148  AV_PIX_FMT_NONE
1149 };
1150 
1151 static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
1152  AV_PIX_FMT_YUV444P,
1153  AV_PIX_FMT_NONE
1154 };
1155 
1156 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
1157 {
1158  Mpeg1Context *s1 = avctx->priv_data;
1159  MpegEncContext *s = &s1->mpeg_enc_ctx;
1160  const enum AVPixelFormat *pix_fmts;
1161 
1162  if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
1163  return AV_PIX_FMT_GRAY8;
1164 
1165  if (s->chroma_format < 2)
1166  pix_fmts = avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO ?
1167  mpeg1_hwaccel_pixfmt_list_420 :
1168  mpeg2_hwaccel_pixfmt_list_420;
1169  else if (s->chroma_format == 2)
1170  pix_fmts = mpeg12_pixfmt_list_422;
1171  else
1172  pix_fmts = mpeg12_pixfmt_list_444;
1173 
1174  return ff_thread_get_format(avctx, pix_fmts);
1175 }
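/* ff_thread_get_format() runs the normal get_format negotiation, so one of
 * the hardware formats above is only selected if the application accepts it;
 * otherwise decoding falls back to the software YUV format at the end of the
 * list. */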
1176 
1177 /* Call this function when we know all parameters.
1178  * It may be called in different places for MPEG-1 and MPEG-2. */
1179 static int mpeg_decode_postinit(AVCodecContext *avctx)
1180 {
1181  Mpeg1Context *s1 = avctx->priv_data;
1182  MpegEncContext *s = &s1->mpeg_enc_ctx;
1183  uint8_t old_permutation[64];
1184  int ret;
1185 
1186  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1187  // MPEG-1 aspect
1188  AVRational aspect_inv = av_d2q(ff_mpeg1_aspect[s1->aspect_ratio_info], 255);
1189  avctx->sample_aspect_ratio = (AVRational) { aspect_inv.den, aspect_inv.num };
1190  } else { // MPEG-2
1191  // MPEG-2 aspect
1192  if (s1->aspect_ratio_info > 1) {
1193  AVRational dar =
1194  av_mul_q(av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1195  (AVRational) { s1->pan_scan.width,
1196  s1->pan_scan.height }),
1197  (AVRational) { s->width, s->height });
1198 
1199  /* We ignore the spec here and guess a bit as reality does not
1200  * match the spec, see for example res_change_ffmpeg_aspect.ts
1201  * and sequence-display-aspect.mpg.
1202  * issue1613, 621, 562 */
1203  if ((s1->pan_scan.width == 0) || (s1->pan_scan.height == 0) ||
1204  (av_cmp_q(dar, (AVRational) { 4, 3 }) &&
1205  av_cmp_q(dar, (AVRational) { 16, 9 }))) {
1206  s->avctx->sample_aspect_ratio =
1207  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1208  (AVRational) { s->width, s->height });
1209  } else {
1210  s->avctx->sample_aspect_ratio =
1211  av_div_q(ff_mpeg2_aspect[s1->aspect_ratio_info],
1212  (AVRational) { s1->pan_scan.width, s1->pan_scan.height });
1213 // issue1613 4/3 16/9 -> 16/9
1214 // res_change_ffmpeg_aspect.ts 4/3 225/44 ->4/3
1215 // widescreen-issue562.mpg 4/3 16/9 -> 16/9
1216 // s->avctx->sample_aspect_ratio = av_mul_q(s->avctx->sample_aspect_ratio, (AVRational) {s->width, s->height});
1217  ff_dlog(avctx, "aspect A %d/%d\n",
1218  ff_mpeg2_aspect[s1->aspect_ratio_info].num,
1219  ff_mpeg2_aspect[s1->aspect_ratio_info].den);
1220  ff_dlog(avctx, "aspect B %d/%d\n", s->avctx->sample_aspect_ratio.num,
1221  s->avctx->sample_aspect_ratio.den);
1222  }
1223  } else {
1224  s->avctx->sample_aspect_ratio =
1225  ff_mpeg2_aspect[s1->aspect_ratio_info];
1226  }
1227  } // MPEG-2
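/* Note the asymmetry above: MPEG-1 signals a pel aspect ratio, which is
 * simply inverted into a sample aspect ratio, while MPEG-2 signals a display
 * aspect ratio that must be divided by the coded (or pan-scan) dimensions to
 * recover the SAR. */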
1228 
1229  if (av_image_check_sar(s->width, s->height,
1230  avctx->sample_aspect_ratio) < 0) {
1231  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1232  avctx->sample_aspect_ratio.num,
1233  avctx->sample_aspect_ratio.den);
1234  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
1235  }
1236 
1237  if ((s1->mpeg_enc_ctx_allocated == 0) ||
1238  avctx->coded_width != s->width ||
1239  avctx->coded_height != s->height ||
1240  s1->save_width != s->width ||
1241  s1->save_height != s->height ||
1242  av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
1243  (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
1244  0) {
1245  if (s1->mpeg_enc_ctx_allocated) {
1246 #if FF_API_FLAG_TRUNCATED
1247  ParseContext pc = s->parse_context;
1248  s->parse_context.buffer = 0;
1249  ff_mpv_common_end(s);
1250  s->parse_context = pc;
1251 #else
1252  ff_mpv_common_end(s);
1253 #endif
1254  s1->mpeg_enc_ctx_allocated = 0;
1255  }
1256 
1257  ret = ff_set_dimensions(avctx, s->width, s->height);
1258  if (ret < 0)
1259  return ret;
1260 
1261  if (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->bit_rate) {
1262  avctx->rc_max_rate = s->bit_rate;
1263  } else if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && s->bit_rate &&
1264  (s->bit_rate != 0x3FFFF*400 || s->vbv_delay != 0xFFFF)) {
1265  avctx->bit_rate = s->bit_rate;
1266  }
1267  s1->save_aspect = s->avctx->sample_aspect_ratio;
1268  s1->save_width = s->width;
1269  s1->save_height = s->height;
1270  s1->save_progressive_seq = s->progressive_sequence;
1271 
1272  /* low_delay may be forced, in this case we will have B-frames
1273  * that behave like P-frames. */
1274  avctx->has_b_frames = !s->low_delay;
1275 
1276  if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
1277  // MPEG-1 fps
1278  avctx->framerate = ff_mpeg12_frame_rate_tab[s1->frame_rate_index];
1279  avctx->ticks_per_frame = 1;
1280 
1281  avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
1282  } else { // MPEG-2
1283  // MPEG-2 fps
1284  av_reduce(&s->avctx->framerate.num,
1285  &s->avctx->framerate.den,
1286  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].num * s1->frame_rate_ext.num,
1287  ff_mpeg12_frame_rate_tab[s1->frame_rate_index].den * s1->frame_rate_ext.den,
1288  1 << 30);
1289  avctx->ticks_per_frame = 2;
1290 
1291  switch (s->chroma_format) {
1292  case 1: avctx->chroma_sample_location = AVCHROMA_LOC_LEFT; break;
1293  case 2:
1294  case 3: avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; break;
1295  default: av_assert0(0);
1296  }
1297  } // MPEG-2
1298 
1299  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
1300 
1301  /* Quantization matrices may need reordering
1302  * if DCT permutation is changed. */
1303  memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
1304 
1305  ff_mpv_idct_init(s);
1306  if ((ret = ff_mpv_common_init(s)) < 0)
1307  return ret;
1308 
1309  quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation);
1310  quant_matrix_rebuild(s->inter_matrix, old_permutation, s->idsp.idct_permutation);
1311  quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation);
1312  quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation);
1313 
1314  s1->mpeg_enc_ctx_allocated = 1;
1315  }
1316  return 0;
1317 }
1318 
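/* The MPEG-1 picture header carries the temporal reference, picture coding
 * type, a 16-bit vbv_delay and, for P and B pictures, a full_pel flag plus a
 * 3-bit f_code per prediction direction; a forbidden f_code of 0 is either
 * rejected (with strict error recognition) or bumped to 1. */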
1319 static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
1320  int buf_size)
1321 {
1322  Mpeg1Context *s1 = avctx->priv_data;
1323  MpegEncContext *s = &s1->mpeg_enc_ctx;
1324  int ref, f_code, vbv_delay, ret;
1325 
1326  ret = init_get_bits8(&s->gb, buf, buf_size);
1327  if (ret < 0)
1328  return ret;
1329 
1330  ref = get_bits(&s->gb, 10); /* temporal ref */
1331  s->pict_type = get_bits(&s->gb, 3);
1332  if (s->pict_type == 0 || s->pict_type > 3)
1333  return AVERROR_INVALIDDATA;
1334 
1335  vbv_delay = get_bits(&s->gb, 16);
1336  s->vbv_delay = vbv_delay;
1337  if (s->pict_type == AV_PICTURE_TYPE_P ||
1338  s->pict_type == AV_PICTURE_TYPE_B) {
1339  s->full_pel[0] = get_bits1(&s->gb);
1340  f_code = get_bits(&s->gb, 3);
1341  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1342  return AVERROR_INVALIDDATA;
1343  f_code += !f_code;
1344  s->mpeg_f_code[0][0] = f_code;
1345  s->mpeg_f_code[0][1] = f_code;
1346  }
1347  if (s->pict_type == AV_PICTURE_TYPE_B) {
1348  s->full_pel[1] = get_bits1(&s->gb);
1349  f_code = get_bits(&s->gb, 3);
1350  if (f_code == 0 && (avctx->err_recognition & (AV_EF_BITSTREAM|AV_EF_COMPLIANT)))
1351  return AVERROR_INVALIDDATA;
1352  f_code += !f_code;
1353  s->mpeg_f_code[1][0] = f_code;
1354  s->mpeg_f_code[1][1] = f_code;
1355  }
1356  s->current_picture.f->pict_type = s->pict_type;
1357  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1358 
1359  if (avctx->debug & FF_DEBUG_PICT_INFO)
1360  av_log(avctx, AV_LOG_DEBUG,
1361  "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
1362 
1363  s->y_dc_scale = 8;
1364  s->c_dc_scale = 8;
1365  return 0;
1366 }
1367 
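/* The sequence extension is what turns an MPEG-1 stream into MPEG-2: it adds
 * profile/level, progressive_sequence, chroma_format, two extra high bits
 * each for width and height, a bit-rate extension and the frame_rate_ext
 * numerator/denominator that scale the base frame rate from the sequence
 * header. */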
1368 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
1369 {
1370  MpegEncContext *s = &s1->mpeg_enc_ctx;
1371  int horiz_size_ext, vert_size_ext;
1372  int bit_rate_ext;
1373  AVCPBProperties *cpb_props;
1374 
1375  skip_bits(&s->gb, 1); /* profile and level esc */
1376  s->avctx->profile = get_bits(&s->gb, 3);
1377  s->avctx->level = get_bits(&s->gb, 4);
1378  s->progressive_sequence = get_bits1(&s->gb); /* progressive_sequence */
1379  s->chroma_format = get_bits(&s->gb, 2); /* chroma_format 1=420, 2=422, 3=444 */
1380 
1381  if (!s->chroma_format) {
1382  s->chroma_format = 1;
1383  av_log(s->avctx, AV_LOG_WARNING, "Chroma format invalid\n");
1384  }
1385 
1386  horiz_size_ext = get_bits(&s->gb, 2);
1387  vert_size_ext = get_bits(&s->gb, 2);
1388  s->width |= (horiz_size_ext << 12);
1389  s->height |= (vert_size_ext << 12);
1390  bit_rate_ext = get_bits(&s->gb, 12); /* XXX: handle it */
1391  s->bit_rate += (bit_rate_ext << 18) * 400LL;
1392  check_marker(s->avctx, &s->gb, "after bit rate extension");
1393  s1->rc_buffer_size += get_bits(&s->gb, 8) * 1024 * 16 << 10;
1394 
1395  s->low_delay = get_bits1(&s->gb);
1396  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
1397  s->low_delay = 1;
1398 
1399  s1->frame_rate_ext.num = get_bits(&s->gb, 2) + 1;
1400  s1->frame_rate_ext.den = get_bits(&s->gb, 5) + 1;
1401 
1402  ff_dlog(s->avctx, "sequence extension\n");
1403  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
1404 
1405  if (cpb_props = ff_add_cpb_side_data(s->avctx)) {
1406  cpb_props->buffer_size = s1->rc_buffer_size;
1407  if (s->bit_rate != 0x3FFFF*400)
1408  cpb_props->max_bitrate = s->bit_rate;
1409  }
1410 
1411  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1412  av_log(s->avctx, AV_LOG_DEBUG,
1413  "profile: %d, level: %d ps: %d cf:%d vbv buffer: %d, bitrate:%"PRId64"\n",
1414  s->avctx->profile, s->avctx->level, s->progressive_sequence, s->chroma_format,
1415  s1->rc_buffer_size, s->bit_rate);
1416 }
1417 
1418 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
1419 {
1420  MpegEncContext *s = &s1->mpeg_enc_ctx;
1421  int color_description, w, h;
1422 
1423  skip_bits(&s->gb, 3); /* video format */
1424  color_description = get_bits1(&s->gb);
1425  if (color_description) {
1426  s->avctx->color_primaries = get_bits(&s->gb, 8);
1427  s->avctx->color_trc = get_bits(&s->gb, 8);
1428  s->avctx->colorspace = get_bits(&s->gb, 8);
1429  }
1430  w = get_bits(&s->gb, 14);
1431  skip_bits(&s->gb, 1); // marker
1432  h = get_bits(&s->gb, 14);
1433  // remaining 3 bits are zero padding
1434 
1435  s1->pan_scan.width = 16 * w;
1436  s1->pan_scan.height = 16 * h;
1437 
1438  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1439  av_log(s->avctx, AV_LOG_DEBUG, "sde w:%d, h:%d\n", w, h);
1440 }
1441 
1442 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
1443 {
1444  MpegEncContext *s = &s1->mpeg_enc_ctx;
1445  int i, nofco;
1446 
1447  nofco = 1;
1448  if (s->progressive_sequence) {
1449  if (s->repeat_first_field) {
1450  nofco++;
1451  if (s->top_field_first)
1452  nofco++;
1453  }
1454  } else {
1455  if (s->picture_structure == PICT_FRAME) {
1456  nofco++;
1457  if (s->repeat_first_field)
1458  nofco++;
1459  }
1460  }
1461  for (i = 0; i < nofco; i++) {
1462  s1->pan_scan.position[i][0] = get_sbits(&s->gb, 16);
1463  skip_bits(&s->gb, 1); // marker
1464  s1->pan_scan.position[i][1] = get_sbits(&s->gb, 16);
1465  skip_bits(&s->gb, 1); // marker
1466  }
1467 
1468  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
1469  av_log(s->avctx, AV_LOG_DEBUG,
1470  "pde (%"PRId16",%"PRId16") (%"PRId16",%"PRId16") (%"PRId16",%"PRId16")\n",
1471  s1->pan_scan.position[0][0], s1->pan_scan.position[0][1],
1472  s1->pan_scan.position[1][0], s1->pan_scan.position[1][1],
1473  s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
1474 }
1475 
1476 static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
1477  uint16_t matrix1[64], int intra)
1478 {
1479  int i;
1480 
1481  for (i = 0; i < 64; i++) {
1482  int j = s->idsp.idct_permutation[ff_zigzag_direct[i]];
1483  int v = get_bits(&s->gb, 8);
1484  if (v == 0) {
1485  av_log(s->avctx, AV_LOG_ERROR, "matrix damaged\n");
1486  return AVERROR_INVALIDDATA;
1487  }
1488  if (intra && i == 0 && v != 8) {
1489  av_log(s->avctx, AV_LOG_DEBUG, "intra matrix specifies invalid DC quantizer %d, ignoring\n", v);
1490  v = 8; // needed by pink.mpg / issue1046
1491  }
1492  matrix0[j] = v;
1493  if (matrix1)
1494  matrix1[j] = v;
1495  }
1496  return 0;
1497 }
1498 
1499 static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
1500 {
1501  ff_dlog(s->avctx, "matrix extension\n");
1502 
1503  if (get_bits1(&s->gb))
1504  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
1505  if (get_bits1(&s->gb))
1506  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
1507  if (get_bits1(&s->gb))
1508  load_matrix(s, s->chroma_intra_matrix, NULL, 1);
1509  if (get_bits1(&s->gb))
1510  load_matrix(s, s->chroma_inter_matrix, NULL, 0);
1511 }
1512 
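/* The picture coding extension carries the four MPEG-2 f_codes
 * (forward/backward x horizontal/vertical), intra_dc_precision,
 * picture_structure and the DCT/scan flags; alternate_scan switches the
 * scan tables below. If the picture start code was lost, the picture type is
 * guessed from which f_codes are set to 15 (i.e. unused). */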
1513 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
1514 {
1515  MpegEncContext *s = &s1->mpeg_enc_ctx;
1516 
1517  s->full_pel[0] = s->full_pel[1] = 0;
1518  s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
1519  s->mpeg_f_code[0][1] = get_bits(&s->gb, 4);
1520  s->mpeg_f_code[1][0] = get_bits(&s->gb, 4);
1521  s->mpeg_f_code[1][1] = get_bits(&s->gb, 4);
1522  s->mpeg_f_code[0][0] += !s->mpeg_f_code[0][0];
1523  s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
1524  s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
1525  s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
1526  if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
1527  av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
1528  if (s->avctx->err_recognition & AV_EF_EXPLODE)
1529  return AVERROR_INVALIDDATA;
1530  av_log(s->avctx, AV_LOG_WARNING, "Guessing pict_type from mpeg_f_code\n");
1531  if (s->mpeg_f_code[1][0] == 15 && s->mpeg_f_code[1][1] == 15) {
1532  if (s->mpeg_f_code[0][0] == 15 && s->mpeg_f_code[0][1] == 15)
1533  s->pict_type = AV_PICTURE_TYPE_I;
1534  else
1535  s->pict_type = AV_PICTURE_TYPE_P;
1536  } else
1537  s->pict_type = AV_PICTURE_TYPE_B;
1538  s->current_picture.f->pict_type = s->pict_type;
1539  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1540  }
1541 
1542  s->intra_dc_precision = get_bits(&s->gb, 2);
1543  s->picture_structure = get_bits(&s->gb, 2);
1544  s->top_field_first = get_bits1(&s->gb);
1545  s->frame_pred_frame_dct = get_bits1(&s->gb);
1546  s->concealment_motion_vectors = get_bits1(&s->gb);
1547  s->q_scale_type = get_bits1(&s->gb);
1548  s->intra_vlc_format = get_bits1(&s->gb);
1549  s->alternate_scan = get_bits1(&s->gb);
1550  s->repeat_first_field = get_bits1(&s->gb);
1551  s->chroma_420_type = get_bits1(&s->gb);
1552  s->progressive_frame = get_bits1(&s->gb);
1553 
1554  if (s->alternate_scan) {
1555  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_alternate_vertical_scan);
1556  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_alternate_vertical_scan);
1557  } else {
1558  ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, ff_zigzag_direct);
1559  ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, ff_zigzag_direct);
1560  }
1561 
1562  /* composite display not parsed */
1563  ff_dlog(s->avctx, "intra_dc_precision=%d\n", s->intra_dc_precision);
1564  ff_dlog(s->avctx, "picture_structure=%d\n", s->picture_structure);
1565  ff_dlog(s->avctx, "top field first=%d\n", s->top_field_first);
1566  ff_dlog(s->avctx, "repeat first field=%d\n", s->repeat_first_field);
1567  ff_dlog(s->avctx, "conceal=%d\n", s->concealment_motion_vectors);
1568  ff_dlog(s->avctx, "intra_vlc_format=%d\n", s->intra_vlc_format);
1569  ff_dlog(s->avctx, "alternate_scan=%d\n", s->alternate_scan);
1570  ff_dlog(s->avctx, "frame_pred_frame_dct=%d\n", s->frame_pred_frame_dct);
1571  ff_dlog(s->avctx, "progressive_frame=%d\n", s->progressive_frame);
1572 
1573  return 0;
1574 }
1575 
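/* mpeg_field_start() begins a new frame or attaches the second field to the
 * current one. repeat_pict is derived from repeat_first_field /
 * top_field_first: one extra field for interlaced soft telecine, two or four
 * extra fields (frame doubling/tripling) in progressive sequences. */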
1576 static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
1577 {
1578  AVCodecContext *avctx = s->avctx;
1579  Mpeg1Context *s1 = (Mpeg1Context *) s;
1580  int ret;
1581 
1582  if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
1583  if (s->mb_width * s->mb_height * 11LL / (33 * 2 * 8) > buf_size)
1584  return AVERROR_INVALIDDATA;
1585  }
1586 
1587  /* start frame decoding */
1588  if (s->first_field || s->picture_structure == PICT_FRAME) {
1589  AVFrameSideData *pan_scan;
1590 
1591  if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
1592  return ret;
1593 
1594  ff_mpeg_er_frame_start(s);
1595 
1596  /* first check if we must repeat the frame */
1597  s->current_picture_ptr->f->repeat_pict = 0;
1598  if (s->repeat_first_field) {
1599  if (s->progressive_sequence) {
1600  if (s->top_field_first)
1601  s->current_picture_ptr->f->repeat_pict = 4;
1602  else
1603  s->current_picture_ptr->f->repeat_pict = 2;
1604  } else if (s->progressive_frame) {
1605  s->current_picture_ptr->f->repeat_pict = 1;
1606  }
1607  }
1608 
1609  pan_scan = av_frame_new_side_data(s->current_picture_ptr->f,
1610  AV_FRAME_DATA_PANSCAN,
1611  sizeof(s1->pan_scan));
1612  if (!pan_scan)
1613  return AVERROR(ENOMEM);
1614  memcpy(pan_scan->data, &s1->pan_scan, sizeof(s1->pan_scan));
1615 
1616  if (s1->a53_buf_ref) {
1617  AVFrameSideData *sd = av_frame_new_side_data_from_buf(
1618  s->current_picture_ptr->f, AV_FRAME_DATA_A53_CC,
1619  s1->a53_buf_ref);
1620  if (!sd)
1621  av_buffer_unref(&s1->a53_buf_ref);
1622  s1->a53_buf_ref = NULL;
1623  }
1624 
1625  if (s1->has_stereo3d) {
1626  AVStereo3D *stereo = av_stereo3d_create_side_data(s->current_picture_ptr->f);
1627  if (!stereo)
1628  return AVERROR(ENOMEM);
1629 
1630  *stereo = s1->stereo3d;
1631  s1->has_stereo3d = 0;
1632  }
1633 
1634  if (s1->has_afd) {
1635  AVFrameSideData *sd =
1636  av_frame_new_side_data(s->current_picture_ptr->f,
1637  AV_FRAME_DATA_AFD, 1);
1638  if (!sd)
1639  return AVERROR(ENOMEM);
1640 
1641  *sd->data = s1->afd;
1642  s1->has_afd = 0;
1643  }
1644 
1645  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
1646  ff_thread_finish_setup(avctx);
1647  } else { // second field
1648  int i;
1649 
1650  if (!s->current_picture_ptr) {
1651  av_log(s->avctx, AV_LOG_ERROR, "first field missing\n");
1652  return AVERROR_INVALIDDATA;
1653  }
1654 
1655  if (s->avctx->hwaccel) {
1656  if ((ret = s->avctx->hwaccel->end_frame(s->avctx)) < 0) {
1657  av_log(avctx, AV_LOG_ERROR,
1658  "hardware accelerator failed to decode first field\n");
1659  return ret;
1660  }
1661  }
1662 
1663  for (i = 0; i < 4; i++) {
1664  s->current_picture.f->data[i] = s->current_picture_ptr->f->data[i];
1665  if (s->picture_structure == PICT_BOTTOM_FIELD)
1666  s->current_picture.f->data[i] +=
1667  s->current_picture_ptr->f->linesize[i];
1668  }
1669  }
1670 
1671  if (avctx->hwaccel) {
1672  if ((ret = avctx->hwaccel->start_frame(avctx, buf, buf_size)) < 0)
1673  return ret;
1674  }
1675 
1676  return 0;
1677 }
1678 
1679 #define DECODE_SLICE_ERROR -1
1680 #define DECODE_SLICE_OK 0
1681 
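/* A slice start code encodes the macroblock row; the slice header then holds
 * quantiser_scale_code and optional extra information bits. In the
 * macroblock_address_increment VLC used below, codes of 33 and above are
 * escapes or stuffing rather than real increments, which is why they are
 * treated specially. */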
1682 /**
1683  * Decode a slice.
1684  * MpegEncContext.mb_y must be set to the MB row from the startcode.
1685  * @return DECODE_SLICE_ERROR if the slice is damaged,
1686  * DECODE_SLICE_OK if this slice is OK
1687  */
1688 static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
1689  const uint8_t **buf, int buf_size)
1690 {
1691  AVCodecContext *avctx = s->avctx;
1692  const int lowres = s->avctx->lowres;
1693  const int field_pic = s->picture_structure != PICT_FRAME;
1694  int ret;
1695 
1696  s->resync_mb_x =
1697  s->resync_mb_y = -1;
1698 
1699  av_assert0(mb_y < s->mb_height);
1700 
1701  init_get_bits(&s->gb, *buf, buf_size * 8);
1702  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
1703  skip_bits(&s->gb, 3);
1704 
1705  ff_mpeg1_clean_buffers(s);
1706  s->interlaced_dct = 0;
1707 
1708  s->qscale = mpeg_get_qscale(s);
1709 
1710  if (s->qscale == 0) {
1711  av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
1712  return AVERROR_INVALIDDATA;
1713  }
1714 
1715  /* extra slice info */
1716  if (skip_1stop_8data_bits(&s->gb) < 0)
1717  return AVERROR_INVALIDDATA;
1718 
1719  s->mb_x = 0;
1720 
1721  if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
1722  skip_bits1(&s->gb);
1723  } else {
1724  while (get_bits_left(&s->gb) > 0) {
1725  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1726  MBINCR_VLC_BITS, 2);
1727  if (code < 0) {
1728  av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
1729  return AVERROR_INVALIDDATA;
1730  }
1731  if (code >= 33) {
1732  if (code == 33)
1733  s->mb_x += 33;
1734  /* otherwise, stuffing, nothing to do */
1735  } else {
1736  s->mb_x += code;
1737  break;
1738  }
1739  }
1740  }
1741 
1742  if (s->mb_x >= (unsigned) s->mb_width) {
1743  av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
1744  return AVERROR_INVALIDDATA;
1745  }
1746 
1747  if (avctx->hwaccel && avctx->hwaccel->decode_slice) {
1748  const uint8_t *buf_end, *buf_start = *buf - 4; /* include start_code */
1749  int start_code = -1;
1750  buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
1751  if (buf_end < *buf + buf_size)
1752  buf_end -= 4;
1753  s->mb_y = mb_y;
1754  if (avctx->hwaccel->decode_slice(avctx, buf_start, buf_end - buf_start) < 0)
1755  return DECODE_SLICE_ERROR;
1756  *buf = buf_end;
1757  return DECODE_SLICE_OK;
1758  }
1759 
1760  s->resync_mb_x = s->mb_x;
1761  s->resync_mb_y = s->mb_y = mb_y;
1762  s->mb_skip_run = 0;
1763  ff_init_block_index(s);
1764 
1765  if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
1766  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
1767  av_log(s->avctx, AV_LOG_DEBUG,
1768  "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
1769  s->qscale,
1770  s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
1771  s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
1772  s->pict_type == AV_PICTURE_TYPE_I ? 'I' :
1773  (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
1774  (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
1775  s->progressive_sequence ? "ps" : "",
1776  s->progressive_frame ? "pf" : "",
1777  s->alternate_scan ? "alt" : "",
1778  s->top_field_first ? "top" : "",
1779  s->intra_dc_precision, s->picture_structure,
1780  s->frame_pred_frame_dct, s->concealment_motion_vectors,
1781  s->q_scale_type, s->intra_vlc_format,
1782  s->repeat_first_field, s->chroma_420_type ? "420" : "");
1783  }
1784  }
1785 
1786  for (;;) {
1787  if ((ret = mpeg_decode_mb(s, s->block)) < 0)
1788  return ret;
1789 
1790  // Note motion_val is normally NULL unless we want to extract the MVs.
1791  if (s->current_picture.motion_val[0] && !s->encoding) {
1792  const int wrap = s->b8_stride;
1793  int xy = s->mb_x * 2 + s->mb_y * 2 * wrap;
1794  int b8_xy = 4 * (s->mb_x + s->mb_y * s->mb_stride);
1795  int motion_x, motion_y, dir, i;
1796 
1797  for (i = 0; i < 2; i++) {
1798  for (dir = 0; dir < 2; dir++) {
1799  if (s->mb_intra ||
1800  (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
1801  motion_x = motion_y = 0;
1802  } else if (s->mv_type == MV_TYPE_16X16 ||
1803  (s->mv_type == MV_TYPE_FIELD && field_pic)) {
1804  motion_x = s->mv[dir][0][0];
1805  motion_y = s->mv[dir][0][1];
1806  } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
1807  motion_x = s->mv[dir][i][0];
1808  motion_y = s->mv[dir][i][1];
1809  }
1810 
1811  s->current_picture.motion_val[dir][xy][0] = motion_x;
1812  s->current_picture.motion_val[dir][xy][1] = motion_y;
1813  s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
1814  s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
1815  s->current_picture.ref_index [dir][b8_xy] =
1816  s->current_picture.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
1817  av_assert2(s->field_select[dir][i] == 0 ||
1818  s->field_select[dir][i] == 1);
1819  }
1820  xy += wrap;
1821  b8_xy += 2;
1822  }
1823  }
1824 
1825  s->dest[0] += 16 >> lowres;
1826  s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
1827  s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
1828 
1829  ff_mpv_reconstruct_mb(s, s->block);
1830 
1831  if (++s->mb_x >= s->mb_width) {
1832  const int mb_size = 16 >> s->avctx->lowres;
1833  int left;
1834 
1835  ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
1836  ff_mpv_report_decode_progress(s);
1837 
1838  s->mb_x = 0;
1839  s->mb_y += 1 << field_pic;
1840 
1841  if (s->mb_y >= s->mb_height) {
1842  int left = get_bits_left(&s->gb);
1843  int is_d10 = s->chroma_format == 2 &&
1844  s->pict_type == AV_PICTURE_TYPE_I &&
1845  avctx->profile == 0 && avctx->level == 5 &&
1846  s->intra_dc_precision == 2 &&
1847  s->q_scale_type == 1 && s->alternate_scan == 0 &&
1848  s->progressive_frame == 0
1849  /* vbv_delay == 0xBBB || 0xE10 */;
1850 
1851  if (left >= 32 && !is_d10) {
1852  GetBitContext gb = s->gb;
1853  align_get_bits(&gb);
1854  if (show_bits(&gb, 24) == 0x060E2B) {
1855  av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
1856  is_d10 = 1;
1857  }
1858  if (left > 32 && show_bits_long(&gb, 32) == 0x201) {
1859  av_log(avctx, AV_LOG_DEBUG, "skipping m704 alpha (unsupported)\n");
1860  goto eos;
1861  }
1862  }
1863 
1864  if (left < 0 ||
1865  (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
1866  ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
1867  av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
1868  left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
1869  return AVERROR_INVALIDDATA;
1870  } else
1871  goto eos;
1872  }
1873  // There are some files out there which are missing the last slice.
1874  // In cases where that slice is completely outside the visible area,
1875  // we detect it here instead of running into the end of the stream
1876  // expecting more data.
1877  left = get_bits_left(&s->gb);
1878  if (s->mb_y >= ((s->height + 15) >> 4) &&
1879  !s->progressive_sequence &&
1880  left <= 25 &&
1881  left >= 0 &&
1882  s->mb_skip_run == -1 &&
1883  (!left || show_bits(&s->gb, left) == 0))
1884  goto eos;
1885 
1886  ff_init_block_index(s);
1887  }
1888 
1889  /* skip mb handling */
1890  if (s->mb_skip_run == -1) {
1891  /* read increment again */
1892  s->mb_skip_run = 0;
1893  for (;;) {
1894  int code = get_vlc2(&s->gb, ff_mbincr_vlc.table,
1895  MBINCR_VLC_BITS, 2);
1896  if (code < 0) {
1897  av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
1898  return AVERROR_INVALIDDATA;
1899  }
1900  if (code >= 33) {
1901  if (code == 33) {
1902  s->mb_skip_run += 33;
1903  } else if (code == 35) {
1904  if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
1905  av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
1906  return AVERROR_INVALIDDATA;
1907  }
1908  goto eos; /* end of slice */
1909  }
1910  /* otherwise, stuffing, nothing to do */
1911  } else {
1912  s->mb_skip_run += code;
1913  break;
1914  }
1915  }
1916  if (s->mb_skip_run) {
1917  int i;
1918  if (s->pict_type == AV_PICTURE_TYPE_I) {
1919  av_log(s->avctx, AV_LOG_ERROR,
1920  "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
1921  return AVERROR_INVALIDDATA;
1922  }
1923 
1924  /* skip mb */
1925  s->mb_intra = 0;
1926  for (i = 0; i < 12; i++)
1927  s->block_last_index[i] = -1;
1928  if (s->picture_structure == PICT_FRAME)
1929  s->mv_type = MV_TYPE_16X16;
1930  else
1931  s->mv_type = MV_TYPE_FIELD;
1932  if (s->pict_type == AV_PICTURE_TYPE_P) {
1933  /* if P type, zero motion vector is implied */
1934  s->mv_dir = MV_DIR_FORWARD;
1935  s->mv[0][0][0] = s->mv[0][0][1] = 0;
1936  s->last_mv[0][0][0] = s->last_mv[0][0][1] = 0;
1937  s->last_mv[0][1][0] = s->last_mv[0][1][1] = 0;
1938  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1939  } else {
1940  /* if B type, reuse previous vectors and directions */
1941  s->mv[0][0][0] = s->last_mv[0][0][0];
1942  s->mv[0][0][1] = s->last_mv[0][0][1];
1943  s->mv[1][0][0] = s->last_mv[1][0][0];
1944  s->mv[1][0][1] = s->last_mv[1][0][1];
1945  s->field_select[0][0] = (s->picture_structure - 1) & 1;
1946  s->field_select[1][0] = (s->picture_structure - 1) & 1;
1947  }
1948  }
1949  }
1950  }
1951 eos: // end of slice
1952  if (get_bits_left(&s->gb) < 0) {
1953  av_log(s, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
1954  return AVERROR_INVALIDDATA;
1955  }
1956  *buf += (get_bits_count(&s->gb) - 1) / 8;
1957  ff_dlog(s, "Slice start:%d %d end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
1958  return 0;
1959 }
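A note on the trailing-data heuristics above: a byte sequence starting 06 0E 2B is the beginning of a SMPTE Universal Label, i.e. the key of an MXF KLV packet, which is why the decoder flags such data as MXF residue left in the elementary stream. A minimal standalone sketch of the same test (the helper name is made up for illustration, it is not part of mpeg12dec.c):

/* Illustrative sketch: check whether trailing bytes look like the start of a
 * SMPTE Universal Label (06 0E 2B ...), i.e. raw MXF KLV data. */
#include <stdint.h>
#include <stddef.h>

static int looks_like_mxf_klv(const uint8_t *p, size_t len)
{
    return len >= 3 && p[0] == 0x06 && p[1] == 0x0E && p[2] == 0x2B;
}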
1960 
1961 static int slice_decode_thread(AVCodecContext *c, void *arg)
1962 {
1963  MpegEncContext *s = *(void **) arg;
1964  const uint8_t *buf = s->gb.buffer;
1965  int mb_y = s->start_mb_y;
1966  const int field_pic = s->picture_structure != PICT_FRAME;
1967 
1968  s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
1969 
1970  for (;;) {
1971  uint32_t start_code;
1972  int ret;
1973 
1974  ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
1975  emms_c();
1976  ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
1977  ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
1978  s->start_mb_y, s->end_mb_y, s->er.error_count);
1979  if (ret < 0) {
1980  if (c->err_recognition & AV_EF_EXPLODE)
1981  return ret;
1982  if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
1983  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1984  s->mb_x, s->mb_y,
1985  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
1986  } else {
1987  ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
1988  s->mb_x - 1, s->mb_y,
1989  ER_AC_END | ER_DC_END | ER_MV_END);
1990  }
1991 
1992  if (s->mb_y == s->end_mb_y)
1993  return 0;
1994 
1995  start_code = -1;
1996  buf = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
1997  if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
1998  return AVERROR_INVALIDDATA;
1999  mb_y = start_code - SLICE_MIN_START_CODE;
2000  if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
2001  mb_y += (*buf&0xE0)<<2;
2002  mb_y <<= field_pic;
2003  if (s->picture_structure == PICT_BOTTOM_FIELD)
2004  mb_y++;
2005  if (mb_y >= s->end_mb_y)
2006  return AVERROR_INVALIDDATA;
2007  }
2008 }
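Both mpeg_decode_slice() and slice_decode_thread() above recover the starting macroblock row from the slice start code, and for MPEG-2 pictures taller than 2800 luma lines they add a 3-bit slice_vertical_position_extension taken from the top bits of the byte following the start code. A standalone sketch of that arithmetic (function name illustrative only, not part of mpeg12dec.c):

/* Illustrative sketch: starting macroblock row of a slice. Start codes
 * 0x101..0x1AF give rows 0..174; taller MPEG-2 pictures extend the range
 * with a 3-bit extension worth 128 rows per step. */
#include <stdint.h>

static int slice_start_row(uint32_t start_code, uint8_t first_byte,
                           int is_mpeg2, int mb_height)
{
    int mb_y = (int)(start_code & 0xFF) - 1;   /* 0x101 -> 0, 0x1AF -> 174 */

    if (is_mpeg2 && mb_height > 2800 / 16)     /* same threshold as above */
        mb_y += (first_byte & 0xE0) << 2;      /* extension value * 128 rows */

    return mb_y;
}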
2009 
2010 /**
2011  * Handle slice ends.
2012  * @return 1 if it seems to be the last slice
2013  */
2014 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
2015 {
2016  Mpeg1Context *s1 = avctx->priv_data;
2017  MpegEncContext *s = &s1->mpeg_enc_ctx;
2018 
2019  if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
2020  return 0;
2021 
2022  if (s->avctx->hwaccel) {
2023  int ret = s->avctx->hwaccel->end_frame(s->avctx);
2024  if (ret < 0) {
2025  av_log(avctx, AV_LOG_ERROR,
2026  "hardware accelerator failed to decode picture\n");
2027  return ret;
2028  }
2029  }
2030 
2031  /* end of slice reached */
2032  if (/* s->mb_y << field_pic == s->mb_height && */ !s->first_field && !s1->first_slice) {
2033  /* end of image */
2034 
2035  ff_er_frame_end(&s->er);
2036 
2037  ff_mpv_frame_end(s);
2038 
2039  if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
2040  int ret = av_frame_ref(pict, s->current_picture_ptr->f);
2041  if (ret < 0)
2042  return ret;
2043  ff_print_debug_info(s, s->current_picture_ptr, pict);
2044  ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2045  } else {
2046  /* latency of 1 frame for I- and P-frames */
2047  if (s->last_picture_ptr) {
2048  int ret = av_frame_ref(pict, s->last_picture_ptr->f);
2049  if (ret < 0)
2050  return ret;
2051  ff_print_debug_info(s, s->last_picture_ptr, pict);
2052  ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_MPV_QSCALE_TYPE_MPEG2);
2053  }
2054  }
2055 
2056  return 1;
2057  } else {
2058  return 0;
2059  }
2060 }
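slice_end() hands B-pictures (and low-delay output) to the caller immediately, but for I- and P-pictures it returns last_picture_ptr, i.e. the previous reference frame, which restores display order at a cost of one frame of latency; the frame still held at end of stream is flushed by the SEQ_END_CODE path in mpeg_decode_frame() further down. A toy sketch of that reordering (not part of mpeg12dec.c), assuming the decode order given in the comment:

/* Illustrative sketch: one-reference-frame output latency. Decode order
 * I0 P3 B1 B2 P6 B4 B5 comes out in display order 0 1 2 3 4 5 6. */
#include <stdio.h>

int main(void)
{
    const char *types  = "IPBBPBB";               /* picture types in decode order */
    const int   disp[] = { 0, 3, 1, 2, 6, 4, 5 }; /* their display numbers */
    int held = -1;                                /* like last_picture_ptr */

    for (int i = 0; i < 7; i++) {
        if (types[i] == 'B') {
            printf("out %d (B)\n", disp[i]);      /* B-pictures go out at once */
        } else {
            if (held >= 0)
                printf("out %d (ref)\n", held);   /* previous reference, one frame late */
            held = disp[i];
        }
    }
    printf("out %d (ref, flushed at end of stream)\n", held);
    return 0;
}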
2061 
2062 static int mpeg1_decode_sequence(AVCodecContext *avctx,
2063  const uint8_t *buf, int buf_size)
2064 {
2065  Mpeg1Context *s1 = avctx->priv_data;
2066  MpegEncContext *s = &s1->mpeg_enc_ctx;
2067  int width, height;
2068  int i, v, j;
2069 
2070  init_get_bits(&s->gb, buf, buf_size * 8);
2071 
2072  width = get_bits(&s->gb, 12);
2073  height = get_bits(&s->gb, 12);
2074  if (width == 0 || height == 0) {
2075  av_log(avctx, AV_LOG_WARNING,
2076  "Invalid horizontal or vertical size value.\n");
2077  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2078  return AVERROR_INVALIDDATA;
2079  }
2080  s1->aspect_ratio_info = get_bits(&s->gb, 4);
2081  if (s1->aspect_ratio_info == 0) {
2082  av_log(avctx, AV_LOG_ERROR, "aspect ratio has forbidden 0 value\n");
2083  if (avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_COMPLIANT))
2084  return AVERROR_INVALIDDATA;
2085  }
2086  s1->frame_rate_index = get_bits(&s->gb, 4);
2087  if (s1->frame_rate_index == 0 || s1->frame_rate_index > 13) {
2088  av_log(avctx, AV_LOG_WARNING,
2089  "frame_rate_index %d is invalid\n", s1->frame_rate_index);
2090  s1->frame_rate_index = 1;
2091  }
2092  s->bit_rate = get_bits(&s->gb, 18) * 400LL;
2093  if (check_marker(s->avctx, &s->gb, "in sequence header") == 0) {
2094  return AVERROR_INVALIDDATA;
2095  }
2096 
2097  s1->rc_buffer_size = get_bits(&s->gb, 10) * 1024 * 16;
2098  skip_bits(&s->gb, 1);
2099 
2100  /* get matrix */
2101  if (get_bits1(&s->gb)) {
2102  load_matrix(s, s->chroma_intra_matrix, s->intra_matrix, 1);
2103  } else {
2104  for (i = 0; i < 64; i++) {
2105  j = s->idsp.idct_permutation[i];
2106  v = ff_mpeg1_default_intra_matrix[i];
2107  s->intra_matrix[j] = v;
2108  s->chroma_intra_matrix[j] = v;
2109  }
2110  }
2111  if (get_bits1(&s->gb)) {
2112  load_matrix(s, s->chroma_inter_matrix, s->inter_matrix, 0);
2113  } else {
2114  for (i = 0; i < 64; i++) {
2115  int j = s->idsp.idct_permutation[i];
2116  v = ff_mpeg1_default_non_intra_matrix[i];
2117  s->inter_matrix[j] = v;
2118  s->chroma_inter_matrix[j] = v;
2119  }
2120  }
2121 
2122  if (show_bits(&s->gb, 23) != 0) {
2123  av_log(s->avctx, AV_LOG_ERROR, "sequence header damaged\n");
2124  return AVERROR_INVALIDDATA;
2125  }
2126 
2127  s->width = width;
2128  s->height = height;
2129 
2130  /* We set MPEG-2 parameters so that it emulates MPEG-1. */
2131  s->progressive_sequence = 1;
2132  s->progressive_frame = 1;
2133  s->picture_structure = PICT_FRAME;
2134  s->first_field = 0;
2135  s->frame_pred_frame_dct = 1;
2136  s->chroma_format = 1;
2137  s->codec_id =
2138  s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2139  s->out_format = FMT_MPEG1;
2140  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY)
2141  s->low_delay = 1;
2142 
2143  if (s->avctx->debug & FF_DEBUG_PICT_INFO)
2144  av_log(s->avctx, AV_LOG_DEBUG, "vbv buffer: %d, bitrate:%"PRId64", aspect_ratio_info: %d \n",
2145  s1->rc_buffer_size, s->bit_rate, s1->aspect_ratio_info);
2146 
2147  return 0;
2148 }
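The multiplications above come straight from the sequence-header units: bit_rate is an 18-bit field in units of 400 bit/s, and vbv_buffer_size is a 10-bit field in units of 16 kbit (16 * 1024 bits). A small sketch of just those conversions; the struct and function names are illustrative, not FFmpeg API:

/* Illustrative sketch (not part of mpeg12dec.c): unit conversions applied to
 * the raw MPEG-1 sequence header fields read above. */
#include <stdint.h>

typedef struct SeqHeaderUnits {
    int64_t bit_rate;         /* bits per second */
    int64_t vbv_buffer_bits;  /* VBV buffer size in bits */
} SeqHeaderUnits;

static SeqHeaderUnits seq_header_units(unsigned bit_rate_value,       /* 18-bit field */
                                       unsigned vbv_buffer_size_value /* 10-bit field */)
{
    SeqHeaderUnits u;
    u.bit_rate        = (int64_t)bit_rate_value * 400;              /* units of 400 bit/s */
    u.vbv_buffer_bits = (int64_t)vbv_buffer_size_value * 16 * 1024; /* units of 16 kbit */
    return u;
}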
2149 
2150 static int vcr2_init_sequence(AVCodecContext *avctx)
2151 {
2152  Mpeg1Context *s1 = avctx->priv_data;
2153  MpegEncContext *s = &s1->mpeg_enc_ctx;
2154  int i, v, ret;
2155 
2156  /* start new MPEG-1 context decoding */
2157  s->out_format = FMT_MPEG1;
2158  if (s1->mpeg_enc_ctx_allocated) {
2159  ff_mpv_common_end(s);
2160  s1->mpeg_enc_ctx_allocated = 0;
2161  }
2162  s->width = avctx->coded_width;
2163  s->height = avctx->coded_height;
2164  avctx->has_b_frames = 0; // true?
2165  s->low_delay = 1;
2166 
2167  avctx->pix_fmt = mpeg_get_pixelformat(avctx);
2168 
2169  ff_mpv_idct_init(s);
2170  if ((ret = ff_mpv_common_init(s)) < 0)
2171  return ret;
2172  s1->mpeg_enc_ctx_allocated = 1;
2173 
2174  for (i = 0; i < 64; i++) {
2175  int j = s->idsp.idct_permutation[i];
2176  v = ff_mpeg1_default_intra_matrix[i];
2177  s->intra_matrix[j] = v;
2178  s->chroma_intra_matrix[j] = v;
2179 
2180  v = ff_mpeg1_default_non_intra_matrix[i];
2181  s->inter_matrix[j] = v;
2182  s->chroma_inter_matrix[j] = v;
2183  }
2184 
2185  s->progressive_sequence = 1;
2186  s->progressive_frame = 1;
2187  s->picture_structure = PICT_FRAME;
2188  s->first_field = 0;
2189  s->frame_pred_frame_dct = 1;
2190  s->chroma_format = 1;
2191  if (s->codec_tag == AV_RL32("BW10")) {
2192  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG1VIDEO;
2193  } else {
2194  s->codec_id = s->avctx->codec_id = AV_CODEC_ID_MPEG2VIDEO;
2195  }
2196  s1->save_width = s->width;
2197  s1->save_height = s->height;
2198  s1->save_progressive_seq = s->progressive_sequence;
2199  return 0;
2200 }
2201 
2202 static int mpeg_decode_a53_cc(AVCodecContext *avctx,
2203  const uint8_t *p, int buf_size)
2204 {
2205  Mpeg1Context *s1 = avctx->priv_data;
2206 
2207  if (buf_size >= 6 &&
2208  p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
2209  p[4] == 3 && (p[5] & 0x40)) {
2210  /* extract A53 Part 4 CC data */
2211  int cc_count = p[5] & 0x1f;
2212  if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
2213  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2214  const uint64_t new_size = (old_size + cc_count
2215  * UINT64_C(3));
2216  int ret;
2217 
2218  if (new_size > 3*A53_MAX_CC_COUNT)
2219  return AVERROR(EINVAL);
2220 
2221  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2222  if (ret >= 0)
2223  memcpy(s1->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
2224 
2225  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2226  }
2227  return 1;
2228  } else if (buf_size >= 2 &&
2229  p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
2230  /* extract SCTE-20 CC data */
2231  GetBitContext gb;
2232  int cc_count = 0;
2233  int i, ret;
2234 
2235  init_get_bits8(&gb, p + 2, buf_size - 2);
2236  cc_count = get_bits(&gb, 5);
2237  if (cc_count > 0) {
2238  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2239  const uint64_t new_size = (old_size + cc_count
2240  * UINT64_C(3));
2241  if (new_size > 3*A53_MAX_CC_COUNT)
2242  return AVERROR(EINVAL);
2243 
2244  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2245  if (ret >= 0) {
2246  uint8_t field, cc1, cc2;
2247  uint8_t *cap = s1->a53_buf_ref->data;
2248 
2249  memset(s1->a53_buf_ref->data + old_size, 0, cc_count * 3);
2250  for (i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
2251  skip_bits(&gb, 2); // priority
2252  field = get_bits(&gb, 2);
2253  skip_bits(&gb, 5); // line_offset
2254  cc1 = get_bits(&gb, 8);
2255  cc2 = get_bits(&gb, 8);
2256  skip_bits(&gb, 1); // marker
2257 
2258  if (!field) { // forbidden
2259  cap[0] = cap[1] = cap[2] = 0x00;
2260  } else {
2261  field = (field == 2 ? 1 : 0);
2262  if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
2263  cap[0] = 0x04 | field;
2264  cap[1] = ff_reverse[cc1];
2265  cap[2] = ff_reverse[cc2];
2266  }
2267  cap += 3;
2268  }
2269  }
2270  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2271  }
2272  return 1;
2273  } else if (buf_size >= 11 &&
2274  p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
2275  /* extract DVD CC data
2276  *
2277  * uint32_t user_data_start_code 0x000001B2 (big endian)
2278  * uint16_t user_identifier 0x4343 "CC"
2279  * uint8_t user_data_type_code 0x01
2280  * uint8_t caption_block_size 0xF8
2281  * uint8_t
2282  * bit 7 caption_odd_field_first 1=odd field (CC1/CC2) first 0=even field (CC3/CC4) first
2283  * bit 6 caption_filler 0
2284  * bit 5:1 caption_block_count number of caption blocks (pairs of caption words = frames). Most DVDs use 15 per start of GOP.
2285  * bit 0 caption_extra_field_added 1=one additional caption word
2286  *
2287  * struct caption_field_block {
2288  * uint8_t
2289  * bit 7:1 caption_filler 0x7F (all 1s)
2290  * bit 0 caption_field_odd 1=odd field (this is CC1/CC2) 0=even field (this is CC3/CC4)
2291  * uint8_t caption_first_byte
2292  * uint8_t caption_second_byte
2293  * } caption_block[(caption_block_count * 2) + caption_extra_field_added];
2294  *
2295  * Some DVDs encode caption data for both fields with caption_field_odd=1. The only way to decode the fields
2296  * correctly is to start on the field indicated by caption_odd_field_first and count between odd/even fields.
2297  * Don't assume that the first caption word is the odd field. There do exist MPEG files in the wild that start
2298  * on the even field. There also exist DVDs in the wild that encode an odd field count and the
2299  * caption_extra_field_added/caption_odd_field_first bits change per packet to allow that. */
2300  int cc_count = 0;
2301  int i, ret;
2302  // There is a caption count field in the data, but it is often
2303  // incorrect. So count the number of captions present.
2304  for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
2305  cc_count++;
2306  // Transform the DVD format into A53 Part 4 format
2307  if (cc_count > 0) {
2308  int old_size = s1->a53_buf_ref ? s1->a53_buf_ref->size : 0;
2309  const uint64_t new_size = (old_size + cc_count
2310  * UINT64_C(6));
2311  if (new_size > 3*A53_MAX_CC_COUNT)
2312  return AVERROR(EINVAL);
2313 
2314  ret = av_buffer_realloc(&s1->a53_buf_ref, new_size);
2315  if (ret >= 0) {
2316  uint8_t field1 = !!(p[4] & 0x80);
2317  uint8_t *cap = s1->a53_buf_ref->data;
2318  p += 5;
2319  for (i = 0; i < cc_count; i++) {
2320  cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
2321  cap[1] = p[1];
2322  cap[2] = p[2];
2323  cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
2324  cap[4] = p[4];
2325  cap[5] = p[5];
2326  cap += 6;
2327  p += 6;
2328  }
2329  }
2330  avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
2331  }
2332  return 1;
2333  }
2334  return 0;
2335 }
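In the SCTE-20 branch above, each caption word is rewritten as a 3-byte A53 Part 4 cc_data packet: a header byte 0x04 | cc_type (cc_valid set, cc_type selecting field 1 or 2) followed by the two caption bytes, which SCTE-20 stores bit-reversed, hence the ff_reverse[] lookups. A self-contained sketch of that packing (not part of mpeg12dec.c); bit_reverse8() is a stand-in for ff_reverse[]:

/* Illustrative sketch: pack one SCTE-20 caption word into the 3-byte A53
 * cc_data form produced above. */
#include <stdint.h>

static uint8_t bit_reverse8(uint8_t x)
{
    x = (uint8_t)((x >> 4) | (x << 4));
    x = (uint8_t)(((x & 0xCC) >> 2) | ((x & 0x33) << 2));
    x = (uint8_t)(((x & 0xAA) >> 1) | ((x & 0x55) << 1));
    return x;
}

static void scte20_to_a53(uint8_t field, uint8_t cc1, uint8_t cc2,
                          int top_field_first, uint8_t out[3])
{
    if (!field) {                      /* field code 0 is forbidden: pad */
        out[0] = out[1] = out[2] = 0x00;
        return;
    }
    field = (field == 2) ? 1 : 0;      /* map SCTE-20 field code to cc_type */
    if (!top_field_first)
        field = !field;
    out[0] = 0x04 | field;             /* cc_valid = 1, cc_type = field */
    out[1] = bit_reverse8(cc1);        /* caption bytes arrive LSB-first */
    out[2] = bit_reverse8(cc2);
}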
2336 
2337 static void mpeg_decode_user_data(AVCodecContext *avctx,
2338  const uint8_t *p, int buf_size)
2339 {
2340  Mpeg1Context *s = avctx->priv_data;
2341  const uint8_t *buf_end = p + buf_size;
2342  Mpeg1Context *s1 = avctx->priv_data;
2343 
2344 #if 0
2345  int i;
2346  for(i=0; !(!p[i-2] && !p[i-1] && p[i]==1) && i<buf_size; i++){
2347  av_log(avctx, AV_LOG_ERROR, "%c", p[i]);
2348  }
2349  av_log(avctx, AV_LOG_ERROR, "\n");
2350 #endif
2351 
2352  if (buf_size > 29){
2353  int i;
2354  for(i=0; i<20; i++)
2355  if (!memcmp(p+i, "\0TMPGEXS\0", 9)){
2356  s->tmpgexs= 1;
2357  }
2358  }
2359  /* we parse the DTG active format information */
2360  if (buf_end - p >= 5 &&
2361  p[0] == 'D' && p[1] == 'T' && p[2] == 'G' && p[3] == '1') {
2362  int flags = p[4];
2363  p += 5;
2364  if (flags & 0x80) {
2365  /* skip event id */
2366  p += 2;
2367  }
2368  if (flags & 0x40) {
2369  if (buf_end - p < 1)
2370  return;
2371  s1->has_afd = 1;
2372  s1->afd = p[0] & 0x0f;
2373  }
2374  } else if (buf_end - p >= 6 &&
2375  p[0] == 'J' && p[1] == 'P' && p[2] == '3' && p[3] == 'D' &&
2376  p[4] == 0x03) { // S3D_video_format_length
2377  // the 0x7F mask ignores the reserved_bit value
2378  const uint8_t S3D_video_format_type = p[5] & 0x7F;
2379 
2380  if (S3D_video_format_type == 0x03 ||
2381  S3D_video_format_type == 0x04 ||
2382  S3D_video_format_type == 0x08 ||
2383  S3D_video_format_type == 0x23) {
2384 
2385  s1->has_stereo3d = 1;
2386 
2387  switch (S3D_video_format_type) {
2388  case 0x03:
2389  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE;
2390  break;
2391  case 0x04:
2392  s1->stereo3d.type = AV_STEREO3D_TOPBOTTOM;
2393  break;
2394  case 0x08:
2395  s1->stereo3d.type = AV_STEREO3D_2D;
2396  break;
2397  case 0x23:
2398  s1->stereo3d.type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2399  break;
2400  }
2401  }
2402  } else if (mpeg_decode_a53_cc(avctx, p, buf_size)) {
2403  return;
2404  }
2405 }
2406 
2407 static void mpeg_decode_gop(AVCodecContext *avctx,
2408  const uint8_t *buf, int buf_size)
2409 {
2410  Mpeg1Context *s1 = avctx->priv_data;
2411  MpegEncContext *s = &s1->mpeg_enc_ctx;
2412  int broken_link;
2413  int64_t tc;
2414 
2415  init_get_bits(&s->gb, buf, buf_size * 8);
2416 
2417  tc = s1->timecode_frame_start = get_bits(&s->gb, 25);
2418 
2419  s1->closed_gop = get_bits1(&s->gb);
2420  /* broken_link indicates that, after editing, the
2421  * reference frames of the first B-frames following the GOP I-frame
2422  * are missing (open GOP) */
2423  broken_link = get_bits1(&s->gb);
2424 
2425  if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
2426  char tcbuf[AV_TIMECODE_STR_SIZE];
2427  av_timecode_make_mpeg_tc_string(tcbuf, tc);
2428  av_log(s->avctx, AV_LOG_DEBUG,
2429  "GOP (%s) closed_gop=%d broken_link=%d\n",
2430  tcbuf, s1->closed_gop, broken_link);
2431  }
2432 }
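The 25 bits stored into timecode_frame_start above are the GOP header time_code, laid out MSB-first as drop_frame_flag (1 bit), hours (5), minutes (6), a marker bit, seconds (6) and pictures (6); av_timecode_make_mpeg_tc_string() formats exactly this value. A sketch (not part of mpeg12dec.c) that unpacks it directly:

/* Illustrative sketch: unpack the 25-bit GOP time_code
 * (drop(1) hh(5) mm(6) marker(1) ss(6) ff(6)). */
#include <stdio.h>
#include <stdint.h>

static void print_gop_timecode(uint32_t tc25)
{
    unsigned pictures =  tc25        & 0x3F;
    unsigned seconds  = (tc25 >>  6) & 0x3F;
    /* bit 12 is the marker_bit and carries no information */
    unsigned minutes  = (tc25 >> 13) & 0x3F;
    unsigned hours    = (tc25 >> 19) & 0x1F;
    unsigned drop     = (tc25 >> 24) & 0x01;

    printf("%02u:%02u:%02u%c%02u\n",
           hours, minutes, seconds, drop ? ';' : ':', pictures);
}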
2433 
2434 static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
2435  int *got_output, const uint8_t *buf, int buf_size)
2436 {
2437  Mpeg1Context *s = avctx->priv_data;
2438  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2439  const uint8_t *buf_ptr = buf;
2440  const uint8_t *buf_end = buf + buf_size;
2441  int ret, input_size;
2442  int last_code = 0, skip_frame = 0;
2443  int picture_start_code_seen = 0;
2444 
2445  for (;;) {
2446  /* find next start code */
2447  uint32_t start_code = -1;
2448  buf_ptr = avpriv_find_start_code(buf_ptr, buf_end, &start_code);
2449  if (start_code > 0x1ff) {
2450  if (!skip_frame) {
2451  if (HAVE_THREADS &&
2452  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2453  !avctx->hwaccel) {
2454  int i;
2455  av_assert0(avctx->thread_count > 1);
2456 
2457  avctx->execute(avctx, slice_decode_thread,
2458  &s2->thread_context[0], NULL,
2459  s->slice_count, sizeof(void *));
2460  for (i = 0; i < s->slice_count; i++)
2461  s2->er.error_count += s2->thread_context[i]->er.error_count;
2462  }
2463 
2464  ret = slice_end(avctx, picture);
2465  if (ret < 0)
2466  return ret;
2467  else if (ret) {
2468  // FIXME: merge with the stuff in mpeg_decode_slice
2469  if (s2->last_picture_ptr || s2->low_delay || s2->pict_type == AV_PICTURE_TYPE_B)
2470  *got_output = 1;
2471  }
2472  }
2473  s2->pict_type = 0;
2474 
2475  if (avctx->err_recognition & AV_EF_EXPLODE && s2->er.error_count)
2476  return AVERROR_INVALIDDATA;
2477 
2478 #if FF_API_FLAG_TRUNCATED
2479  return FFMAX(0, buf_ptr - buf - s2->parse_context.last_index);
2480 #else
2481  return FFMAX(0, buf_ptr - buf);
2482 #endif
2483  }
2484 
2485  input_size = buf_end - buf_ptr;
2486 
2487  if (avctx->debug & FF_DEBUG_STARTCODE)
2488  av_log(avctx, AV_LOG_DEBUG, "%3"PRIX32" at %"PTRDIFF_SPECIFIER" left %d\n",
2489  start_code, buf_ptr - buf, input_size);
2490 
2491  /* prepare data for next start code */
2492  switch (start_code) {
2493  case SEQ_START_CODE:
2494  if (last_code == 0) {
2495  mpeg1_decode_sequence(avctx, buf_ptr, input_size);
2496  if (buf != avctx->extradata)
2497  s->sync = 1;
2498  } else {
2499  av_log(avctx, AV_LOG_ERROR,
2500  "ignoring SEQ_START_CODE after %X\n", last_code);
2501  if (avctx->err_recognition & AV_EF_EXPLODE)
2502  return AVERROR_INVALIDDATA;
2503  }
2504  break;
2505 
2506  case PICTURE_START_CODE:
2507  if (picture_start_code_seen && s2->picture_structure == PICT_FRAME) {
2508  /* If it's a frame picture, there can't be more than one picture header.
2509  Yet, it does happen and we need to handle it. */
2510  av_log(avctx, AV_LOG_WARNING, "ignoring extra picture following a frame-picture\n");
2511  break;
2512  }
2513  picture_start_code_seen = 1;
2514 
2515  if (s2->width <= 0 || s2->height <= 0) {
2516  av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d.\n",
2517  s2->width, s2->height);
2518  return AVERROR_INVALIDDATA;
2519  }
2520 
2521  if (s->tmpgexs){
2522  s2->intra_dc_precision= 3;
2523  s2->intra_matrix[0]= 1;
2524  }
2525  if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
2526  !avctx->hwaccel && s->slice_count) {
2527  int i;
2528 
2529  avctx->execute(avctx, slice_decode_thread,
2530  s2->thread_context, NULL,
2531  s->slice_count, sizeof(void *));
2532  for (i = 0; i < s->slice_count; i++)
2533  s2->er.error_count += s2->thread_context[i]->er.error_count;
2534  s->slice_count = 0;
2535  }
2536  if (last_code == 0 || last_code == SLICE_MIN_START_CODE) {
2537  ret = mpeg_decode_postinit(avctx);
2538  if (ret < 0) {
2539  av_log(avctx, AV_LOG_ERROR,
2540  "mpeg_decode_postinit() failure\n");
2541  return ret;
2542  }
2543 
2544  /* We have a complete image: we try to decompress it. */
2545  if (mpeg1_decode_picture(avctx, buf_ptr, input_size) < 0)
2546  s2->pict_type = 0;
2547  s->first_slice = 1;
2548  last_code = PICTURE_START_CODE;
2549  } else {
2550  av_log(avctx, AV_LOG_ERROR,
2551  "ignoring pic after %X\n", last_code);
2552  if (avctx->err_recognition & AV_EF_EXPLODE)
2553  return AVERROR_INVALIDDATA;
2554  }
2555  break;
2556  case EXT_START_CODE:
2557  init_get_bits(&s2->gb, buf_ptr, input_size * 8);
2558 
2559  switch (get_bits(&s2->gb, 4)) {
2560  case 0x1:
2561  if (last_code == 0) {
2562  mpeg_decode_sequence_extension(s);
2563  } else {
2564  av_log(avctx, AV_LOG_ERROR,
2565  "ignoring seq ext after %X\n", last_code);
2566  if (avctx->err_recognition & AV_EF_EXPLODE)
2567  return AVERROR_INVALIDDATA;
2568  }
2569  break;
2570  case 0x2:
2571  mpeg_decode_sequence_display_extension(s);
2572  break;
2573  case 0x3:
2574  mpeg_decode_quant_matrix_extension(s2);
2575  break;
2576  case 0x7:
2577  mpeg_decode_picture_display_extension(s);
2578  break;
2579  case 0x8:
2580  if (last_code == PICTURE_START_CODE) {
2581  ret = mpeg_decode_picture_coding_extension(s);
2582  if (ret < 0)
2583  return ret;
2584  } else {
2585  av_log(avctx, AV_LOG_ERROR,
2586  "ignoring pic cod ext after %X\n", last_code);
2587  if (avctx->err_recognition & AV_EF_EXPLODE)
2588  return AVERROR_INVALIDDATA;
2589  }
2590  break;
2591  }
2592  break;
2593  case USER_START_CODE:
2594  mpeg_decode_user_data(avctx, buf_ptr, input_size);
2595  break;
2596  case GOP_START_CODE:
2597  if (last_code == 0) {
2598  s2->first_field = 0;
2599  mpeg_decode_gop(avctx, buf_ptr, input_size);
2600  s->sync = 1;
2601  } else {
2602  av_log(avctx, AV_LOG_ERROR,
2603  "ignoring GOP_START_CODE after %X\n", last_code);
2604  if (avctx->err_recognition & AV_EF_EXPLODE)
2605  return AVERROR_INVALIDDATA;
2606  }
2607  break;
2608  default:
2609  if (start_code >= SLICE_MIN_START_CODE &&
2610  start_code <= SLICE_MAX_START_CODE && last_code == PICTURE_START_CODE) {
2611  if (s2->progressive_sequence && !s2->progressive_frame) {
2612  s2->progressive_frame = 1;
2613  av_log(s2->avctx, AV_LOG_ERROR,
2614  "interlaced frame in progressive sequence, ignoring\n");
2615  }
2616 
2617  if (s2->picture_structure == 0 ||
2618  (s2->progressive_frame && s2->picture_structure != PICT_FRAME)) {
2619  av_log(s2->avctx, AV_LOG_ERROR,
2620  "picture_structure %d invalid, ignoring\n",
2621  s2->picture_structure);
2622  s2->picture_structure = PICT_FRAME;
2623  }
2624 
2625  if (s2->progressive_sequence && !s2->frame_pred_frame_dct)
2626  av_log(s2->avctx, AV_LOG_WARNING, "invalid frame_pred_frame_dct\n");
2627 
2628  if (s2->picture_structure == PICT_FRAME) {
2629  s2->first_field = 0;
2630  s2->v_edge_pos = 16 * s2->mb_height;
2631  } else {
2632  s2->first_field ^= 1;
2633  s2->v_edge_pos = 8 * s2->mb_height;
2634  memset(s2->mbskip_table, 0, s2->mb_stride * s2->mb_height);
2635  }
2636  }
2637  if (start_code >= SLICE_MIN_START_CODE &&
2638  start_code <= SLICE_MAX_START_CODE && last_code != 0) {
2639  const int field_pic = s2->picture_structure != PICT_FRAME;
2640  int mb_y = start_code - SLICE_MIN_START_CODE;
2641  last_code = SLICE_MIN_START_CODE;
2642  if (s2->codec_id != AV_CODEC_ID_MPEG1VIDEO && s2->mb_height > 2800/16)
2643  mb_y += (*buf_ptr&0xE0)<<2;
2644 
2645  mb_y <<= field_pic;
2646  if (s2->picture_structure == PICT_BOTTOM_FIELD)
2647  mb_y++;
2648 
2649  if (buf_end - buf_ptr < 2) {
2650  av_log(s2->avctx, AV_LOG_ERROR, "slice too small\n");
2651  return AVERROR_INVALIDDATA;
2652  }
2653 
2654  if (mb_y >= s2->mb_height) {
2655  av_log(s2->avctx, AV_LOG_ERROR,
2656  "slice below image (%d >= %d)\n", mb_y, s2->mb_height);
2657  return AVERROR_INVALIDDATA;
2658  }
2659 
2660  if (!s2->last_picture_ptr) {
2661  /* Skip B-frames if we do not have reference frames and
2662  * GOP is not closed. */
2663  if (s2->pict_type == AV_PICTURE_TYPE_B) {
2664  if (!s->closed_gop) {
2665  skip_frame = 1;
2666  av_log(s2->avctx, AV_LOG_DEBUG,
2667  "Skipping B slice due to open GOP\n");
2668  break;
2669  }
2670  }
2671  }
2672  if (s2->pict_type == AV_PICTURE_TYPE_I || (s2->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
2673  s->sync = 1;
2674  if (!s2->next_picture_ptr) {
2675  /* Skip P-frames if we do not have a reference frame or
2676  * we have an invalid header. */
2677  if (s2->pict_type == AV_PICTURE_TYPE_P && !s->sync) {
2678  skip_frame = 1;
2679  av_log(s2->avctx, AV_LOG_DEBUG,
2680  "Skipping P slice due to !sync\n");
2681  break;
2682  }
2683  }
2684  if ((avctx->skip_frame >= AVDISCARD_NONREF &&
2685  s2->pict_type == AV_PICTURE_TYPE_B) ||
2686  (avctx->skip_frame >= AVDISCARD_NONKEY &&
2687  s2->pict_type != AV_PICTURE_TYPE_I) ||
2688  avctx->skip_frame >= AVDISCARD_ALL) {
2689  skip_frame = 1;
2690  break;
2691  }
2692 
2693  if (!s->mpeg_enc_ctx_allocated)
2694  break;
2695 
2696  if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
2697  if (mb_y < avctx->skip_top ||
2698  mb_y >= s2->mb_height - avctx->skip_bottom)
2699  break;
2700  }
2701 
2702  if (!s2->pict_type) {
2703  av_log(avctx, AV_LOG_ERROR, "Missing picture start code\n");
2704  if (avctx->err_recognition & AV_EF_EXPLODE)
2705  return AVERROR_INVALIDDATA;
2706  break;
2707  }
2708 
2709  if (s->first_slice) {
2710  skip_frame = 0;
2711  s->first_slice = 0;
2712  if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
2713  return ret;
2714  }
2715  if (!s2->current_picture_ptr) {
2716  av_log(avctx, AV_LOG_ERROR,
2717  "current_picture not initialized\n");
2718  return AVERROR_INVALIDDATA;
2719  }
2720 
2721  if (HAVE_THREADS &&
2722  (avctx->active_thread_type & FF_THREAD_SLICE) &&
2723  !avctx->hwaccel) {
2724  int threshold = (s2->mb_height * s->slice_count +
2725  s2->slice_context_count / 2) /
2726  s2->slice_context_count;
2727  av_assert0(avctx->thread_count > 1);
2728  if (threshold <= mb_y) {
2729  MpegEncContext *thread_context = s2->thread_context[s->slice_count];
2730 
2731  thread_context->start_mb_y = mb_y;
2732  thread_context->end_mb_y = s2->mb_height;
2733  if (s->slice_count) {
2734  s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
2735  ret = ff_update_duplicate_context(thread_context, s2);
2736  if (ret < 0)
2737  return ret;
2738  }
2739  init_get_bits(&thread_context->gb, buf_ptr, input_size * 8);
2740  s->slice_count++;
2741  }
2742  buf_ptr += 2; // FIXME add minimum number of bytes per slice
2743  } else {
2744  ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
2745  emms_c();
2746 
2747  if (ret < 0) {
2748  if (avctx->err_recognition & AV_EF_EXPLODE)
2749  return ret;
2750  if (s2->resync_mb_x >= 0 && s2->resync_mb_y >= 0)
2751  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2752  s2->resync_mb_y, s2->mb_x, s2->mb_y,
2753  ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
2754  } else {
2755  ff_er_add_slice(&s2->er, s2->resync_mb_x,
2756  s2->resync_mb_y, s2->mb_x - 1, s2->mb_y,
2757  ER_AC_END | ER_DC_END | ER_MV_END);
2758  }
2759  }
2760  }
2761  break;
2762  }
2763  }
2764 }
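For reference, these are the start-code values the switch in decode_chunks() dispatches on; the macro names used above come from the mpeg12/cavs headers, and the enum below merely restates the numeric values for illustration (it is not part of mpeg12dec.c):

/* Illustrative sketch: MPEG-1/2 start codes as defined by
 * ISO/IEC 11172-2 / 13818-2. */
enum {
    MPEG_PICTURE_START_CODE   = 0x00000100,
    MPEG_SLICE_MIN_START_CODE = 0x00000101,  /* slices, one per macroblock row */
    MPEG_SLICE_MAX_START_CODE = 0x000001AF,
    MPEG_USER_DATA_START_CODE = 0x000001B2,  /* A53/SCTE-20/DVD CC, DTG1, JP3D */
    MPEG_SEQ_START_CODE       = 0x000001B3,
    MPEG_EXT_START_CODE       = 0x000001B5,  /* sequence/picture extensions */
    MPEG_SEQ_END_CODE         = 0x000001B7,
    MPEG_GOP_START_CODE       = 0x000001B8,
};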
2765 
2766 static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
2767  int *got_output, AVPacket *avpkt)
2768 {
2769  const uint8_t *buf = avpkt->data;
2770  int ret;
2771  int buf_size = avpkt->size;
2772  Mpeg1Context *s = avctx->priv_data;
2773  MpegEncContext *s2 = &s->mpeg_enc_ctx;
2774 
2775  if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
2776  /* special case for last picture */
2777  if (s2->low_delay == 0 && s2->next_picture_ptr) {
2778  int ret = av_frame_ref(picture, s2->next_picture_ptr->f);
2779  if (ret < 0)
2780  return ret;
2781 
2782  s2->next_picture_ptr = NULL;
2783 
2784  *got_output = 1;
2785  }
2786  return buf_size;
2787  }
2788 
2789 #if FF_API_FLAG_TRUNCATED
2790  if (s2->avctx->flags & AV_CODEC_FLAG_TRUNCATED) {
2791  int next = ff_mpeg1_find_frame_end(&s2->parse_context, buf,
2792  buf_size, NULL);
2793 
2794  if (ff_combine_frame(&s2->parse_context, next,
2795  (const uint8_t **) &buf, &buf_size) < 0)
2796  return buf_size;
2797  }
2798 #endif
2799 
2800  s2->codec_tag = ff_toupper4(avctx->codec_tag);
2801  if (s->mpeg_enc_ctx_allocated == 0 && ( s2->codec_tag == AV_RL32("VCR2")
2802  || s2->codec_tag == AV_RL32("BW10")
2803  ))
2804  vcr2_init_sequence(avctx);
2805 
2806  s->slice_count = 0;
2807 
2808  if (avctx->extradata && !s->extradata_decoded) {
2809  ret = decode_chunks(avctx, picture, got_output,
2810  avctx->extradata, avctx->extradata_size);
2811  if (*got_output) {
2812  av_log(avctx, AV_LOG_ERROR, "picture in extradata\n");
2813  av_frame_unref(picture);
2814  *got_output = 0;
2815  }
2816  s->extradata_decoded = 1;
2817  if (ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE)) {
2818  s2->current_picture_ptr = NULL;
2819  return ret;
2820  }
2821  }
2822 
2823  ret = decode_chunks(avctx, picture, got_output, buf, buf_size);
2824  if (ret<0 || *got_output) {
2825  s2->current_picture_ptr = NULL;
2826 
2827  if (s->timecode_frame_start != -1 && *got_output) {
2828  char tcbuf[AV_TIMECODE_STR_SIZE];
2829  AVFrameSideData *tcside = av_frame_new_side_data(picture,
2830  AV_FRAME_DATA_GOP_TIMECODE,
2831  sizeof(int64_t));
2832  if (!tcside)
2833  return AVERROR(ENOMEM);
2834  memcpy(tcside->data, &s->timecode_frame_start, sizeof(int64_t));
2835 
2836  av_timecode_make_mpeg_tc_string(tcbuf, s->timecode_frame_start);
2837  av_dict_set(&picture->metadata, "timecode", tcbuf, 0);
2838 
2839  s->timecode_frame_start = -1;
2840  }
2841  }
2842 
2843  return ret;
2844 }
2845 
2846 static void flush(AVCodecContext *avctx)
2847 {
2848  Mpeg1Context *s = avctx->priv_data;
2849 
2850  s->sync = 0;
2851  s->closed_gop = 0;
2852 
2853  ff_mpeg_flush(avctx);
2854 }
2855 
2856 static av_cold int mpeg_decode_end(AVCodecContext *avctx)
2857 {
2858  Mpeg1Context *s = avctx->priv_data;
2859 
2860  if (s->mpeg_enc_ctx_allocated)
2861  ff_mpv_common_end(&s->mpeg_enc_ctx);
2862  av_buffer_unref(&s->a53_buf_ref);
2863  return 0;
2864 }
2865 
2866 const FFCodec ff_mpeg1video_decoder = {
2867  .p.name = "mpeg1video",
2868  CODEC_LONG_NAME("MPEG-1 video"),
2869  .p.type = AVMEDIA_TYPE_VIDEO,
2870  .p.id = AV_CODEC_ID_MPEG1VIDEO,
2871  .priv_data_size = sizeof(Mpeg1Context),
2872  .init = mpeg_decode_init,
2873  .close = mpeg_decode_end,
2874  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2875  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2876 #if FF_API_FLAG_TRUNCATED
2877  AV_CODEC_CAP_TRUNCATED |
2878 #endif
2879  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2880  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2881  .flush = flush,
2882  .p.max_lowres = 3,
2883  UPDATE_THREAD_CONTEXT(mpeg_decode_update_thread_context),
2884  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2885 #if CONFIG_MPEG1_NVDEC_HWACCEL
2886  HWACCEL_NVDEC(mpeg1),
2887 #endif
2888 #if CONFIG_MPEG1_VDPAU_HWACCEL
2889  HWACCEL_VDPAU(mpeg1),
2890 #endif
2891 #if CONFIG_MPEG1_VIDEOTOOLBOX_HWACCEL
2892  HWACCEL_VIDEOTOOLBOX(mpeg1),
2893 #endif
2894  NULL
2895  },
2896 };
2897 
2898 const FFCodec ff_mpeg2video_decoder = {
2899  .p.name = "mpeg2video",
2900  CODEC_LONG_NAME("MPEG-2 video"),
2901  .p.type = AVMEDIA_TYPE_VIDEO,
2902  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2903  .priv_data_size = sizeof(Mpeg1Context),
2904  .init = mpeg_decode_init,
2905  .close = mpeg_decode_end,
2906  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2907  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2908 #if FF_API_FLAG_TRUNCATED
2909  AV_CODEC_CAP_TRUNCATED |
2910 #endif
2911  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2912  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2913  .flush = flush,
2914  .p.max_lowres = 3,
2915  .p.profiles = NULL_IF_CONFIG_SMALL(ff_mpeg2_video_profiles),
2916  .hw_configs = (const AVCodecHWConfigInternal *const []) {
2917 #if CONFIG_MPEG2_DXVA2_HWACCEL
2918  HWACCEL_DXVA2(mpeg2),
2919 #endif
2920 #if CONFIG_MPEG2_D3D11VA_HWACCEL
2921  HWACCEL_D3D11VA(mpeg2),
2922 #endif
2923 #if CONFIG_MPEG2_D3D11VA2_HWACCEL
2924  HWACCEL_D3D11VA2(mpeg2),
2925 #endif
2926 #if CONFIG_MPEG2_NVDEC_HWACCEL
2927  HWACCEL_NVDEC(mpeg2),
2928 #endif
2929 #if CONFIG_MPEG2_VAAPI_HWACCEL
2930  HWACCEL_VAAPI(mpeg2),
2931 #endif
2932 #if CONFIG_MPEG2_VDPAU_HWACCEL
2933  HWACCEL_VDPAU(mpeg2),
2934 #endif
2935 #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
2936  HWACCEL_VIDEOTOOLBOX(mpeg2),
2937 #endif
2938  NULL
2939  },
2940 };
2941 
2942 //legacy decoder
2943 const FFCodec ff_mpegvideo_decoder = {
2944  .p.name = "mpegvideo",
2945  CODEC_LONG_NAME("MPEG-1 video"),
2946  .p.type = AVMEDIA_TYPE_VIDEO,
2947  .p.id = AV_CODEC_ID_MPEG2VIDEO,
2948  .priv_data_size = sizeof(Mpeg1Context),
2949  .init = mpeg_decode_init,
2950  .close = mpeg_decode_end,
2951  FF_CODEC_DECODE_CB(mpeg_decode_frame),
2952  .p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
2953 #if FF_API_FLAG_TRUNCATED
2954  AV_CODEC_CAP_TRUNCATED |
2955 #endif
2956  AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
2957  .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
2958  .flush = flush,
2959  .p.max_lowres = 3,
2960 };
2961 
2962 typedef struct IPUContext {
2963  MpegEncContext m;
2964 
2965  int flags;
2966  DECLARE_ALIGNED(32, int16_t, block)[6][64];
2967 } IPUContext;
2968 
2969 static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
2970  int *got_frame, AVPacket *avpkt)
2971 {
2972  IPUContext *s = avctx->priv_data;
2973  MpegEncContext *m = &s->m;
2974  GetBitContext *gb = &m->gb;
2975  int ret;
2976 
2977  ret = ff_get_buffer(avctx, frame, 0);
2978  if (ret < 0)
2979  return ret;
2980 
2981  ret = init_get_bits8(gb, avpkt->data, avpkt->size);
2982  if (ret < 0)
2983  return ret;
2984 
2985  s->flags = get_bits(gb, 8);
2986  m->intra_dc_precision = s->flags & 3;
2987  m->q_scale_type = !!(s->flags & 0x40);
2988  m->intra_vlc_format = !!(s->flags & 0x20);
2989  m->alternate_scan = !!(s->flags & 0x10);
2990 
2991  if (s->flags & 0x10) {
2994  } else {
2997  }
2998 
2999  m->last_dc[0] = m->last_dc[1] = m->last_dc[2] = 1 << (7 + (s->flags & 3));
3000  m->qscale = 1;
3001 
3002  for (int y = 0; y < avctx->height; y += 16) {
3003  int intraquant;
3004 
3005  for (int x = 0; x < avctx->width; x += 16) {
3006  if (x || y) {
3007  if (!get_bits1(gb))
3008  return AVERROR_INVALIDDATA;
3009  }
3010  if (get_bits1(gb)) {
3011  intraquant = 0;
3012  } else {
3013  if (!get_bits1(gb))
3014  return AVERROR_INVALIDDATA;
3015  intraquant = 1;
3016  }
3017 
3018  if (s->flags & 4)
3019  skip_bits1(gb);
3020 
3021  if (intraquant)
3022  m->qscale = mpeg_get_qscale(m);
3023 
3024  memset(s->block, 0, sizeof(s->block));
3025 
3026  for (int n = 0; n < 6; n++) {
3027  if (s->flags & 0x80) {
3028  ret = ff_mpeg1_decode_block_intra(&m->gb,
3029  m->intra_matrix,
3030  m->intra_scantable.permutated,
3031  m->last_dc, s->block[n],
3032  n, m->qscale);
3033  if (ret >= 0)
3034  m->block_last_index[n] = ret;
3035  } else {
3036  ret = mpeg2_decode_block_intra(m, s->block[n], n);
3037  }
3038 
3039  if (ret < 0)
3040  return ret;
3041  }
3042 
3043  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x,
3044  frame->linesize[0], s->block[0]);
3045  m->idsp.idct_put(frame->data[0] + y * frame->linesize[0] + x + 8,
3046  frame->linesize[0], s->block[1]);
3047  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x,
3048  frame->linesize[0], s->block[2]);
3049  m->idsp.idct_put(frame->data[0] + (y + 8) * frame->linesize[0] + x + 8,
3050  frame->linesize[0], s->block[3]);
3051  m->idsp.idct_put(frame->data[1] + (y >> 1) * frame->linesize[1] + (x >> 1),
3052  frame->linesize[1], s->block[4]);
3053  m->idsp.idct_put(frame->data[2] + (y >> 1) * frame->linesize[2] + (x >> 1),
3054  frame->linesize[2], s->block[5]);
3055  }
3056  }
3057 
3058  align_get_bits(gb);
3059  if (get_bits_left(gb) != 32)
3060  return AVERROR_INVALIDDATA;
3061 
3062  frame->pict_type = AV_PICTURE_TYPE_I;
3063  frame->key_frame = 1;
3064  *got_frame = 1;
3065 
3066  return avpkt->size;
3067 }
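The leading 8-bit flags value read at the top of ipu_decode_frame() packs the MPEG-2-style coding parameters applied to every macroblock of the IPU picture. The field names below are descriptive guesses read off the code above, not an official IPU specification, and the sketch is not part of mpeg12dec.c:

/* Illustrative sketch: how ipu_decode_frame() interprets the flags byte of
 * an IPU packet. */
#include <stdint.h>

typedef struct IPUFlags {
    int intra_dc_precision;  /* bits 1..0 */
    int extra_bit_per_mb;    /* bit 2: one extra bit is skipped per macroblock */
    int alternate_scan;      /* bit 4 */
    int intra_vlc_format;    /* bit 5 */
    int q_scale_type;        /* bit 6 */
    int mpeg1_block_coding;  /* bit 7: use the MPEG-1 intra block decoder */
} IPUFlags;

static IPUFlags parse_ipu_flags(uint8_t flags)
{
    IPUFlags f;
    f.intra_dc_precision = flags & 3;
    f.extra_bit_per_mb   = !!(flags & 0x04);
    f.alternate_scan     = !!(flags & 0x10);
    f.intra_vlc_format   = !!(flags & 0x20);
    f.q_scale_type       = !!(flags & 0x40);
    f.mpeg1_block_coding = !!(flags & 0x80);
    return f;
}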
3068 
3069 static av_cold int ipu_decode_init(AVCodecContext *avctx)
3070 {
3071  IPUContext *s = avctx->priv_data;
3072  MpegEncContext *m = &s->m;
3073 
3074  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
3075 
3076  ff_mpv_decode_init(m, avctx);
3077  ff_mpv_idct_init(m);
3078  ff_mpeg12_common_init(m);
3079  ff_mpeg12_init_vlcs();
3080 
3081  for (int i = 0; i < 64; i++) {
3082  int j = m->idsp.idct_permutation[i];
3083  int v = ff_mpeg1_default_intra_matrix[i];
3084  m->intra_matrix[j] = v;
3085  m->chroma_intra_matrix[j] = v;
3086  }
3087 
3088  for (int i = 0; i < 64; i++) {
3089  int j = m->idsp.idct_permutation[i];
3090  int v = ff_mpeg1_default_non_intra_matrix[i];
3091  m->inter_matrix[j] = v;
3092  m->chroma_inter_matrix[j] = v;
3093  }
3094 
3095  return 0;
3096 }
3097 
3098 static av_cold int ipu_decode_end(AVCodecContext *avctx)
3099 {
3100  IPUContext *s = avctx->priv_data;
3101 
3102  ff_mpv_common_end(&s->m);
3103 
3104  return 0;
3105 }
3106 
3107 const FFCodec ff_ipu_decoder = {
3108  .p.name = "ipu",
3109  CODEC_LONG_NAME("IPU Video"),
3110  .p.type = AVMEDIA_TYPE_VIDEO,
3111  .p.id = AV_CODEC_ID_IPU,
3112  .priv_data_size = sizeof(IPUContext),
3113  .init = ipu_decode_init,
3114  FF_CODEC_DECODE_CB(ipu_decode_frame),
3115  .close = ipu_decode_end,
3116  .p.capabilities = AV_CODEC_CAP_DR1,
3117  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
3118 };
PICT_FRAME
#define PICT_FRAME
Definition: mpegutils.h:38
vcr2_init_sequence
static int vcr2_init_sequence(AVCodecContext *avctx)
Definition: mpeg12dec.c:2150
ff_mpv_common_init
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:668
hwconfig.h
AVCodecContext::hwaccel
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:1369
ff_rl_mpeg2
RLTable ff_rl_mpeg2
Definition: mpeg12data.c:174
MB_TYPE_L0
#define MB_TYPE_L0
Definition: mpegutils.h:60
MV_TYPE_16X16
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:255
Mpeg1Context::has_afd
int has_afd
Definition: mpeg12dec.c:71
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_TIMECODE_STR_SIZE
#define AV_TIMECODE_STR_SIZE
Definition: timecode.h:33
AV_PIX_FMT_CUDA
@ AV_PIX_FMT_CUDA
HW acceleration through CUDA.
Definition: pixfmt.h:225
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
level
uint8_t level
Definition: svq3.c:204
AV_EF_EXPLODE
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: defs.h:51
Mpeg1Context::a53_buf_ref
AVBufferRef * a53_buf_ref
Definition: mpeg12dec.c:69
ff_mpeg2_aspect
const AVRational ff_mpeg2_aspect[16]
Definition: mpeg12data.c:395
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:42
show_bits_long
static unsigned int show_bits_long(GetBitContext *s, int n)
Show 0-32 bits.
Definition: get_bits.h:602
mpeg_decode_a53_cc
static int mpeg_decode_a53_cc(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2202
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
FMT_MPEG1
@ FMT_MPEG1
Definition: mpegutils.h:117
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
ff_mpv_export_qp_table
int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
Definition: mpegvideo_dec.c:494
AV_STEREO3D_SIDEBYSIDE_QUINCUNX
@ AV_STEREO3D_SIDEBYSIDE_QUINCUNX
Views are next to each other, but when upscaling apply a checkerboard pattern.
Definition: stereo3d.h:117
FF_MPV_QSCALE_TYPE_MPEG2
#define FF_MPV_QSCALE_TYPE_MPEG2
Definition: mpegvideodec.h:41
mem_internal.h
mpeg_decode_frame
static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_output, AVPacket *avpkt)
Definition: mpeg12dec.c:2766
MpegEncContext::gb
GetBitContext gb
Definition: mpegvideo.h:427
AV_EF_COMPLIANT
#define AV_EF_COMPLIANT
consider all spec non compliances as errors
Definition: defs.h:55
SEQ_END_CODE
#define SEQ_END_CODE
Definition: mpeg12.h:28
av_frame_new_side_data
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, size_t size)
Add a new side data to a frame.
Definition: frame.c:679
check_scantable_index
#define check_scantable_index(ctx, x)
Definition: mpeg12dec.c:141
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:59
MT_FIELD
#define MT_FIELD
Definition: mpeg12dec.c:657
EXT_START_CODE
#define EXT_START_CODE
Definition: cavs.h:35
MV_TYPE_16X8
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
Definition: mpegvideo.h:257
ff_mbincr_vlc
VLC ff_mbincr_vlc
Definition: mpeg12.c:128
av_div_q
AVRational av_div_q(AVRational b, AVRational c)
Divide one rational by another.
Definition: rational.c:88
matrix
Definition: vc1dsp.c:42
AVPanScan
Pan Scan area.
Definition: defs.h:97
AVCodecContext::err_recognition
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:1351
SLICE_MAX_START_CODE
#define SLICE_MAX_START_CODE
Definition: cavs.h:34
MB_TYPE_16x8
#define MB_TYPE_16x8
Definition: mpegutils.h:48
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
ipu_decode_init
static av_cold int ipu_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:3069
ff_update_duplicate_context
int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src)
Definition: mpegvideo.c:476
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
start_code
static const uint8_t start_code[]
Definition: videotoolboxenc.c:198
ff_mpeg12_common_init
av_cold void ff_mpeg12_common_init(MpegEncContext *s)
Definition: mpeg12.c:103
ff_mpv_report_decode_progress
void ff_mpv_report_decode_progress(MpegEncContext *s)
Definition: mpegvideo_dec.c:561
w
uint8_t w
Definition: llviddspenc.c:38
HWACCEL_DXVA2
#define HWACCEL_DXVA2(codec)
Definition: hwconfig.h:67
ff_mpegvideo_decoder
const FFCodec ff_mpegvideo_decoder
Definition: mpeg12dec.c:2943
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
ipu_decode_end
static av_cold int ipu_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:3098
mpeg_decode_mb
static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpeg12dec.c:662
Mpeg1Context::closed_gop
int closed_gop
Definition: mpeg12dec.c:80
mpeg2_decode_block_intra
static int mpeg2_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:486
HWACCEL_D3D11VA2
#define HWACCEL_D3D11VA2(codec)
Definition: hwconfig.h:69
ff_reverse
const uint8_t ff_reverse[256]
Definition: reverse.c:23
MpegEncContext::last_dc
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:168
MB_TYPE_16x16
#define MB_TYPE_16x16
Definition: mpegutils.h:47
AV_PIX_FMT_D3D11VA_VLD
@ AV_PIX_FMT_D3D11VA_VLD
HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView p...
Definition: pixfmt.h:219
FFCodec
Definition: codec_internal.h:119
mpeg2_fast_decode_block_intra
static int mpeg2_fast_decode_block_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:572
ff_toupper4
unsigned int ff_toupper4(unsigned int x)
Definition: to_upper4.h:29
PICT_BOTTOM_FIELD
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:37
ff_er_add_slice
void ff_er_add_slice(ERContext *s, int startx, int starty, int endx, int endy, int status)
Add a slice.
Definition: error_resilience.c:822
ff_init_block_index
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:1569
reverse.h
mpegvideo.h
MB_TYPE_L1
#define MB_TYPE_L1
Definition: mpegutils.h:61
UPDATE_CACHE
#define UPDATE_CACHE(name, gb)
Definition: get_bits.h:178
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Mpeg1Context::first_slice
int first_slice
Definition: mpeg12dec.c:82
ER_DC_END
#define ER_DC_END
Definition: error_resilience.h:35
mpeg_decode_postinit
static int mpeg_decode_postinit(AVCodecContext *avctx)
Definition: mpeg12dec.c:1179
ff_add_cpb_side_data
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1022
mpegutils.h
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:91
ER_MV_ERROR
#define ER_MV_ERROR
Definition: error_resilience.h:33
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
ff_mb_pat_vlc
VLC ff_mb_pat_vlc
Definition: mpeg12.c:131
SEQ_START_CODE
#define SEQ_START_CODE
Definition: mpeg12.h:29
FF_DEBUG_PICT_INFO
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:1328
MV_TYPE_DMV
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
Definition: mpegvideo.h:259
init
static int init
Definition: av_tx.c:47
GET_CACHE
#define GET_CACHE(name, gb)
Definition: get_bits.h:215
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
Mpeg1Context::save_aspect
AVRational save_aspect
Definition: mpeg12dec.c:74
MpegEncContext::intra_scantable
ScanTable intra_scantable
Definition: mpegvideo.h:72
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1735
AV_STEREO3D_SIDEBYSIDE
@ AV_STEREO3D_SIDEBYSIDE
Views are next to each other.
Definition: stereo3d.h:67
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
MB_TYPE_ZERO_MV
#define MB_TYPE_ZERO_MV
Definition: mpeg12dec.c:87
MT_DMV
#define MT_DMV
Definition: mpeg12dec.c:660
ParseContext
Definition: parser.h:28
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:123
decode_chunks
static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, int *got_output, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2434
AVCodecContext::skip_frame
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:1698
mpeg_decode_quant_matrix_extension
static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
Definition: mpeg12dec.c:1499
AVCodecContext::thread_count
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1466
AV_STEREO3D_2D
@ AV_STEREO3D_2D
Video is not stereoscopic (and metadata has to be there).
Definition: stereo3d.h:55
wrap
#define wrap(func)
Definition: neontest.h:65
timecode.h
RLTable
RLTable.
Definition: rl.h:39
GetBitContext
Definition: get_bits.h:61
AV_EF_BITSTREAM
#define AV_EF_BITSTREAM
detect bitstream specification deviations
Definition: defs.h:49
USES_LIST
#define USES_LIST(a, list)
Definition: mpegutils.h:92
slice_decode_thread
static int slice_decode_thread(AVCodecContext *c, void *arg)
Definition: mpeg12dec.c:1961
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:478
IDCTDSPContext::idct_put
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
Definition: idctdsp.h:72
MB_TYPE_CBP
#define MB_TYPE_CBP
Definition: mpegutils.h:64
val
static double val(void *priv, double ch)
Definition: aeval.c:77
Mpeg1Context::tmpgexs
int tmpgexs
Definition: mpeg12dec.c:81
HWACCEL_VDPAU
#define HWACCEL_VDPAU(codec)
Definition: hwconfig.h:75
AV_CODEC_FLAG_LOW_DELAY
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:280
mpeg12_pixfmt_list_444
static enum AVPixelFormat mpeg12_pixfmt_list_444[]
Definition: mpeg12dec.c:1151
AVCodecContext::coded_height
int coded_height
Definition: avcodec.h:586
ff_print_debug_info
void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
Definition: mpegvideo_dec.c:487
mpeg1_decode_sequence
static int mpeg1_decode_sequence(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2062
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
HAS_CBP
#define HAS_CBP(a)
Definition: mpegutils.h:94
AVRational::num
int num
Numerator.
Definition: rational.h:59
GOP_START_CODE
#define GOP_START_CODE
Definition: mpeg12.h:30
IPUContext
Definition: mpeg12dec.c:2962
mpeg1_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1111
ff_mpv_common_end
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:775
mpeg12.h
mpegvideodec.h
ff_mpeg2video_decoder
const FFCodec ff_mpeg2video_decoder
Definition: mpeg12dec.c:2898
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
Mpeg1Context::frame_rate_index
unsigned frame_rate_index
Definition: mpeg12dec.c:78
ipu_decode_frame
static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: mpeg12dec.c:2969
ER_DC_ERROR
#define ER_DC_ERROR
Definition: error_resilience.h:32
av_cold
#define av_cold
Definition: attributes.h:90
mpeg2_hwaccel_pixfmt_list_420
static enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[]
Definition: mpeg12dec.c:1122
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
mpeg1_decode_picture
static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1319
flush
static void flush(AVCodecContext *avctx)
Definition: mpeg12dec.c:2846
Mpeg1Context::save_progressive_seq
int save_progressive_seq
Definition: mpeg12dec.c:75
ff_rl_mpeg1
RLTable ff_rl_mpeg1
Definition: mpeg12data.c:166
CLOSE_READER
#define CLOSE_READER(name, gb)
Definition: get_bits.h:149
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:500
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:694
A53_MAX_CC_COUNT
#define A53_MAX_CC_COUNT
Definition: mpeg12dec.c:60
Mpeg1Context::repeat_field
int repeat_field
Definition: mpeg12dec.c:65
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:298
stereo3d.h
AV_PIX_FMT_DXVA2_VLD
@ AV_PIX_FMT_DXVA2_VLD
HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer.
Definition: pixfmt.h:127
s
#define s(width, name)
Definition: cbs_vp9.c:256
ff_mpeg1_aspect
const float ff_mpeg1_aspect[16]
Definition: mpeg12data.c:374
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:530
s1
#define s1
Definition: regdef.h:38
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2014
Mpeg1Context::mpeg_enc_ctx_allocated
int mpeg_enc_ctx_allocated
Definition: mpeg12dec.c:64
SHOW_SBITS
#define SHOW_SBITS(name, gb, num)
Definition: get_bits.h:212
ff_mpeg_er_frame_start
void ff_mpeg_er_frame_start(MpegEncContext *s)
Definition: mpeg_er.c:46
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Mpeg1Context::aspect_ratio_info
unsigned aspect_ratio_info
Definition: mpeg12dec.c:73
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
mpeg_decode_sequence_display_extension
static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1418
Mpeg1Context::pan_scan
AVPanScan pan_scan
Definition: mpeg12dec.c:66
get_sbits
static int get_sbits(GetBitContext *s, int n)
Definition: get_bits.h:359
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
PICT_TOP_FIELD
#define PICT_TOP_FIELD
Definition: mpegutils.h:36
decode.h
mpeg12_pixfmt_list_422
static enum AVPixelFormat mpeg12_pixfmt_list_422[]
Definition: mpeg12dec.c:1146
SKIP_BITS
#define SKIP_BITS(name, gb, num)
Definition: get_bits.h:193
IS_INTRA
#define IS_INTRA(x, y)
field
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this field
Definition: writing_filters.txt:78
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVCodecContext::rc_max_rate
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1223
AVCPBProperties
This structure describes the bitrate properties of an encoded bitstream.
Definition: defs.h:126
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:264
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:408
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:76
Mpeg1Context::rc_buffer_size
int rc_buffer_size
Definition: mpeg12dec.c:76
MB_PTYPE_VLC_BITS
#define MB_PTYPE_VLC_BITS
Definition: mpeg12vlc.h:39
Mpeg1Context::save_width
int save_width
Definition: mpeg12dec.c:75
PTRDIFF_SPECIFIER
#define PTRDIFF_SPECIFIER
Definition: internal.h:149
NULL
#define NULL
Definition: coverity.c:32
run
uint8_t run
Definition: svq3.c:203
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:982
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
ER_AC_ERROR
#define ER_AC_ERROR
Definition: error_resilience.h:31
SLICE_MIN_START_CODE
#define SLICE_MIN_START_CODE
Definition: mpeg12.h:32
ff_mpv_idct_init
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
Mpeg1Context::sync
int sync
Definition: mpeg12dec.c:79
AVCHROMA_LOC_LEFT
@ AVCHROMA_LOC_LEFT
MPEG-2/4 4:2:0, H.264 default for 4:2:0.
Definition: pixfmt.h:651
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCHROMA_LOC_TOPLEFT
@ AVCHROMA_LOC_TOPLEFT
ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2.
Definition: pixfmt.h:653
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:448
mpeg_decode_picture_display_extension
static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1442
MpegEncContext::inter_matrix
uint16_t inter_matrix[64]
Definition: mpegvideo.h:291
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:303
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
profiles.h
AV_CODEC_FLAG_TRUNCATED
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:271
LAST_SKIP_BITS
#define LAST_SKIP_BITS(name, gb, num)
Definition: get_bits.h:199
MB_TYPE_QUANT
#define MB_TYPE_QUANT
Definition: mpegutils.h:63
avpriv_find_start_code
const uint8_t * avpriv_find_start_code(const uint8_t *p, const uint8_t *end, uint32_t *state)
lowres
static int lowres
Definition: ffplay.c:335
av_frame_new_side_data_from_buf
AVFrameSideData * av_frame_new_side_data_from_buf(AVFrame *frame, enum AVFrameSideDataType type, AVBufferRef *buf)
Add a new side data to a frame from an existing AVBufferRef.
Definition: frame.c:647
MB_BTYPE_VLC_BITS
#define MB_BTYPE_VLC_BITS
Definition: mpeg12vlc.h:40
UPDATE_THREAD_CONTEXT
#define UPDATE_THREAD_CONTEXT(func)
Definition: codec_internal.h:273
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
AV_FRAME_DATA_AFD
@ AV_FRAME_DATA_AFD
Active Format Description data consisting of a single byte as specified in ETSI TS 101 154 using AVAc...
Definition: frame.h:90
AVCodecContext::level
int level
level
Definition: avcodec.h:1676
Mpeg1Context::save_height
int save_height
Definition: mpeg12dec.c:75
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:53
MpegEncContext::idsp
IDCTDSPContext idsp
Definition: mpegvideo.h:210
ff_mb_ptype_vlc
VLC ff_mb_ptype_vlc
Definition: mpeg12.c:129
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
quant_matrix_rebuild
static void quant_matrix_rebuild(uint16_t *matrix, const uint8_t *old_perm, const uint8_t *new_perm)
Definition: mpeg12dec.c:1099
ff_mpeg1_find_frame_end
int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s)
Find the end of the current frame in the bitstream.
Definition: mpeg12.c:173
startcode.h
s2
#define s2
Definition: regdef.h:39
AVDISCARD_NONKEY
@ AVDISCARD_NONKEY
discard all frames except keyframes
Definition: defs.h:75
check_marker
static int check_marker(void *logctx, GetBitContext *s, const char *msg)
Definition: mpegvideodec.h:72
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:485
ff_get_buffer
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1450
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:259
AVPacket::size
int size
Definition: packet.h:375
dc
Definition: snow.txt:400
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:115
MpegEncContext::qscale
int qscale
QP.
Definition: mpegvideo.h:187
AV_CODEC_ID_IPU
@ AV_CODEC_ID_IPU
Definition: codec_id.h:309
AV_FRAME_DATA_PANSCAN
@ AV_FRAME_DATA_PANSCAN
The data is the AVPanScan struct defined in libavcodec.
Definition: frame.h:53
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:353
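A minimal sketch, assuming src is a valid AVFrame, of taking an additional reference without copying the pixel data:
    AVFrame *dst = av_frame_alloc();
    if (dst && av_frame_ref(dst, src) < 0)
        av_frame_free(&dst);   /* drop the empty frame if referencing failed */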
MT_FRAME
#define MT_FRAME
Definition: mpeg12dec.c:658
codec_internal.h
shift
static int shift(int a, int b)
Definition: bonk.c:260
IPUContext::flags
int flags
Definition: mpeg12dec.c:2965
MpegEncContext::intra_matrix
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:289
ff_mpeg1_clean_buffers
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:111
ff_mpeg1video_decoder
const FFCodec ff_mpeg1video_decoder
Definition: mpeg12dec.c:2866
AV_RB32
Definition: bytestream.h:96
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame setting.
Definition: codec_internal.h:54
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
MB_TYPE_SKIP
#define MB_TYPE_SKIP
Definition: mpegutils.h:55
FF_THREAD_SLICE
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1478
ff_mpeg_draw_horiz_band
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
Definition: mpegvideo_dec.c:524
PICTURE_START_CODE
#define PICTURE_START_CODE
Definition: mpeg12.h:31
USER_START_CODE
#define USER_START_CODE
Definition: cavs.h:36
AVCodecContext::skip_bottom
int skip_bottom
Number of macroblock rows at the bottom which are skipped.
Definition: avcodec.h:913
AVCodecHWConfigInternal
Definition: hwconfig.h:29
ff_mpeg1_default_intra_matrix
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
ff_mpv_frame_start
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
Definition: mpegvideo_dec.c:263
MB_TYPE_INTERLACED
#define MB_TYPE_INTERLACED
Definition: mpegutils.h:51
OPEN_READER
#define OPEN_READER(name, gb)
Definition: get_bits.h:138
ff_mpeg_flush
void ff_mpeg_flush(AVCodecContext *avctx)
Definition: mpegvideo_dec.c:532
height
#define height
Mpeg1Context::has_stereo3d
int has_stereo3d
Definition: mpeg12dec.c:68
mpeg_decode_init
static av_cold int mpeg_decode_init(AVCodecContext *avctx)
Definition: mpeg12dec.c:1052
AV_CODEC_CAP_SLICE_THREADS
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:117
HWACCEL_D3D11VA
#define HWACCEL_D3D11VA(codec)
Definition: hwconfig.h:79
mpegvideodata.h
attributes.h
ff_mpeg1_decode_block_intra
int ff_mpeg1_decode_block_intra(GetBitContext *gb, const uint16_t *quant_matrix, const uint8_t *scantable, int last_dc[3], int16_t *block, int index, int qscale)
Definition: mpeg12.c:236
MV_TYPE_FIELD
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:258
skip_bits1
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:538
AV_PIX_FMT_D3D11
@ AV_PIX_FMT_D3D11
Hardware surfaces for Direct3D11.
Definition: pixfmt.h:305
HWACCEL_NVDEC
#define HWACCEL_NVDEC(codec)
Definition: hwconfig.h:71
AV_PIX_FMT_VAAPI
@ AV_PIX_FMT_VAAPI
Hardware acceleration through VA-API, data[3] contains a VASurfaceID.
Definition: pixfmt.h:119
ff_combine_frame
int ff_combine_frame(ParseContext *pc, int next, const uint8_t **buf, int *buf_size)
Combine the (truncated) bitstream to a complete frame.
Definition: parser.c:199
FF_THREAD_FRAME
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:1477
ff_mpeg2_video_profiles
const AVProfile ff_mpeg2_video_profiles[]
Definition: profiles.c:100
AV_PIX_FMT_VDPAU
@ AV_PIX_FMT_VDPAU
HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface.
Definition: pixfmt.h:187
DECLARE_ALIGNED
#define DECLARE_ALIGNED(n, t, v)
Definition: mem.h:116
MB_TYPE_L0L1
#define MB_TYPE_L0L1
Definition: mpegutils.h:62
AV_PIX_FMT_VIDEOTOOLBOX
@ AV_PIX_FMT_VIDEOTOOLBOX
hardware decoding through Videotoolbox
Definition: pixfmt.h:274
MpegEncContext::block_last_index
int block_last_index[12]
last non-zero coefficient in the block
Definition: mpegvideo.h:67
mpeg_decode_gop
static void mpeg_decode_gop(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:2407
av_assert2
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
MpegEncContext::chroma_inter_matrix
uint16_t chroma_inter_matrix[64]
Definition: mpegvideo.h:292
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
code
Definition: filter_design.txt:178
AV_CODEC_FLAG2_SHOW_ALL
#define AV_CODEC_FLAG2_SHOW_ALL
Show all frames before the first keyframe.
Definition: avcodec.h:331
AVCodecContext::properties
unsigned properties
Properties of the stream that gets decoded.
Definition: avcodec.h:1847
ff_alternate_vertical_scan
const uint8_t ff_alternate_vertical_scan[64]
Definition: mpegvideodata.c:95
btype2mb_type
static const uint32_t btype2mb_type[11]
Definition: mpeg12dec.c:99
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:499
AVHWAccel::decode_slice
int(* decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Callback for each slice.
Definition: avcodec.h:2159
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
ff_init_scantable
av_cold void ff_init_scantable(const uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: idctdsp.c:30
internal.h
ff_mpv_decode_init
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
Initialize the given MpegEncContext for decoding.
Definition: mpegvideo_dec.c:37
AV_STEREO3D_TOPBOTTOM
@ AV_STEREO3D_TOPBOTTOM
Views are on top of each other.
Definition: stereo3d.h:79
AVCPBProperties::max_bitrate
int64_t max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: defs.h:131
IS_QUANT
#define IS_QUANT(a)
Definition: mpegutils.h:88
ff_mpeg12_init_vlcs
av_cold void ff_mpeg12_init_vlcs(void)
Definition: mpeg12.c:162
FF_DEBUG_STARTCODE
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:1335
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_d2q
AVRational av_d2q(double d, int max)
Convert a double precision floating point number to a rational.
Definition: rational.c:106
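A small usage sketch (the reduced fraction returned for inexact doubles is an approximation):
    AVRational half = av_d2q(0.5, 255);       /* {1, 2} */
    AVRational ntsc = av_d2q(29.97, 100000);  /* approximately 2997/100 */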
MB_PAT_VLC_BITS
#define MB_PAT_VLC_BITS
Definition: mpeg12vlc.h:38
mpeg1_decode_block_inter
static int mpeg1_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:150
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:487
ptype2mb_type
static const uint32_t ptype2mb_type[7]
Definition: mpeg12dec.c:89
IPUContext::m
MpegEncContext m
Definition: mpeg12dec.c:2963
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:211
MpegEncContext::intra_vlc_format
int intra_vlc_format
Definition: mpegvideo.h:446
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:989
MAX_INDEX
#define MAX_INDEX
Definition: mpeg12dec.c:140
AVCodecContext::height
int height
Definition: avcodec.h:571
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:608
AVCOL_RANGE_MPEG
@ AVCOL_RANGE_MPEG
Narrow or limited range content.
Definition: pixfmt.h:613
HWACCEL_VIDEOTOOLBOX
#define HWACCEL_VIDEOTOOLBOX(codec)
Definition: hwconfig.h:77
Mpeg1Context::stereo3d
AVStereo3D stereo3d
Definition: mpeg12dec.c:67
idctdsp.h
avcodec.h
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
GET_RL_VLC
#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)
Definition: get_bits.h:728
ff_zigzag_direct
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ff_mpeg12_frame_rate_tab
const AVRational ff_mpeg12_frame_rate_tab[]
Definition: mpeg12framerate.c:24
ret
ret
Definition: filter_design.txt:187
AV_EF_AGGRESSIVE
#define AV_EF_AGGRESSIVE
consider things that a sane encoder/muxer should not do as an error
Definition: defs.h:56
pred
static const float pred[4]
Definition: siprdata.h:259
AV_FRAME_DATA_GOP_TIMECODE
@ AV_FRAME_DATA_GOP_TIMECODE
The GOP timecode in 25 bit timecode format.
Definition: frame.h:125
frame
Definition: filter_design.txt:264
ff_mpeg1_default_non_intra_matrix
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
AVCPBProperties::buffer_size
int64_t buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: defs.h:147
mpeg1_fast_decode_block_inter
static int mpeg1_fast_decode_block_inter(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:239
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
TEX_VLC_BITS
#define TEX_VLC_BITS
Definition: dvdec.c:146
ff_thread_finish_setup
For frame-threaded decoders: move all code that changes context state needed by the next frame, as well as code calling get_buffer(), up to before the decode process starts, and call ff_thread_finish_setup() afterwards; code that cannot be moved must be handled in update_thread_context() (see doc/multithreading.txt).
left
Definition: snow.txt:386
mpeg_get_pixelformat
static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
Definition: mpeg12dec.c:1156
AV_CODEC_FLAG2_CHUNKS
#define AV_CODEC_FLAG2_CHUNKS
Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.
Definition: avcodec.h:322
AV_RL32
Definition: bytestream.h:92
mpeg12data.h
mpeg2_fast_decode_block_non_intra
static int mpeg2_fast_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Changing this would eat up any speed benefits it has.
Definition: mpeg12dec.c:414
mpeg_field_start
static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
Definition: mpeg12dec.c:1576
ff_mpeg_update_thread_context
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
Definition: mpegvideo_dec.c:51
skip_1stop_8data_bits
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:844
AVCodecContext
main external API structure.
Definition: avcodec.h:398
AVCodecContext::active_thread_type
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1485
av_timecode_make_mpeg_tc_string
char * av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit)
Get the timecode string from the 25-bit timecode format (MPEG GOP format).
Definition: timecode.c:167
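A hedged sketch, assuming tc25bit holds the 25-bit value read from a GOP header:
    char tcbuf[AV_TIMECODE_STR_SIZE];
    av_timecode_make_mpeg_tc_string(tcbuf, tc25bit);  /* e.g. "00:00:01:00"; ';' separator for drop frame */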
MpegEncContext::intra_dc_precision
int intra_dc_precision
Definition: mpegvideo.h:440
AVCodecContext::execute
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1517
SHOW_UBITS
#define SHOW_UBITS(name, gb, num)
Definition: get_bits.h:211
AV_PICTURE_TYPE_B
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
mpeg12dec.h
AVCHROMA_LOC_CENTER
@ AVCHROMA_LOC_CENTER
MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0.
Definition: pixfmt.h:652
AVRational::den
int den
Denominator.
Definition: rational.h:60
error_resilience.h
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AVCodecContext::profile
int profile
profile
Definition: avcodec.h:1550
AVFrame::metadata
AVDictionary * metadata
metadata.
Definition: frame.h:625
ff_mb_btype_vlc
VLC ff_mb_btype_vlc
Definition: mpeg12.c:130
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:133
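For example, reinterpreting a 5-bit field as a two's-complement value:
    sign_extend(0x1F, 5);   /* == -1 */
    sign_extend(0x0F, 5);   /* == 15 */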
ff_thread_get_format
FF_DISABLE_DEPRECATION_WARNINGS enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Wrapper around get_format() for frame-multithreaded codecs.
Definition: pthread_frame.c:1029
ff_mpv_frame_end
void ff_mpv_frame_end(MpegEncContext *s)
Definition: mpegvideo_dec.c:479
ref
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:112
ff_mpv_reconstruct_mb
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:1545
Mpeg1Context::slice_count
int slice_count
Definition: mpeg12dec.c:72
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and correct output.
Definition: codec.h:82
VLC::table
VLCElem * table
Definition: vlc.h:33
FF_CODEC_PROPERTY_CLOSED_CAPTIONS
#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS
Definition: avcodec.h:1849
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
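For instance, the product is returned in reduced form:
    AVRational r = av_mul_q((AVRational){ 24000, 1001 }, (AVRational){ 1001, 1000 });  /* {24, 1} */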
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
av_buffer_realloc
int av_buffer_realloc(AVBufferRef **pbuf, size_t size)
Reallocate a given buffer.
Definition: buffer.c:183
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1327
AVHWAccel::start_frame
int(* start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
Called at the beginning of each frame or field picture.
Definition: avcodec.h:2132
AVCodecContext::coded_width
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:586
get_dmv
static int get_dmv(MpegEncContext *s)
Definition: mpeg12dec.c:648
tc
#define tc
Definition: regdef.h:69
AV_PICTURE_TYPE_P
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
mpeg_decode_end
static av_cold int mpeg_decode_end(AVCodecContext *avctx)
Definition: mpeg12dec.c:2856
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
MpegEncContext::inter_scantable
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:71
IDCTDSPContext::idct_permutation
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
ff_ipu_decoder
const FFCodec ff_ipu_decoder
Definition: mpeg12dec.c:3107
av_stereo3d_create_side_data
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:34
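A minimal sketch, assuming frame is a valid AVFrame, of tagging it as top-bottom packed:
    AVStereo3D *s3d = av_stereo3d_create_side_data(frame);
    if (s3d)
        s3d->type = AV_STEREO3D_TOPBOTTOM;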
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
ER_MV_END
#define ER_MV_END
Definition: error_resilience.h:36
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:139
ff_mv_vlc
VLC ff_mv_vlc
Definition: mpeg12.c:123
MpegEncContext::q_scale_type
int q_scale_type
Definition: mpegvideo.h:444
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:423
Mpeg1Context::mpeg_enc_ctx
MpegEncContext mpeg_enc_ctx
Definition: mpeg12dec.c:63
ff_tlog
#define ff_tlog(ctx,...)
Definition: internal.h:162
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
MV_DIR_FORWARD
#define MV_DIR_FORWARD
Definition: mpegvideo.h:251
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:425
ScanTable::permutated
uint8_t permutated[64]
Definition: idctdsp.h:33
ff_er_frame_end
void ff_er_frame_end(ERContext *s)
Definition: error_resilience.c:892
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:71
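A short illustrative sketch (key and value are made up) of populating and releasing a dictionary:
    AVDictionary *meta = NULL;
    av_dict_set(&meta, "timecode", "00:00:01:00", 0);  /* key and value are copied */
    av_dict_free(&meta);                               /* frees all entries */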
mpeg_get_qscale
static int mpeg_get_qscale(MpegEncContext *s)
Definition: mpegvideodec.h:63
mpeg_decode_sequence_extension
static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1368
HWACCEL_VAAPI
#define HWACCEL_VAAPI(codec)
Definition: hwconfig.h:73
mpeg_er.h
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:571
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AV_CODEC_CAP_DRAW_HORIZ_BAND
#define AV_CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: codec.h:44
block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
Mpeg1Context::frame_rate_ext
AVRational frame_rate_ext
Definition: mpeg12dec.c:77
mpeg_decode_motion
static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
Definition: mpeg12dec.c:114
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
IPUContext::block
int16_t block[6][64]
Definition: mpeg12dec.c:2966
mpeg_decode_user_data
static void mpeg_decode_user_data(AVCodecContext *avctx, const uint8_t *p, int buf_size)
Definition: mpeg12dec.c:2337
h
h
Definition: vp9dsp_template.c:2038
MpegEncContext::end_mb_y
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:137
ER_AC_END
#define ER_AC_END
Definition: error_resilience.h:34
AVStereo3D
Stereo 3D type: this structure describes how two videos are packed within a single video surface, with additional information as needed.
Definition: stereo3d.h:176
av_image_check_sar
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:323
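A hedged sketch, with width, height and sar standing in for parsed values, of falling back to an unknown aspect ratio when the parsed one is invalid:
    if (av_image_check_sar(width, height, sar) < 0)
        sar = (AVRational){ 0, 1 };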
MV_VLC_BITS
#define MV_VLC_BITS
Definition: mpeg12vlc.h:34
Mpeg1Context::timecode_frame_start
int64_t timecode_frame_start
Definition: mpeg12dec.c:84
MpegEncContext::start_mb_y
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
Definition: mpegvideo.h:136
AVDISCARD_NONREF
@ AVDISCARD_NONREF
discard all non reference
Definition: defs.h:72
MpegEncContext::alternate_scan
int alternate_scan
Definition: mpegvideo.h:447
DECODE_SLICE_OK
#define DECODE_SLICE_OK
Definition: mpeg12dec.c:1680
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:54
DECODE_SLICE_ERROR
#define DECODE_SLICE_ERROR
Definition: mpeg12dec.c:1679
MpegEncContext
MpegEncContext.
Definition: mpegvideo.h:62
load_matrix
static int load_matrix(MpegEncContext *s, uint16_t matrix0[64], uint16_t matrix1[64], int intra)
Definition: mpeg12dec.c:1476
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:768
decode_dc
static int decode_dc(GetBitContext *gb, int component)
Definition: mpeg12dec.h:38
Mpeg1Context::afd
uint8_t afd
Definition: mpeg12dec.c:70
Mpeg1Context
Definition: mpeg12dec.c:62
RLTable::rl_vlc
RL_VLC_ELEM * rl_vlc[32]
decoding only
Definition: rl.h:48
MpegEncContext::chroma_intra_matrix
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:290
mpeg_decode_picture_coding_extension
static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
Definition: mpeg12dec.c:1513
Mpeg1Context::extradata_decoded
int extradata_decoded
Definition: mpeg12dec.c:83
mpeg2_decode_block_non_intra
static int mpeg2_decode_block_non_intra(MpegEncContext *s, int16_t *block, int n)
Definition: mpeg12dec.c:324
MB_TYPE_INTRA
#define MB_TYPE_INTRA
Definition: mpegutils.h:66
MBINCR_VLC_BITS
#define MBINCR_VLC_BITS
Definition: mpeg12vlc.h:37
mpeg_decode_slice
static int mpeg_decode_slice(MpegEncContext *s, int mb_y, const uint8_t **buf, int buf_size)
Decode a slice.
Definition: mpeg12dec.c:1688
re
float re
Definition: fft.c:79