FFmpeg
mss2.c
1 /*
2  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Microsoft Screen 2 (aka Windows Media Video V9 Screen) decoder
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/mem.h"
28 #include "codec_internal.h"
29 #include "decode.h"
30 #include "error_resilience.h"
31 #include "mpeg_er.h"
32 #include "mpegvideodec.h"
33 #include "vc1.h"
34 #include "wmv2data.h"
35 #include "mss12.h"
36 #include "mss2dsp.h"
37 
38 typedef struct MSS2Context {
39  VC1Context v;
40  int split_position;
41  AVFrame *last_pic;
42  MSS12Context c;
43  MSS2DSPContext dsp;
44  SliceContext sc[2];
45 } MSS2Context;
46 
47 static void arith2_normalise(ArithCoder *c)
48 {
49  while ((c->high >> 15) - (c->low >> 15) < 2) {
50  if ((c->low ^ c->high) & 0x10000) {
51  c->high ^= 0x8000;
52  c->value ^= 0x8000;
53  c->low ^= 0x8000;
54  }
55  c->high = (uint16_t)c->high << 8 | 0xFF;
56  c->value = (uint16_t)c->value << 8 | bytestream2_get_byte(c->gbc.gB);
57  c->low = (uint16_t)c->low << 8;
58  }
59 }
60 
61 ARITH_GET_BIT(arith2)
62 
63 /* L. Stuiver and A. Moffat: "Piecewise Integer Mapping for Arithmetic Coding."
64  * In Proc. 8th Data Compression Conference (DCC '98), pp. 3-12, Mar. 1998 */
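/* Illustrative note (added; not in the original source): arith2_get_number()
 * below first scales the alphabet size n to n' = n << scale so that
 * n' <= range < 2*n', then maps the `range` code values onto n' symbol values
 * with split = 2*n' - range:
 *
 *     scaled(v) = v                        for v <= split
 *     scaled(v) = split + (v - split) / 2  for v >  split
 *
 * e.g. with range = 300 and n' = 256, split = 212 and the last code value 299
 * maps to 212 + 87/2 = 255.  arith2_rescale_interval() applies the inverse
 * expansion when narrowing [low, high]. */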
65 
66 static int arith2_get_scaled_value(int value, int n, int range)
67 {
68  int split = (n << 1) - range;
69 
70  if (value > split)
71  return split + (value - split >> 1);
72  else
73  return value;
74 }
75 
76 static void arith2_rescale_interval(ArithCoder *c, int range,
77  int low, int high, int n)
78 {
79  int split = (n << 1) - range;
80 
81  if (high > split)
82  c->high = split + (high - split << 1);
83  else
84  c->high = high;
85 
86  c->high += c->low - 1;
87 
88  if (low > split)
89  c->low += split + (low - split << 1);
90  else
91  c->low += low;
92 }
93 
94 static int arith2_get_number(ArithCoder *c, int n)
95 {
96  int range = c->high - c->low + 1;
97  int scale = av_log2(range) - av_log2(n);
98  int val;
99 
100  if (n << scale > range)
101  scale--;
102 
103  n <<= scale;
104 
105  val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
106 
107  arith2_rescale_interval(c, range, val << scale, (val + 1) << scale, n);
108 
109  arith2_normalise(c);
110 
111  return val;
112 }
113 
114 static int arith2_get_prob(ArithCoder *c, int16_t *probs)
115 {
116  int range = c->high - c->low + 1, n = *probs;
117  int scale = av_log2(range) - av_log2(n);
118  int i = 0, val;
119 
120  if (n << scale > range)
121  scale--;
122 
123  n <<= scale;
124 
125  val = arith2_get_scaled_value(c->value - c->low, n, range) >> scale;
126  while (probs[++i] > val) ;
127 
128  arith2_rescale_interval(c, range,
129  probs[i] << scale, probs[i - 1] << scale, n);
130 
131  return i;
132 }
133 
134 ARITH_GET_MODEL_SYM(arith2)
135 
136 static int arith2_get_consumed_bytes(ArithCoder *c)
137 {
138  int diff = (c->high >> 16) - (c->low >> 16);
139  int bp = bytestream2_tell(c->gbc.gB) - 3 << 3;
140  int bits = 1;
141 
142  while (!(diff & 0x80)) {
143  bits++;
144  diff <<= 1;
145  }
146 
147  return (bits + bp + 7 >> 3) + ((c->low >> 16) + 1 == c->high >> 16);
148 }
149 
150 static void arith2_init(ArithCoder *c, GetByteContext *gB)
151 {
152  c->low = 0;
153  c->high = 0xFFFFFF;
154  c->value = bytestream2_get_be24(gB);
155  c->overread = 0;
156  c->gbc.gB = gB;
157  c->get_model_sym = arith2_get_model_sym;
158  c->get_number = arith2_get_number;
159 }
160 
161 static int decode_pal_v2(MSS12Context *ctx, const uint8_t *buf, int buf_size)
162 {
163  int i, ncol;
164  uint32_t *pal = ctx->pal + 256 - ctx->free_colours;
165 
166  if (!ctx->free_colours)
167  return 0;
168 
169  ncol = *buf++;
170  if (ncol > ctx->free_colours || buf_size < 2 + ncol * 3)
171  return AVERROR_INVALIDDATA;
172  for (i = 0; i < ncol; i++)
173  *pal++ = AV_RB24(buf + 3 * i);
174 
175  return 1 + ncol * 3;
176 }
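/* Added note (derived from the code above, not an original comment): the
 * palette chunk is one byte with the number of replacement colours followed
 * by that many big-endian RGB24 triplets; they overwrite the last
 * `free_colours` entries of the 256-colour palette. */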
177 
178 static int decode_555(AVCodecContext *avctx, GetByteContext *gB, uint16_t *dst, ptrdiff_t stride,
179  int keyframe, int w, int h)
180 {
181  int last_symbol = 0, repeat = 0, prev_avail = 0;
182 
183  if (!keyframe) {
184  int x, y, endx, endy, t;
185 
186 #define READ_PAIR(a, b) \
187  a = bytestream2_get_byte(gB) << 4; \
188  t = bytestream2_get_byte(gB); \
189  a |= t >> 4; \
190  b = (t & 0xF) << 8; \
191  b |= bytestream2_get_byte(gB); \
192 
193  READ_PAIR(x, endx)
194  READ_PAIR(y, endy)
195 
196  if (endx >= w || endy >= h || x > endx || y > endy)
197  return AVERROR_INVALIDDATA;
198  dst += x + stride * y;
199  w = endx - x + 1;
200  h = endy - y + 1;
201  if (y)
202  prev_avail = 1;
203  }
204 
205  do {
206  uint16_t *p = dst;
207  do {
208  if (repeat-- < 1) {
209  int b = bytestream2_get_byte(gB);
210  if (b < 128)
211  last_symbol = b << 8 | bytestream2_get_byte(gB);
212  else if (b > 129) {
213  repeat = 0;
214  while (b-- > 130) {
215  if (repeat >= (INT_MAX >> 8) - 1) {
216  av_log(avctx, AV_LOG_ERROR, "repeat overflow\n");
217  return AVERROR_INVALIDDATA;
218  }
219  repeat = (repeat << 8) + bytestream2_get_byte(gB) + 1;
220  }
221  if (last_symbol == -2) {
222  int skip = FFMIN((unsigned)repeat, dst + w - p);
223  repeat -= skip;
224  p += skip;
225  }
226  } else
227  last_symbol = 127 - b;
228  }
229  if (last_symbol >= 0)
230  *p = last_symbol;
231  else if (last_symbol == -1 && prev_avail)
232  *p = *(p - stride);
233  } while (++p < dst + w);
234  dst += stride;
235  prev_avail = 1;
236  } while (--h);
237 
238  return 0;
239 }
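/* Added note (summary of the opcodes handled above, not an original comment):
 * in 555 mode a byte b < 128 starts a literal 15-bit pixel (b << 8 | next
 * byte) which also becomes the current symbol; b == 128 means "copy the pixel
 * from the previous row", b == 129 means "leave the destination untouched";
 * b >= 130 starts a run in which the next b - 130 bytes accumulate a repeat
 * count and the current symbol (or row-copy/skip action) is applied that many
 * more times. */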
240 
241 static int decode_rle(GetBitContext *gb, uint8_t *pal_dst, ptrdiff_t pal_stride,
242  uint8_t *rgb_dst, ptrdiff_t rgb_stride, uint32_t *pal,
243  int keyframe, int kf_slipt, int slice, int w, int h)
244 {
245  uint8_t bits[270] = { 0 };
246  uint32_t codes[270];
247  VLC vlc;
248 
249  int current_length = 0, read_codes = 0, next_code = 0, current_codes = 0;
250  int remaining_codes, surplus_codes, i;
251 
252  const int alphabet_size = 270 - keyframe;
253 
254  int last_symbol = 0, repeat = 0, prev_avail = 0;
255 
256  if (!keyframe) {
257  int x, y, clipw, cliph;
258 
259  x = get_bits(gb, 12);
260  y = get_bits(gb, 12);
261  clipw = get_bits(gb, 12) + 1;
262  cliph = get_bits(gb, 12) + 1;
263 
264  if (x + clipw > w || y + cliph > h)
265  return AVERROR_INVALIDDATA;
266  pal_dst += pal_stride * y + x;
267  rgb_dst += rgb_stride * y + x * 3;
268  w = clipw;
269  h = cliph;
270  if (y)
271  prev_avail = 1;
272  } else {
273  if (slice > 0) {
274  pal_dst += pal_stride * kf_slipt;
275  rgb_dst += rgb_stride * kf_slipt;
276  prev_avail = 1;
277  h -= kf_slipt;
278  } else
279  h = kf_slipt;
280  }
281 
282  /* read explicit codes */
283  do {
284  while (current_codes--) {
285  int symbol = get_bits(gb, 8);
286  if (symbol >= 204 - keyframe)
287  symbol += 14 - keyframe;
288  else if (symbol > 189)
289  symbol = get_bits1(gb) + (symbol << 1) - 190;
290  if (bits[symbol])
291  return AVERROR_INVALIDDATA;
292  bits[symbol] = current_length;
293  codes[symbol] = next_code++;
294  read_codes++;
295  }
296  current_length++;
297  next_code <<= 1;
298  remaining_codes = (1 << current_length) - next_code;
299  current_codes = get_bits(gb, av_ceil_log2(remaining_codes + 1));
300  if (current_length > 22 || current_codes > remaining_codes)
301  return AVERROR_INVALIDDATA;
302  } while (current_codes != remaining_codes);
303 
304  remaining_codes = alphabet_size - read_codes;
305 
306  /* determine the minimum length to fit the rest of the alphabet */
307  while ((surplus_codes = (2 << current_length) -
308  (next_code << 1) - remaining_codes) < 0) {
309  current_length++;
310  next_code <<= 1;
311  }
312 
313  /* add the rest of the symbols lexicographically */
314  for (i = 0; i < alphabet_size; i++)
315  if (!bits[i]) {
316  if (surplus_codes-- == 0) {
317  current_length++;
318  next_code <<= 1;
319  }
320  bits[i] = current_length;
321  codes[i] = next_code++;
322  }
323 
324  if (next_code != 1 << current_length)
325  return AVERROR_INVALIDDATA;
326 
327  if ((i = vlc_init(&vlc, 9, alphabet_size, bits, 1, 1, codes, 4, 4, 0)) < 0)
328  return i;
329 
330  /* frame decode */
331  do {
332  uint8_t *pp = pal_dst;
333  uint8_t *rp = rgb_dst;
334  do {
335  if (repeat-- < 1) {
336  int b = get_vlc2(gb, vlc.table, 9, 3);
337  if (b < 256)
338  last_symbol = b;
339  else if (b < 268) {
340  b -= 256;
341  if (b == 11)
342  b = get_bits(gb, 4) + 10;
343 
344  if (!b)
345  repeat = 0;
346  else
347  repeat = get_bits(gb, b);
348 
349  repeat += (1 << b) - 1;
350 
351  if (last_symbol == -2) {
352  int skip = FFMIN(repeat, pal_dst + w - pp);
353  repeat -= skip;
354  pp += skip;
355  rp += skip * 3;
356  }
357  } else
358  last_symbol = 267 - b;
359  }
360  if (last_symbol >= 0) {
361  *pp = last_symbol;
362  AV_WB24(rp, pal[last_symbol]);
363  } else if (last_symbol == -1 && prev_avail) {
364  *pp = *(pp - pal_stride);
365  memcpy(rp, rp - rgb_stride, 3);
366  }
367  rp += 3;
368  } while (++pp < pal_dst + w);
369  pal_dst += pal_stride;
370  rgb_dst += rgb_stride;
371  prev_avail = 1;
372  } while (--h);
373 
374  ff_vlc_free(&vlc);
375  return 0;
376 }
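/* Added note (not an original comment): the table construction above is a
 * canonical prefix-code build — the bitstream lists, length by length, the
 * symbols that receive explicit codes, and every symbol not mentioned is then
 * appended in increasing symbol order at the smallest length that still fits,
 * so the code space ends up exactly full (next_code == 1 << current_length). */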
377 
378 static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
379  int x, int y, int w, int h, int wmv9_mask)
380 {
381  MSS2Context *ctx = avctx->priv_data;
382  MSS12Context *c = &ctx->c;
383  VC1Context *v = avctx->priv_data;
384  MpegEncContext *s = &v->s;
385  MPVWorkPicture *f;
386  int ret;
387 
388  ff_mpeg_flush(avctx);
389 
390  if ((ret = init_get_bits8(&s->gb, buf, buf_size)) < 0)
391  return ret;
392 
393  s->loop_filter = avctx->skip_loop_filter < AVDISCARD_ALL;
394 
395  if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
396  av_log(v->s.avctx, AV_LOG_ERROR, "header error\n");
397  return AVERROR_INVALIDDATA;
398  }
399 
400  if (s->pict_type != AV_PICTURE_TYPE_I) {
401  av_log(v->s.avctx, AV_LOG_ERROR, "expected I-frame\n");
402  return AVERROR_INVALIDDATA;
403  }
404 
405  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
406 
407  if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
408  av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
409  avctx->pix_fmt = AV_PIX_FMT_RGB24;
410  return ret;
411  }
412 
413  ff_mpeg_er_frame_start(s);
414 
415  v->end_mb_x = (w + 15) >> 4;
416  s->end_mb_y = (h + 15) >> 4;
417  if (v->respic & 1)
418  v->end_mb_x = v->end_mb_x + 1 >> 1;
419  if (v->respic & 2)
420  s->end_mb_y = s->end_mb_y + 1 >> 1;
421 
422  ff_vc1_decode_blocks(v);
423 
424  if (v->end_mb_x == s->mb_width && s->end_mb_y == s->mb_height) {
425  ff_er_frame_end(&s->er, NULL);
426  } else {
427  av_log(v->s.avctx, AV_LOG_WARNING,
428  "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
429  v->end_mb_x, s->end_mb_y, s->mb_width, s->mb_height);
430  }
431 
432  ff_mpv_frame_end(s);
433 
434  f = &s->cur_pic;
435 
436  if (v->respic == 3) {
437  ctx->dsp.upsample_plane(f->data[0], f->linesize[0], w, h);
438  ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w+1 >> 1, h+1 >> 1);
439  ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w+1 >> 1, h+1 >> 1);
440  } else if (v->respic)
441  avpriv_request_sample(v->s.avctx,
442  "Asymmetric WMV9 rectangle subsampling");
443 
444  av_assert0(f->linesize[1] == f->linesize[2]);
445 
446  if (wmv9_mask != -1)
447  ctx->dsp.mss2_blit_wmv9_masked(c->rgb_pic + y * c->rgb_stride + x * 3,
448  c->rgb_stride, wmv9_mask,
449  c->pal_pic + y * c->pal_stride + x,
450  c->pal_stride,
451  f->data[0], f->linesize[0],
452  f->data[1], f->data[2], f->linesize[1],
453  w, h);
454  else
455  ctx->dsp.mss2_blit_wmv9(c->rgb_pic + y * c->rgb_stride + x * 3,
456  c->rgb_stride,
457  f->data[0], f->linesize[0],
458  f->data[1], f->data[2], f->linesize[1],
459  w, h);
460 
461  avctx->pix_fmt = AV_PIX_FMT_RGB24;
462 
463  return 0;
464 }
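/* Added note (not an original comment): each coded WMV9 rectangle is decoded
 * by the regular VC-1 code path as a stand-alone I-frame; the resulting
 * YUV 4:2:0 picture is optionally doubled in each direction (respic) and then
 * blitted into the RGB24 canvas by the MSS2 DSP helpers, either plainly or
 * gated per pixel on wmv9_mask against the palette plane. */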
465 
466 struct Rectangle {
467  int coded, x, y, w, h;
468 };
469 
470 struct Rectangle2 {
471  int left, right, top, bottom;
472 };
473 
474 static void calc_draw_region(struct Rectangle2 * draw, const struct Rectangle2 * rect)
475 {
476 #define COMPARE(top, bottom, left, right) \
477  if (rect->top <= draw->top && rect->bottom >= draw->bottom) { \
478  if (rect->left <= draw->left && rect->right >= draw->left) \
479  draw->left = FFMIN(rect->right, draw->right); \
480  \
481  if (rect->right >= draw->right) { \
482  if (rect->left >= draw->left) { \
483  if (rect->left < draw->right) \
484  draw->right = rect->left; \
485  } else { \
486  draw->right = draw->left; \
487  } \
488  } \
489  }
490 
491  COMPARE(top, bottom, left, right)
492  COMPARE(left, right, top, bottom)
493 }
494 
495 static int calc_split_position(int split_position, const struct Rectangle2 * rect, int height)
496 {
497  if (rect->top || rect->bottom != height)
498  split_position = rect->top + split_position * (rect->bottom - rect->top) / height;
499 
500  return av_clip(split_position, rect->top + 1, rect->bottom - 1);
501 }
502 
503 #define MAX_WMV9_RECTANGLES 20
504 #define ARITH2_PADDING 2
505 
506 static int mss2_decode_frame(AVCodecContext *avctx, AVFrame *frame,
507  int *got_frame, AVPacket *avpkt)
508 {
509  const uint8_t *buf = avpkt->data;
510  int buf_size = avpkt->size;
511  MSS2Context *ctx = avctx->priv_data;
512  MSS12Context *c = &ctx->c;
513  GetBitContext gb;
514  GetByteContext gB;
515  ArithCoder acoder;
516 
517  int keyframe, has_wmv9, has_mv, is_rle, is_555, ret;
518 
519  struct Rectangle wmv9rects[MAX_WMV9_RECTANGLES], *r;
520  struct Rectangle2 draw;
521  int used_rects = 0, i, implicit_rect = 0, wmv9_mask = -1;
522 
523  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
524  return ret;
525 
526  if (keyframe = get_bits1(&gb))
527  skip_bits(&gb, 7);
528  has_wmv9 = get_bits1(&gb);
529  has_mv = keyframe ? 0 : get_bits1(&gb);
530  is_rle = get_bits1(&gb);
531  is_555 = is_rle && get_bits1(&gb);
532  if (c->slice_split > 0)
533  ctx->split_position = c->slice_split;
534  else if (c->slice_split < 0) {
535  if (get_bits1(&gb)) {
536  if (get_bits1(&gb)) {
537  if (get_bits1(&gb))
538  ctx->split_position = get_bits(&gb, 16);
539  else
540  ctx->split_position = get_bits(&gb, 12);
541  } else
542  ctx->split_position = get_bits(&gb, 8) << 4;
543  } else {
544  if (keyframe)
545  ctx->split_position = avctx->height / 2;
546  }
547  } else
548  ctx->split_position = avctx->height;
549 
550  if (c->slice_split && (ctx->split_position < 1 - is_555 ||
551  ctx->split_position > avctx->height - 1))
552  return AVERROR_INVALIDDATA;
553 
554  align_get_bits(&gb);
555  buf += get_bits_count(&gb) >> 3;
556  buf_size -= get_bits_count(&gb) >> 3;
557 
558  if (buf_size < 1)
559  return AVERROR_INVALIDDATA;
560 
561  if (is_555 && (has_wmv9 || has_mv || c->slice_split && ctx->split_position))
562  return AVERROR_INVALIDDATA;
563 
564  avctx->pix_fmt = is_555 ? AV_PIX_FMT_RGB555 : AV_PIX_FMT_RGB24;
565  if (ctx->last_pic->format != avctx->pix_fmt)
566  av_frame_unref(ctx->last_pic);
567 
568  if (has_wmv9) {
569  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
570  arith2_init(&acoder, &gB);
571 
572  implicit_rect = !arith2_get_bit(&acoder);
573 
574  while (arith2_get_bit(&acoder)) {
575  if (used_rects == MAX_WMV9_RECTANGLES)
576  return AVERROR_INVALIDDATA;
577  r = &wmv9rects[used_rects];
578  if (!used_rects)
579  r->x = arith2_get_number(&acoder, avctx->width);
580  else
581  r->x = arith2_get_number(&acoder, avctx->width -
582  wmv9rects[used_rects - 1].x) +
583  wmv9rects[used_rects - 1].x;
584  r->y = arith2_get_number(&acoder, avctx->height);
585  r->w = arith2_get_number(&acoder, avctx->width - r->x) + 1;
586  r->h = arith2_get_number(&acoder, avctx->height - r->y) + 1;
587  used_rects++;
588  }
589 
590  if (implicit_rect && used_rects) {
591  av_log(avctx, AV_LOG_ERROR, "implicit_rect && used_rects > 0\n");
592  return AVERROR_INVALIDDATA;
593  }
594 
595  if (implicit_rect) {
596  wmv9rects[0].x = 0;
597  wmv9rects[0].y = 0;
598  wmv9rects[0].w = avctx->width;
599  wmv9rects[0].h = avctx->height;
600 
601  used_rects = 1;
602  }
603  for (i = 0; i < used_rects; i++) {
604  if (!implicit_rect && arith2_get_bit(&acoder)) {
605  av_log(avctx, AV_LOG_ERROR, "Unexpected grandchildren\n");
606  return AVERROR_INVALIDDATA;
607  }
608  if (!i) {
609  wmv9_mask = arith2_get_bit(&acoder) - 1;
610  if (!wmv9_mask)
611  wmv9_mask = arith2_get_number(&acoder, 256);
612  }
613  wmv9rects[i].coded = arith2_get_number(&acoder, 2);
614  }
615 
616  buf += arith2_get_consumed_bytes(&acoder);
617  buf_size -= arith2_get_consumed_bytes(&acoder);
618  if (buf_size < 1)
619  return AVERROR_INVALIDDATA;
620  }
621 
622  c->mvX = c->mvY = 0;
623  if (keyframe && !is_555) {
624  if ((i = decode_pal_v2(c, buf, buf_size)) < 0)
625  return AVERROR_INVALIDDATA;
626  buf += i;
627  buf_size -= i;
628  } else if (has_mv) {
629  buf += 4;
630  buf_size -= 4;
631  if (buf_size < 1)
632  return AVERROR_INVALIDDATA;
633  c->mvX = AV_RB16(buf - 4) - avctx->width;
634  c->mvY = AV_RB16(buf - 2) - avctx->height;
635  }
636 
637  if (c->mvX < 0 || c->mvY < 0) {
638  FFSWAP(uint8_t *, c->pal_pic, c->last_pal_pic);
639 
640  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
641  return ret;
642 
643  if (ctx->last_pic->data[0]) {
644  av_assert0(frame->linesize[0] == ctx->last_pic->linesize[0]);
645  c->last_rgb_pic = ctx->last_pic->data[0] +
646  ctx->last_pic->linesize[0] * (avctx->height - 1);
647  } else {
648  av_log(avctx, AV_LOG_ERROR, "Missing keyframe\n");
649  return AVERROR_INVALIDDATA;
650  }
651  } else {
652  if ((ret = ff_reget_buffer(avctx, ctx->last_pic, 0)) < 0)
653  return ret;
654  if ((ret = av_frame_ref(frame, ctx->last_pic)) < 0)
655  return ret;
656 
657  c->last_rgb_pic = NULL;
658  }
659  c->rgb_pic = frame->data[0] +
660  frame->linesize[0] * (avctx->height - 1);
661  c->rgb_stride = -frame->linesize[0];
662 
663  if (keyframe)
664  frame->flags |= AV_FRAME_FLAG_KEY;
665  else
666  frame->flags &= ~AV_FRAME_FLAG_KEY;
667  frame->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
668 
669  if (is_555) {
670  bytestream2_init(&gB, buf, buf_size);
671 
672  if (decode_555(avctx, &gB, (uint16_t *)c->rgb_pic, c->rgb_stride >> 1,
673  keyframe, avctx->width, avctx->height))
674  return AVERROR_INVALIDDATA;
675 
676  buf_size -= bytestream2_tell(&gB);
677  } else {
678  if (keyframe) {
679  c->corrupted = 0;
680  ff_mss12_slicecontext_reset(&ctx->sc[0]);
681  if (c->slice_split)
682  ff_mss12_slicecontext_reset(&ctx->sc[1]);
683  }
684  if (is_rle) {
685  if ((ret = init_get_bits8(&gb, buf, buf_size)) < 0)
686  return ret;
687  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
688  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
689  ctx->split_position, 0,
690  avctx->width, avctx->height))
691  return ret;
692  align_get_bits(&gb);
693 
694  if (c->slice_split)
695  if (ret = decode_rle(&gb, c->pal_pic, c->pal_stride,
696  c->rgb_pic, c->rgb_stride, c->pal, keyframe,
697  ctx->split_position, 1,
698  avctx->width, avctx->height))
699  return ret;
700 
701  align_get_bits(&gb);
702  buf += get_bits_count(&gb) >> 3;
703  buf_size -= get_bits_count(&gb) >> 3;
704  } else if (!implicit_rect || wmv9_mask != -1) {
705  if (c->corrupted)
706  return AVERROR_INVALIDDATA;
707  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
708  arith2_init(&acoder, &gB);
709  c->keyframe = keyframe;
710 
711  draw.left = 0;
712  draw.top = 0;
713  draw.right = avctx->width;
714  draw.bottom = avctx->height;
715  if (wmv9_mask == -1) {
716  for (i = 0; i < used_rects; i++) {
717  struct Rectangle2 r;
718  r.left = wmv9rects[i].x;
719  r.top = wmv9rects[i].y;
720  r.right = r.left + wmv9rects[i].w;
721  r.bottom = r.top + wmv9rects[i].h;
722  calc_draw_region(&draw, &r);
723  }
724  }
725 
726  if (draw.left >= avctx->width || draw.right > avctx->width ||
727  draw.top >= avctx->height || draw.bottom > avctx->height)
728  return AVERROR_INVALIDDATA;
729 
730  if (c->slice_split && draw.bottom - draw.top >= 10) {
731  ctx->split_position = calc_split_position(ctx->split_position, &draw, avctx->height);
732  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[0], &acoder, 0, draw.top,
733  avctx->width,
734  ctx->split_position - draw.top))
735  return AVERROR_INVALIDDATA;
736  buf += arith2_get_consumed_bytes(&acoder);
737  buf_size -= arith2_get_consumed_bytes(&acoder);
738  if (c->slice_split) {
739  if (buf_size < 1)
740  return AVERROR_INVALIDDATA;
741  bytestream2_init(&gB, buf, buf_size + ARITH2_PADDING);
742  arith2_init(&acoder, &gB);
743  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[1], &acoder, 0,
744  ctx->split_position,
745  avctx->width,
746  draw.bottom - ctx->split_position))
747  return AVERROR_INVALIDDATA;
748  buf += arith2_get_consumed_bytes(&acoder);
749  buf_size -= arith2_get_consumed_bytes(&acoder);
750  }
751  } else {
752  if (c->corrupted = ff_mss12_decode_rect(&ctx->sc[0], &acoder, draw.left, draw.top,
753  draw.right - draw.left, draw.bottom - draw.top))
754  return AVERROR_INVALIDDATA;
755 
756  buf += arith2_get_consumed_bytes(&acoder);
757  buf_size -= arith2_get_consumed_bytes(&acoder);
758  }
759  } else
760  memset(c->pal_pic, 0, c->pal_stride * avctx->height);
761  }
762 
763  if (has_wmv9) {
764  for (i = 0; i < used_rects; i++) {
765  int x = wmv9rects[i].x;
766  int y = wmv9rects[i].y;
767  int w = wmv9rects[i].w;
768  int h = wmv9rects[i].h;
769  if (wmv9rects[i].coded) {
770  int WMV9codedFrameSize;
771  if (buf_size < 4 || !(WMV9codedFrameSize = AV_RL24(buf)))
772  return AVERROR_INVALIDDATA;
773  if (ret = decode_wmv9(avctx, buf + 3, buf_size - 3,
774  x, y, w, h, wmv9_mask))
775  return ret;
776  buf += WMV9codedFrameSize + 3;
777  buf_size -= WMV9codedFrameSize + 3;
778  } else {
779  uint8_t *dst = c->rgb_pic + y * c->rgb_stride + x * 3;
780  if (wmv9_mask != -1) {
781  ctx->dsp.mss2_gray_fill_masked(dst, c->rgb_stride,
782  wmv9_mask,
783  c->pal_pic + y * c->pal_stride + x,
784  c->pal_stride,
785  w, h);
786  } else {
787  do {
788  memset(dst, 0x80, w * 3);
789  dst += c->rgb_stride;
790  } while (--h);
791  }
792  }
793  }
794  }
795 
796  if (buf_size)
797  av_log(avctx, AV_LOG_WARNING, "buffer not fully consumed\n");
798 
799  if (c->mvX < 0 || c->mvY < 0) {
800  ret = av_frame_replace(ctx->last_pic, frame);
801  if (ret < 0)
802  return ret;
803  }
804 
805  *got_frame = 1;
806 
807  return avpkt->size;
808 }
809 
810 static av_cold int wmv9_init(AVCodecContext *avctx)
811 {
812  VC1Context *v = avctx->priv_data;
813  int ret;
814 
815  v->s.avctx = avctx;
816 
817  ff_vc1_init_common(v);
818 
819  v->profile = PROFILE_MAIN;
820 
821  v->zz_8x4 = ff_wmv2_scantableA;
822  v->zz_4x8 = ff_wmv2_scantableB;
823  v->res_y411 = 0;
824  v->res_sprite = 0;
825 
826  v->frmrtq_postproc = 7;
827  v->bitrtq_postproc = 31;
828 
829  v->res_x8 = 0;
830  v->multires = 0;
831  v->res_fasttx = 1;
832 
833  v->fastuvmc = 0;
834 
835  v->extended_mv = 0;
836 
837  v->dquant = 1;
838  v->vstransform = 1;
839 
840  v->res_transtab = 0;
841 
842  v->overlap = 0;
843 
844  v->resync_marker = 0;
845  v->rangered = 0;
846 
847  v->s.max_b_frames = avctx->max_b_frames = 0;
848  v->quantizer_mode = 0;
849 
850  v->finterpflag = 0;
851 
852  v->res_rtm_flag = 1;
853 
854  ff_vc1_init_transposed_scantables(v);
855 
856  ret = ff_vc1_decode_init(avctx);
857  if (ret < 0)
858  return ret;
859 
860  return 0;
861 }
862 
863 static av_cold int mss2_decode_end(AVCodecContext *avctx)
864 {
865  MSS2Context *const ctx = avctx->priv_data;
866 
867  av_frame_free(&ctx->last_pic);
868 
869  ff_mss12_decode_end(&ctx->c);
870  av_freep(&ctx->c.pal_pic);
871  av_freep(&ctx->c.last_pal_pic);
872  ff_vc1_decode_end(avctx);
873 
874  return 0;
875 }
876 
877 static av_cold int mss2_decode_init(AVCodecContext *avctx)
878 {
879  MSS2Context * const ctx = avctx->priv_data;
880  MSS12Context *c = &ctx->c;
881  int ret;
882  c->avctx = avctx;
883  if (ret = ff_mss12_decode_init(c, 1, &ctx->sc[0], &ctx->sc[1]))
884  return ret;
885  ctx->last_pic = av_frame_alloc();
886  c->pal_stride = c->mask_stride;
887  c->pal_pic = av_mallocz(c->pal_stride * avctx->height);
888  c->last_pal_pic = av_mallocz(c->pal_stride * avctx->height);
889  if (!c->pal_pic || !c->last_pal_pic || !ctx->last_pic)
890  return AVERROR(ENOMEM);
891  if (ret = wmv9_init(avctx))
892  return ret;
893  ff_mss2dsp_init(&ctx->dsp);
894 
895  avctx->pix_fmt = c->free_colours == 127 ? AV_PIX_FMT_RGB555
896  : AV_PIX_FMT_RGB24;
897 
898 
899  return 0;
900 }
901 
902 const FFCodec ff_mss2_decoder = {
903  .p.name = "mss2",
904  CODEC_LONG_NAME("MS Windows Media Video V9 Screen"),
905  .p.type = AVMEDIA_TYPE_VIDEO,
906  .p.id = AV_CODEC_ID_MSS2,
907  .priv_data_size = sizeof(MSS2Context),
908  .init = mss2_decode_init,
909  .close = mss2_decode_end,
910  FF_CODEC_DECODE_CB(mss2_decode_frame),
911  .p.capabilities = AV_CODEC_CAP_DR1,
912  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
913 };
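Illustrative usage sketch (added; not part of mss2.c): the decoder above is normally driven through the public libavcodec API, with the stream parameters — including the extradata that mss2_decode_init() hands to ff_mss12_decode_init() — supplied by a demuxer such as the ASF reader. A minimal outline, with error handling omitted and `par`/`pkt` assumed to come from libavformat, might look like this:

#include <libavcodec/avcodec.h>

/* Decode one MSS2 packet into an AVFrame (RGB24 or RGB555, as chosen by the
 * decoder).  `par` and `pkt` are assumed to come from a demuxer; all error
 * checks are omitted for brevity. */
static AVFrame *decode_one_mss2(const AVCodecParameters *par, AVPacket *pkt)
{
    const AVCodec  *dec   = avcodec_find_decoder(AV_CODEC_ID_MSS2);
    AVCodecContext *avctx = avcodec_alloc_context3(dec);
    AVFrame        *frame = av_frame_alloc();

    avcodec_parameters_to_context(avctx, par); /* width, height, extradata */
    avcodec_open2(avctx, dec, NULL);

    avcodec_send_packet(avctx, pkt);
    if (avcodec_receive_frame(avctx, frame) < 0)
        av_frame_free(&frame);                 /* no frame was produced */

    avcodec_free_context(&avctx);
    return frame;
}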