FFmpeg
rasc.c
/*
 * RemotelyAnywhere Screen Capture decoder
 *
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "internal.h"
#include "zlib_wrapper.h"

#include <zlib.h>

#define KBND MKTAG('K', 'B', 'N', 'D')
#define FINT MKTAG('F', 'I', 'N', 'T')
#define INIT MKTAG('I', 'N', 'I', 'T')
#define BNDL MKTAG('B', 'N', 'D', 'L')
#define KFRM MKTAG('K', 'F', 'R', 'M')
#define DLTA MKTAG('D', 'L', 'T', 'A')
#define MOUS MKTAG('M', 'O', 'U', 'S')
#define MPOS MKTAG('M', 'P', 'O', 'S')
#define MOVE MKTAG('M', 'O', 'V', 'E')
#define EMPT MKTAG('E', 'M', 'P', 'T')

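/*
 * Chunk layout, as parsed by decode_frame() below: each packet is a
 * sequence of chunks, every chunk starting with a 32-bit tag (one of the
 * MKTAG values above) followed by a little-endian 32-bit payload size.
 * A packet whose first tag is EMPT carries no update and is skipped
 * entirely.  KBND and BNDL wrap another chunk: the bundle tag is
 * immediately followed by the tag of the contained chunk, and KBND marks
 * the resulting frame as intra.  The remaining tags select a sub-decoder
 * (FINT/INIT: format header, KFRM: zlib keyframe, DLTA: run-length delta,
 * MOVE: block moves, MOUS: cursor image, MPOS: cursor position); unknown
 * tags are skipped using their size field.
 */
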
typedef struct RASCContext {
    AVClass        *class;
    int             skip_cursor;
    GetByteContext  gb;
    uint8_t        *delta;
    int             delta_size;
    uint8_t        *cursor;
    int             cursor_size;
    unsigned        cursor_w;
    unsigned        cursor_h;
    unsigned        cursor_x;
    unsigned        cursor_y;
    int             stride;
    int             bpp;
    AVFrame        *frame;
    AVFrame        *frame1;
    AVFrame        *frame2;
    FFZStream       zstream;
} RASCContext;

static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst = frame->data[0];

    if (!dst)
        return;

    for (int y = 0; y < avctx->height; y++) {
        memset(dst, 0, avctx->width * s->bpp);
        dst += frame->linesize[0];
    }
}

static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *srcp = src->data[0];
    uint8_t *dstp = dst->data[0];

    for (int y = 0; y < avctx->height; y++) {
        memcpy(dstp, srcp, s->stride);
        srcp += src->linesize[0];
        dstp += dst->linesize[0];
    }
}

static int init_frames(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int ret;

    av_frame_unref(s->frame1);
    av_frame_unref(s->frame2);
    if ((ret = ff_get_buffer(avctx, s->frame1, 0)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, s->frame2, 0)) < 0)
        return ret;

    clear_plane(avctx, s->frame2);
    clear_plane(avctx, s->frame1);

    return 0;
}

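/*
 * FINT/INIT: the format header.  As read below: 8 bytes are skipped, the
 * width and height follow as little-endian 32-bit values, another 30
 * bytes are skipped, a 16-bit bit depth (8, 16 or 32, mapped to PAL8,
 * RGB555LE or BGR0) is read, 24 more bytes are skipped and, for 8 bpp
 * streams, 256 32-bit palette entries follow with the alpha byte forced
 * to 0xFF.  The skipped fields are not interpreted by this decoder.
 * A header that does not start with the value 0x65 merely clears both
 * reference frames.
 */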
static int decode_fint(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, fmt;
    int ret;

    if (bytestream2_peek_le32(gb) != 0x65) {
        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        clear_plane(avctx, s->frame2);
        clear_plane(avctx, s->frame1);
        return 0;
    }
    if (bytestream2_get_bytes_left(gb) < 72)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 30);
    fmt = bytestream2_get_le16(gb);
    bytestream2_skip(gb, 24);

    switch (fmt) {
    case  8: s->stride = FFALIGN(w, 4);
             s->bpp    = 1;
             fmt       = AV_PIX_FMT_PAL8; break;
    case 16: s->stride = w * 2;
             s->bpp    = 2;
             fmt       = AV_PIX_FMT_RGB555LE; break;
    case 32: s->stride = w * 4;
             s->bpp    = 4;
             fmt       = AV_PIX_FMT_BGR0; break;
    default: return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width  = w;
    avctx->height = h;
    avctx->pix_fmt = fmt;

    ret = init_frames(avctx);
    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *pal = (uint32_t *)s->frame2->data[1];

        for (int i = 0; i < 256; i++)
            pal[i] = bytestream2_get_le32(gb) | 0xFF000000u;
    }

    return 0;
}

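/*
 * MOVE, DLTA and MOUS payloads may be zlib-compressed (compression field
 * set to 1).  decode_zlib() inflates such a payload into the shared
 * s->delta scratch buffer, growing it with av_fast_padded_malloc() as
 * needed; KFRM image data is instead inflated row by row in decode_kfrm().
 */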
static int decode_zlib(AVCodecContext *avctx, const AVPacket *avpkt,
                       unsigned size, unsigned uncompressed_size)
{
    RASCContext *s = avctx->priv_data;
    z_stream *const zstream = &s->zstream.zstream;
    GetByteContext *gb = &s->gb;
    int zret;

    zret = inflateReset(zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    av_fast_padded_malloc(&s->delta, &s->delta_size, uncompressed_size);
    if (!s->delta)
        return AVERROR(ENOMEM);

    zstream->next_in  = avpkt->data + bytestream2_tell(gb);
    zstream->avail_in = FFMIN(size, bytestream2_get_bytes_left(gb));

    zstream->next_out  = s->delta;
    zstream->avail_out = s->delta_size;

    zret = inflate(zstream, Z_FINISH);
    if (zret != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR,
               "Inflate failed with return code: %d.\n", zret);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

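/*
 * MOVE: a list of 16-byte move records, optionally zlib-compressed.
 * Each record holds, as little-endian 16-bit values: type, start_x,
 * start_y, end_x, end_y, mov_x, mov_y and two padding bytes.  Type 0
 * copies the rectangle at (mov_x, mov_y) to (start_x, start_y) within
 * frame2 via a temporary buffer, type 1 clears the destination rectangle
 * in frame2, and type 2 copies it from frame2 into frame1.  The row
 * pointers start at the bottom of each rectangle and walk upwards,
 * matching the bottom-up row order used throughout this decoder.
 */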
static int decode_move(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext mc;
    unsigned pos, compression, nb_moves;
    unsigned uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    nb_moves = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 8);
    compression = bytestream2_get_le32(gb);

    if (nb_moves > INT32_MAX / 16 || nb_moves > avctx->width * avctx->height)
        return AVERROR_INVALIDDATA;

    uncompressed_size = 16 * nb_moves;

    if (compression == 1) {
        ret = decode_zlib(avctx, avpkt,
                          size - (bytestream2_tell(gb) - pos),
                          uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&mc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        bytestream2_init(&mc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_bytes_left(&mc) < uncompressed_size)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < nb_moves; i++) {
        int type, start_x, start_y, end_x, end_y, mov_x, mov_y;
        uint8_t *e2, *b1, *b2;
        int w, h;

        type    = bytestream2_get_le16(&mc);
        start_x = bytestream2_get_le16(&mc);
        start_y = bytestream2_get_le16(&mc);
        end_x   = bytestream2_get_le16(&mc);
        end_y   = bytestream2_get_le16(&mc);
        mov_x   = bytestream2_get_le16(&mc);
        mov_y   = bytestream2_get_le16(&mc);
        bytestream2_skip(&mc, 2);

        if (start_x >= avctx->width || start_y >= avctx->height ||
            end_x   >= avctx->width || end_y   >= avctx->height ||
            mov_x   >= avctx->width || mov_y   >= avctx->height) {
            continue;
        }

        if (start_x >= end_x || start_y >= end_y)
            continue;

        w = end_x - start_x;
        h = end_y - start_y;

        if (mov_x + w > avctx->width || mov_y + h > avctx->height)
            continue;

        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        b1 = s->frame1->data[0] + s->frame1->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        b2 = s->frame2->data[0] + s->frame2->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        e2 = s->frame2->data[0] + s->frame2->linesize[0] * (mov_y + h - 1) + mov_x * s->bpp;

        if (type == 2) {
            for (int j = 0; j < h; j++) {
                memcpy(b1, b2, w * s->bpp);
                b1 -= s->frame1->linesize[0];
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 1) {
            for (int j = 0; j < h; j++) {
                memset(b2, 0, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 0) {
            uint8_t *buffer;

            av_fast_padded_malloc(&s->delta, &s->delta_size, w * h * s->bpp);
            buffer = s->delta;
            if (!buffer)
                return AVERROR(ENOMEM);

            for (int j = 0; j < h; j++) {
                memcpy(buffer + j * w * s->bpp, e2, w * s->bpp);
                e2 -= s->frame2->linesize[0];
            }

            for (int j = 0; j < h; j++) {
                memcpy(b2, buffer + j * w * s->bpp, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else {
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

#define NEXT_LINE                         \
    if (cx >= w * s->bpp) {               \
        cx = 0;                           \
        cy--;                             \
        b1 -= s->frame1->linesize[0];     \
        b2 -= s->frame2->linesize[0];     \
    }                                     \
    len--;

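/*
 * DLTA: a run-length coded update of the rectangle (x, y, w, h) in
 * frame2; the values being overwritten are saved into frame1.  The
 * payload, optionally zlib-compressed, is a sequence of (opcode, count)
 * byte pairs.  As implemented below: opcodes 1 and 10 skip bytes
 * respectively 32-bit words, 2 and 12 swap bytes/words between frame1
 * and frame2, 3 and 13 read a new byte/word per step, and 4 and 7 read
 * a single fill value once and repeat it for the whole run; opcodes 3,
 * 4, 7 and 13 all move the previous frame2 content into frame1 first.
 * The write position starts at the bottom-left corner of the rectangle,
 * and the NEXT_LINE macro above steps up one row once a full row has
 * been consumed.
 */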
static int decode_dlta(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext dc;
    unsigned uncompressed_size, pos;
    unsigned x, y, w, h;
    int ret, cx, cy, compression;
    uint8_t *b1, *b2;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);
    x = bytestream2_get_le32(gb);
    y = bytestream2_get_le32(gb);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);

    if (x >= avctx->width || y >= avctx->height ||
        w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (x + w > avctx->width || y + h > avctx->height)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 4);
    compression = bytestream2_get_le32(gb);

    if (compression == 1) {
        if (w * h * s->bpp * 3 < uncompressed_size)
            return AVERROR_INVALIDDATA;
        ret = decode_zlib(avctx, avpkt, size, uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&dc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        if (bytestream2_get_bytes_left(gb) < uncompressed_size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&dc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    b1 = s->frame1->data[0] + s->frame1->linesize[0] * (y + h - 1) + x * s->bpp;
    b2 = s->frame2->data[0] + s->frame2->linesize[0] * (y + h - 1) + x * s->bpp;
    cx = 0, cy = h;
    while (bytestream2_get_bytes_left(&dc) > 0) {
        int type = bytestream2_get_byte(&dc);
        int len = bytestream2_get_byte(&dc);
        unsigned fill;

        switch (type) {
        case 1:
            while (len > 0 && cy > 0) {
                cx++;
                NEXT_LINE
            }
            break;
        case 2:
            while (len > 0 && cy > 0) {
                int v0 = b1[cx];
                int v1 = b2[cx];

                b2[cx] = v0;
                b1[cx] = v1;
                cx++;
                NEXT_LINE
            }
            break;
        case 3:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_byte(&dc);
                b1[cx] = b2[cx];
                b2[cx] = fill;
                cx++;
                NEXT_LINE
            }
            break;
        case 4:
            fill = bytestream2_get_byte(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx++;
                NEXT_LINE
            }
            break;
        case 7:
            fill = bytestream2_get_le32(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 10:
            while (len > 0 && cy > 0) {
                cx += 4;
                NEXT_LINE
            }
            break;
        case 12:
            while (len > 0 && cy > 0) {
                unsigned v0, v1;

                v0 = AV_RL32(b2 + cx);
                v1 = AV_RL32(b1 + cx);
                AV_WL32(b2 + cx, v1);
                AV_WL32(b1 + cx, v0);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 13:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_le32(&dc);
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        default:
            avpriv_request_sample(avctx, "runlen %d", type);
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

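/*
 * KFRM: a keyframe.  An embedded format header (leading value 0x65) is
 * handled by decode_fint() first; the remaining payload is a single zlib
 * stream holding two full bottom-up images, the new frame2 contents
 * followed by frame1, each inflated one row (s->stride bytes) at a time.
 */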
static int decode_kfrm(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    z_stream *const zstream = &s->zstream.zstream;
    GetByteContext *gb = &s->gb;
    uint8_t *dst;
    unsigned pos;
    int zret, ret;

    pos = bytestream2_tell(gb);
    if (bytestream2_peek_le32(gb) == 0x65) {
        ret = decode_fint(avctx, avpkt, size);
        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0])
        return AVERROR_INVALIDDATA;

    zret = inflateReset(zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    zstream->next_in  = avpkt->data + bytestream2_tell(gb);
    zstream->avail_in = bytestream2_get_bytes_left(gb);

    dst = s->frame2->data[0] + (avctx->height - 1) * s->frame2->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        zstream->next_out  = dst;
        zstream->avail_out = s->stride;

        zret = inflate(zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame2->linesize[0];
    }

    dst = s->frame1->data[0] + (avctx->height - 1) * s->frame1->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        zstream->next_out  = dst;
        zstream->avail_out = s->stride;

        zret = inflate(zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame1->linesize[0];
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

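/*
 * MOUS: the cursor bitmap, a zlib-compressed block of exactly w * h * 3
 * bytes, interpreted by draw_cursor() as bottom-up 24-bit RGB with the
 * first sample triplet serving as the transparency key.
 */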
static int decode_mous(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, pos, uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);

    if (w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (uncompressed_size != 3 * w * h)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->cursor, &s->cursor_size, uncompressed_size);
    if (!s->cursor)
        return AVERROR(ENOMEM);

    ret = decode_zlib(avctx, avpkt,
                      size - (bytestream2_tell(gb) - pos),
                      uncompressed_size);
    if (ret < 0)
        return ret;
    memcpy(s->cursor, s->delta, uncompressed_size);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    s->cursor_w = w;
    s->cursor_h = h;

    return 0;
}

static int decode_mpos(AVCodecContext *avctx,
                       const AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned pos;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    s->cursor_x = bytestream2_get_le32(gb);
    s->cursor_y = bytestream2_get_le32(gb);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

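/*
 * Blend the cursor stored by MOUS at the position given by MPOS onto the
 * output frame, converting its 24-bit RGB samples to the stream's pixel
 * format: nearest palette entry for PAL8, 5 bits per channel for
 * RGB555LE, and a direct copy for BGR0.  Samples matching the colour key
 * (the first cursor pixel) are left untouched.
 */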
static void draw_cursor(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst, *pal;

    if (!s->cursor)
        return;

    if (s->cursor_x >= avctx->width || s->cursor_y >= avctx->height)
        return;

    if (s->cursor_x + s->cursor_w > avctx->width ||
        s->cursor_y + s->cursor_h > avctx->height)
        return;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        pal = s->frame->data[1];
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];
                int best = INT_MAX;
                int index = 0;
                int dist;

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
                for (int k = 0; k < 256; k++) {
                    int pr = pal[k * 4 + 0];
                    int pg = pal[k * 4 + 1];
                    int pb = pal[k * 4 + 2];

                    dist = FFABS(cr - pr) + FFABS(cg - pg) + FFABS(cb - pb);
                    if (dist < best) {
                        best = dist;
                        index = k;
                    }
                }
                dst[0] = index;
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB555LE) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                cr >>= 3; cg >>= 3; cb >>= 3;
                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
                AV_WL16(dst, cr | cg << 5 | cb << 10);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_BGR0) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 4 * (s->cursor_x + j);
                dst[0] = cb;
                dst[1] = cg;
                dst[2] = cr;
            }
        }
    }
}

static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int ret, intra = 0;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if (bytestream2_peek_le32(gb) == EMPT)
        return avpkt->size;

    s->frame = frame;

    while (bytestream2_get_bytes_left(gb) > 0) {
        unsigned type, size = 0;

        if (bytestream2_get_bytes_left(gb) < 8)
            return AVERROR_INVALIDDATA;

        type = bytestream2_get_le32(gb);
        if (type == KBND || type == BNDL) {
            intra = type == KBND;
            type = bytestream2_get_le32(gb);
        }

        size = bytestream2_get_le32(gb);
        if (size > bytestream2_get_bytes_left(gb))
            return AVERROR_INVALIDDATA;

        switch (type) {
        case FINT:
        case INIT:
            ret = decode_fint(avctx, avpkt, size);
            break;
        case KFRM:
            ret = decode_kfrm(avctx, avpkt, size);
            break;
        case DLTA:
            ret = decode_dlta(avctx, avpkt, size);
            break;
        case MOVE:
            ret = decode_move(avctx, avpkt, size);
            break;
        case MOUS:
            ret = decode_mous(avctx, avpkt, size);
            break;
        case MPOS:
            ret = decode_mpos(avctx, avpkt, size);
            break;
        default:
            bytestream2_skip(gb, size);
            ret = 0;
        }

        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    copy_plane(avctx, s->frame2, s->frame);
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->frame2->data[1], 1024);
    if (!s->skip_cursor)
        draw_cursor(avctx);

    s->frame->key_frame = intra;
    s->frame->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    s->frame1 = av_frame_alloc();
    s->frame2 = av_frame_alloc();
    if (!s->frame1 || !s->frame2)
        return AVERROR(ENOMEM);

    return ff_inflate_init(&s->zstream, avctx);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    av_freep(&s->cursor);
    s->cursor_size = 0;
    av_freep(&s->delta);
    s->delta_size = 0;
    av_frame_free(&s->frame1);
    av_frame_free(&s->frame2);
    ff_inflate_end(&s->zstream);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    clear_plane(avctx, s->frame1);
    clear_plane(avctx, s->frame2);
}

static const AVOption options[] = {
{ "skip_cursor", "skip the cursor", offsetof(RASCContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
{ NULL },
};

static const AVClass rasc_decoder_class = {
    .class_name = "rasc decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

const FFCodec ff_rasc_decoder = {
    .p.name         = "rasc",
    .p.long_name    = NULL_IF_CONFIG_SMALL("RemotelyAnywhere Screen Capture"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_RASC,
    .priv_data_size = sizeof(RASCContext),
    .init           = decode_init,
    .close          = decode_close,
    FF_CODEC_DECODE_CB(decode_frame),
    .flush          = decode_flush,
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .p.priv_class   = &rasc_decoder_class,
};
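
For context, here is a minimal, hypothetical sketch of how an application could drive this decoder through the public libavcodec API (avcodec_find_decoder(), avcodec_send_packet(), avcodec_receive_frame()). It is not part of rasc.c, the helper names are invented for illustration, error reporting and end-of-stream flushing are abbreviated, and packets are assumed to come from a demuxer such as libavformat. The private "skip_cursor" option declared above could be passed in the options dictionary of avcodec_open2().

#include <libavcodec/avcodec.h>

/* Hypothetical helper: allocate and open a RASC decoder context. */
static AVCodecContext *open_rasc_decoder(void)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_RASC);
    AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;

    if (!ctx || avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}

/* Hypothetical helper: feed one demuxed packet and drain the decoded
 * screen images.  Each output frame is in ctx->pix_fmt (PAL8, RGB555LE
 * or BGR0, chosen by the FINT header in the stream); the caller owns
 * the AVFrame, allocated with av_frame_alloc(). */
static int decode_rasc_packet(AVCodecContext *ctx, AVFrame *frame,
                              const AVPacket *pkt)
{
    int ret = avcodec_send_packet(ctx, pkt);

    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
        /* ... use frame->data[0] / frame->linesize[0] here ... */
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}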