/*
 * Sierra VMD video decoder
 * Copyright (c) 2004 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD video decoder
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 */

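/*
 * Data layout read by this decoder:
 *   extradata (VMD file header, 0x330 bytes):
 *     offset  28: initial palette, 256 * 3 bytes of 6-bit VGA components
 *     offset 800: little-endian 32-bit size of the LZ unpack buffer
 *   per-packet 16-byte frame record:
 *     bytes  6/ 8: left/top of the update rectangle (little-endian)
 *     bytes 10/12: right/bottom of the update rectangle (inclusive)
 *     byte     15: flags (0x02: a palette chunk follows the record)
 */
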
#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "decode.h"
#include "bytestream.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame *prev_frame;

    const unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

    int x_off, y_off;
} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF

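/* Expand an LZ-compressed frame payload into dest. The stream starts with a
 * little-endian 32-bit count of bytes to produce, optionally followed by the
 * signature 0x56781234, which selects an alternate window start (0x111) and
 * enables extended match lengths. Matches reference a 0x1000-byte ring buffer
 * pre-filled with spaces; each tag byte controls up to eight items, LSB
 * first: a set bit copies one literal byte, a clear bit copies a match given
 * by a 12-bit window offset and a 4-bit length (+3). A tag of 0xFF while more
 * than eight output bytes remain copies eight literals directly. Returns the
 * number of bytes written, or AVERROR_INVALIDDATA on truncated or oversized
 * input. */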
static int lz_unpack(const unsigned char *src, int src_len,
                     unsigned char *dest, int dest_len)
{
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_len);
    d = dest;
    d_end = d + dest_len;
    dataleft = bytestream2_get_le32(&gb);
    memset(queue, 0x20, QUEUE_SIZE);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return AVERROR_INVALIDDATA;
    if (bytestream2_peek_le32(&gb) == 0x56781234) {
        bytestream2_skipu(&gb, 4);
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100; /* no speclen */
    }

    while (dataleft > 0 && bytestream2_get_bytes_left(&gb) > 0) {
        tag = bytestream2_get_byteu(&gb);
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d_end - d < 8 || bytestream2_get_bytes_left(&gb) < 8)
                return AVERROR_INVALIDDATA;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d_end - d < 1 || bytestream2_get_bytes_left(&gb) < 1)
                        return AVERROR_INVALIDDATA;
                    queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = bytestream2_get_byte(&gb);
                    chainofs |= ((bytestream2_peek_byte(&gb) & 0xF0) << 4);
                    chainlen = (bytestream2_get_byte(&gb) & 0x0F) + 3;
                    if (chainlen == speclen) {
                        chainlen = bytestream2_get_byte(&gb) + 0xF + 3;
                    }
                    if (d_end - d < chainlen)
                        return AVERROR_INVALIDDATA;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
    return d - dest;
}
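
/* Decode one RLE run sequence (the 0xFF escape of decode method 3) into dest:
 * an odd src_count starts with a single literal byte, then each control byte
 * either copies 2 * (l & 0x7F) literal bytes (high bit set) or repeats the
 * next 16-bit pattern l times (high bit clear). Stops after src_count output
 * bytes and returns how many source bytes were consumed, so the caller can
 * skip them in its own bytestream. */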
static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    unsigned char *pd;
    int i, l, used = 0;
    unsigned char *dest_end = dest + dest_len;
    GetByteContext gb;
    uint16_t run_val;

    bytestream2_init(&gb, src, src_size);
    pd = dest;
    if (src_count & 1) {
        if (bytestream2_get_bytes_left(&gb) < 1)
            return 0;
        *pd++ = bytestream2_get_byteu(&gb);
        used++;
    }

    do {
        if (bytestream2_get_bytes_left(&gb) < 1)
            break;
        l = bytestream2_get_byteu(&gb);
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l)
                return bytestream2_tell(&gb);
            bytestream2_get_bufferu(&gb, pd, l);
            pd += l;
        } else {
            if (dest_end - pd < 2*l || bytestream2_get_bytes_left(&gb) < 2)
                return bytestream2_tell(&gb);
            run_val = bytestream2_get_ne16(&gb);
            for (i = 0; i < l; i++) {
                AV_WN16(pd, run_val);
                pd += 2;
            }
            l *= 2;
        }
        used += l;
    } while (used < src_count);

    return bytestream2_tell(&gb);
}

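/* Decode a single frame from the packet stored in s->buf/s->size. The
 * 16-byte frame record selects the update rectangle; if it covers only part
 * of the picture, the previous frame is copied in first. An optional palette
 * chunk (flag 0x02 in byte 15) is read next, then a method byte: bit 7 marks
 * an LZ-compressed payload that is expanded via lz_unpack() before decoding.
 * Method 1 mixes literal runs with copies from the previous frame, method 2
 * stores raw lines, and method 3 is method 1 plus an RLE escape handled by
 * rle_unpack(). */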
static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    GetByteContext gb;

    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y, prev_linesize;
    int frame_width, frame_height;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {

        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid horizontal range %d-%d\n",
               frame_x, frame_width);
        return AVERROR_INVALIDDATA;
    }
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid vertical range %d-%d\n",
               frame_y, frame_height);
        return AVERROR_INVALIDDATA;
    }

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame->data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {

        memcpy(frame->data[0], s->prev_frame->data[0],
            s->avctx->height * frame->linesize[0]);
    }

    /* check if there is a new palette */
    bytestream2_init(&gb, s->buf + 16, s->size - 16);
    if (s->buf[15] & 0x02) {
        bytestream2_skip(&gb, 2);
        palette32 = (unsigned int *)s->palette;
        if (bytestream2_get_bytes_left(&gb) >= PALETTE_COUNT * 3) {
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = bytestream2_get_byteu(&gb) * 4;
                g = bytestream2_get_byteu(&gb) * 4;
                b = bytestream2_get_byteu(&gb) * 4;
                palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
                palette32[i] |= palette32[i] >> 6 & 0x30303;
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Incomplete palette\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (!s->size)
        return 0;

    /* originally UnpackFrame in VAG's code */
    if (bytestream2_get_bytes_left(&gb) < 1)
        return AVERROR_INVALIDDATA;
    meth = bytestream2_get_byteu(&gb);
    if (meth & 0x80) {
        int size;
        if (!s->unpack_buffer_size) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Trying to unpack LZ-compressed frame with no LZ buffer\n");
            return AVERROR_INVALIDDATA;
        }
        size = lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
                         s->unpack_buffer, s->unpack_buffer_size);
        if (size < 0)
            return size;
        meth &= 0x7F;
        bytestream2_init(&gb, s->unpack_buffer, size);
    }

    dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
    if (s->prev_frame->data[0]) {
        prev_linesize = s->prev_frame->linesize[0];
        pp = s->prev_frame->data[0] + frame_y * prev_linesize + frame_x;
    } else {
        pp = NULL;
        prev_linesize = 0;
    }
    switch (meth) {
    case 1:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (ofs + len > frame_width ||
                        bytestream2_get_bytes_left(&gb) < len)
                        return AVERROR_INVALIDDATA;
                    bytestream2_get_bufferu(&gb, &dp[ofs], len);
                    ofs += len;
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;

    case 2:
        for (i = 0; i < frame_height; i++) {
            bytestream2_get_buffer(&gb, dp, frame_width);
            dp += frame->linesize[0];
        }
        break;

    case 3:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (bytestream2_peek_byte(&gb) == 0xFF) {
                        int slen = len;
                        bytestream2_get_byte(&gb);
                        len = rle_unpack(gb.buffer, &dp[ofs],
                                         len, bytestream2_get_bytes_left(&gb),
                                         frame_width - ofs);
                        ofs += slen;
                        bytestream2_skip(&gb, len);
                    } else {
                        if (ofs + len > frame_width ||
                            bytestream2_get_bytes_left(&gb) < len)
                            return AVERROR_INVALIDDATA;
                        bytestream2_get_buffer(&gb, &dp[ofs], len);
                        ofs += len;
                    }
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;
    }
    return 0;
}

static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->unpack_buffer);
    s->unpack_buffer_size = 0;

    return 0;
}

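/* Validate the 0x330-byte VMD header passed as extradata, allocate the LZ
 * unpack buffer whose size the header advertises, and expand the initial
 * 6-bit palette into the 8-bit ARGB table used for PAL8 output. */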
static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n",
               VMD_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    if (s->unpack_buffer_size) {
        s->unpack_buffer = av_malloc(s->unpack_buffer_size);
        if (!s->unpack_buffer)
            return AVERROR(ENOMEM);
    }

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
        palette32[i] |= palette32[i] >> 6 & 0x30303;
    }

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    return 0;
}

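/* Decode one packet: it must carry at least the 16-byte frame record. The
 * frame is decoded into a refcounted buffer, the current palette is attached
 * as frame->data[1], and the result is kept in s->prev_frame to serve as the
 * reference for interframe copies in the next packet. */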
static int vmdvideo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                                 int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if ((ret = vmd_decode(s, frame)) < 0)
        return ret;

    /* make the palette available on the way out */
    memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    if ((ret = av_frame_replace(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

const FFCodec ff_vmdvideo_decoder = {
    .p.name         = "vmdvideo",
    CODEC_LONG_NAME("Sierra VMD video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    FF_CODEC_DECODE_CB(vmdvideo_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};