vmdvideo.c
/*
 * Sierra VMD video decoder
 * Copyright (c) 2004 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD video decoder
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 */

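/*
 * Rough caller-side sketch, assuming the standard libavcodec API; the names
 * vmd_file_header, frame_width, frame_height, pkt and frm below are
 * placeholders (width/height and extradata normally come from the demuxer):
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_VMDVIDEO);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     ctx->extradata_size  = 0x330;
 *     ctx->extradata = av_mallocz(0x330 + AV_INPUT_BUFFER_PADDING_SIZE);
 *     memcpy(ctx->extradata, vmd_file_header, 0x330);
 *     ctx->width  = frame_width;
 *     ctx->height = frame_height;
 *     avcodec_open2(ctx, codec, NULL);
 *     // each AVPacket: 16-byte frame record followed by the frame payload
 *     avcodec_send_packet(ctx, pkt);
 *     avcodec_receive_frame(ctx, frm);
 */
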
#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "decode.h"
#include "bytestream.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame *prev_frame;

    const unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

    int x_off, y_off;
} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF

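/* Decompress an LZSS-style stream from src into dest using a 0x1000-byte
 * ring buffer pre-filled with spaces. The stream starts with a little-endian
 * 32-bit count of bytes to produce; an optional 0x56781234 marker selects
 * the variant with extended match lengths (speclen). Returns the number of
 * bytes written to dest, or a negative error code on truncated input. */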
static int lz_unpack(const unsigned char *src, int src_len,
                     unsigned char *dest, int dest_len)
{
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_len);
    d = dest;
    d_end = d + dest_len;
    dataleft = bytestream2_get_le32(&gb);
    memset(queue, 0x20, QUEUE_SIZE);
    if (bytestream2_get_bytes_left(&gb) < 4)
        return AVERROR_INVALIDDATA;
    if (bytestream2_peek_le32(&gb) == 0x56781234) {
        bytestream2_skipu(&gb, 4);
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100;  /* no speclen */
    }

    while (dataleft > 0 && bytestream2_get_bytes_left(&gb) > 0) {
        tag = bytestream2_get_byteu(&gb);
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d_end - d < 8 || bytestream2_get_bytes_left(&gb) < 8)
                return AVERROR_INVALIDDATA;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d_end - d < 1 || bytestream2_get_bytes_left(&gb) < 1)
                        return AVERROR_INVALIDDATA;
                    queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = bytestream2_get_byte(&gb);
                    chainofs |= ((bytestream2_peek_byte(&gb) & 0xF0) << 4);
                    chainlen = (bytestream2_get_byte(&gb) & 0x0F) + 3;
                    if (chainlen == speclen) {
                        chainlen = bytestream2_get_byte(&gb) + 0xF + 3;
                    }
                    if (d_end - d < chainlen)
                        return AVERROR_INVALIDDATA;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
    return d - dest;
}
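
/* Expand an RLE-packed scanline segment of src_count output bytes into dest:
 * a length byte with the high bit set is followed by (l & 0x7F) * 2 literal
 * bytes, otherwise a 16-bit value follows and is repeated l times. Returns
 * the number of source bytes consumed. */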
static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    unsigned char *pd;
    int i, l, used = 0;
    unsigned char *dest_end = dest + dest_len;
    GetByteContext gb;
    uint16_t run_val;

    bytestream2_init(&gb, src, src_size);
    pd = dest;
    if (src_count & 1) {
        if (bytestream2_get_bytes_left(&gb) < 1)
            return 0;
        *pd++ = bytestream2_get_byteu(&gb);
        used++;
    }

    do {
        if (bytestream2_get_bytes_left(&gb) < 1)
            break;
        l = bytestream2_get_byteu(&gb);
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l)
                return bytestream2_tell(&gb);
            bytestream2_get_bufferu(&gb, pd, l);
            pd += l;
        } else {
            if (dest_end - pd < 2*l || bytestream2_get_bytes_left(&gb) < 2)
                return bytestream2_tell(&gb);
            run_val = bytestream2_get_ne16(&gb);
            for (i = 0; i < l; i++) {
                AV_WN16(pd, run_val);
                pd += 2;
            }
            l *= 2;
        }
        used += l;
    } while (used < src_count);

    return bytestream2_tell(&gb);
}

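/* Decode one video frame into 'frame'. The 16-byte frame record at the start
 * of s->buf gives the updated rectangle; the payload may carry a new palette
 * (buf[15] & 0x02) and may be LZ-compressed (method byte & 0x80), and is then
 * rendered with one of three per-scanline update methods, methods 1 and 3
 * referencing the previous frame. */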
static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    GetByteContext gb;

    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y, prev_linesize;
    int frame_width, frame_height;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {

        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid horizontal range %d-%d\n",
               frame_x, frame_width);
        return AVERROR_INVALIDDATA;
    }
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Invalid vertical range %d-%d\n",
               frame_y, frame_height);
        return AVERROR_INVALIDDATA;
    }

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame->data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {

        memcpy(frame->data[0], s->prev_frame->data[0],
            s->avctx->height * frame->linesize[0]);
    }

    /* check if there is a new palette */
    bytestream2_init(&gb, s->buf + 16, s->size - 16);
    if (s->buf[15] & 0x02) {
        bytestream2_skip(&gb, 2);
        palette32 = (unsigned int *)s->palette;
        if (bytestream2_get_bytes_left(&gb) >= PALETTE_COUNT * 3) {
            for (i = 0; i < PALETTE_COUNT; i++) {
                r = bytestream2_get_byteu(&gb) * 4;
                g = bytestream2_get_byteu(&gb) * 4;
                b = bytestream2_get_byteu(&gb) * 4;
                palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
                palette32[i] |= palette32[i] >> 6 & 0x30303;
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Incomplete palette\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (!s->size)
        return 0;

    /* originally UnpackFrame in VAG's code */
    if (bytestream2_get_bytes_left(&gb) < 1)
        return AVERROR_INVALIDDATA;
    meth = bytestream2_get_byteu(&gb);
    if (meth & 0x80) {
        int size;
        if (!s->unpack_buffer_size) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Trying to unpack LZ-compressed frame with no LZ buffer\n");
            return AVERROR_INVALIDDATA;
        }
        size = lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
                         s->unpack_buffer, s->unpack_buffer_size);
        if (size < 0)
            return size;
        meth &= 0x7F;
        bytestream2_init(&gb, s->unpack_buffer, size);
    }

    dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
    if (s->prev_frame->data[0]) {
        prev_linesize = s->prev_frame->linesize[0];
        pp = s->prev_frame->data[0] + frame_y * prev_linesize + frame_x;
    } else {
        pp = NULL;
        prev_linesize = 0;
    }
    switch (meth) {
    case 1:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (ofs + len > frame_width ||
                        bytestream2_get_bytes_left(&gb) < len)
                        return AVERROR_INVALIDDATA;
                    bytestream2_get_bufferu(&gb, &dp[ofs], len);
                    ofs += len;
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;

    case 2:
        for (i = 0; i < frame_height; i++) {
            bytestream2_get_buffer(&gb, dp, frame_width);
            dp += frame->linesize[0];
        }
        break;

    case 3:
        for (i = 0; i < frame_height; i++) {
            ofs = 0;
            do {
                len = bytestream2_get_byte(&gb);
                if (len & 0x80) {
                    len = (len & 0x7F) + 1;
                    if (bytestream2_peek_byte(&gb) == 0xFF) {
                        int slen = len;
                        bytestream2_get_byte(&gb);
                        len = rle_unpack(gb.buffer, &dp[ofs],
                                         len, bytestream2_get_bytes_left(&gb),
                                         frame_width - ofs);
                        ofs += slen;
                        bytestream2_skip(&gb, len);
                    } else {
                        if (ofs + len > frame_width ||
                            bytestream2_get_bytes_left(&gb) < len)
                            return AVERROR_INVALIDDATA;
                        bytestream2_get_buffer(&gb, &dp[ofs], len);
                        ofs += len;
                    }
                } else {
                    /* interframe pixel copy */
                    if (ofs + len + 1 > frame_width || !pp)
                        return AVERROR_INVALIDDATA;
                    memcpy(&dp[ofs], &pp[ofs], len + 1);
                    ofs += len + 1;
                }
            } while (ofs < frame_width);
            if (ofs > frame_width) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "offset > width (%d > %d)\n",
                       ofs, frame_width);
                return AVERROR_INVALIDDATA;
            }
            dp += frame->linesize[0];
            pp = FF_PTR_ADD(pp, prev_linesize);
        }
        break;
    }
    return 0;
}

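/* Release the previous-frame reference and the LZ unpack buffer. */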
static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->unpack_buffer);
    s->unpack_buffer_size = 0;

    return 0;
}

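/* Validate the 0x330-byte extradata header, allocate the LZ unpack buffer
 * whose size is stored at header offset 800, and load the initial palette
 * (6-bit components scaled to 8 bits) from header offset 28. */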
static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n",
            VMD_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    if (s->unpack_buffer_size) {
        s->unpack_buffer = av_malloc(s->unpack_buffer_size);
        if (!s->unpack_buffer)
            return AVERROR(ENOMEM);
    }

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
        palette32[i] |= palette32[i] >> 6 & 0x30303;
    }

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    return 0;
}

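/* Decode one packet: the first 16 bytes are the VMD frame record, the rest
 * is the frame payload. Outputs the image plus palette in 'frame' and keeps
 * a reference to it for interframe prediction of the next frame. */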
static int vmdvideo_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                                 int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;
    int ret;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if ((ret = vmd_decode(s, frame)) < 0)
        return ret;

    /* make the palette available on the way out */
    memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    if ((ret = av_frame_replace(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* report that the buffer was completely consumed */
    return buf_size;
}

const FFCodec ff_vmdvideo_decoder = {
    .p.name         = "vmdvideo",
    CODEC_LONG_NAME("Sierra VMD video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    FF_CODEC_DECODE_CB(vmdvideo_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
};