FFmpeg
dxa.c
/*
 * Feeble Files/ScummVM DXA decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DXA Video decoder
 */

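/*
 * Rough usage sketch (illustration only, not part of the original file):
 * the decoder is reached through the public libavcodec API like any other
 * video decoder. It is assumed here that packets come from FFmpeg's DXA
 * demuxer, which provides the frame dimensions.
 *
 *     const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_DXA);
 *     AVCodecContext *ctx  = avcodec_alloc_context3(codec);
 *     // width/height are assumed to be set from the demuxer
 *     avcodec_open2(ctx, codec, NULL);
 *     avcodec_send_packet(ctx, pkt);
 *     avcodec_receive_frame(ctx, frame);
 *
 * Decoded frames are AV_PIX_FMT_PAL8; the 256-entry palette is carried in
 * frame->data[1] (see decode_frame() below).
 */
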
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"
#include "avcodec.h"
#include "codec_internal.h"
#include "internal.h"

#include <zlib.h>

/*
 * Decoder context
 */
typedef struct DxaDecContext {
    AVFrame *prev;       // previously decoded frame, used as reference

    int dsize;           // size of the decompression buffer in bytes
#define DECOMP_BUF_PADDING 16
    uint8_t *decomp_buf; // holds the zlib-inflated frame payload
    uint32_t pal[256];   // current palette, 0xFFRRGGBB entries
} DxaDecContext;

static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };

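/*
 * Decode a frame coded with ScummVM methods 12 or 13. The layout of the
 * input chunk, as implied by the pointer setup below, is:
 *
 *   bytes  0..3   big-endian size of the block data stream
 *   bytes  4..7   big-endian size of the motion vector stream
 *   bytes  8..11  not interpreted here
 *   then one opcode byte per 4x4 block (width * height / 16 bytes),
 *   followed by the data, motion vector and mask streams, in that order.
 */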
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
                     int stride, uint8_t *src, int srcsize, uint8_t *ref)
{
    uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
    uint8_t *src_end = src + srcsize;
    int i, j, k;
    int type, x, y, d, d2;
    uint32_t mask;

    if (12ULL + ((avctx->width * avctx->height) >> 4) + AV_RB32(src + 0) + AV_RB32(src + 4) > srcsize)
        return AVERROR_INVALIDDATA;

    code = src  + 12;
    data = code + ((avctx->width * avctx->height) >> 4);
    mv   = data + AV_RB32(src + 0);
    msk  = mv   + AV_RB32(src + 4);

    for(j = 0; j < avctx->height; j += 4){
        for(i = 0; i < avctx->width; i += 4){
            if (data > src_end || mv > src_end || msk > src_end)
                return AVERROR_INVALIDDATA;
            tmp  = dst + i;
            tmp2 = ref + i;
            type = *code++;
            switch(type){
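            /*
             * Motion vectors are packed one per byte: x in the high nibble,
             * y in the low one; nibble values 8..15 map to 0..-7 via the
             * "8 - v" adjustment below.
             */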
            case 4: // motion compensation
                x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                if (i < -x || avctx->width  - i - 4 < x ||
                    j < -y || avctx->height - j - 4 < y) {
                    av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                    return AVERROR_INVALIDDATA;
                }
                tmp2 += x + y*stride;
            case 0: // skip
            case 5: // skip in method 12
                for(y = 0; y < 4; y++){
                    memcpy(tmp, tmp2, 4);
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 1:  // masked change
            case 10: // masked change with only half of pixels changed
            case 11: // cases 10-15 are for method 12 only
            case 12:
            case 13:
            case 14:
            case 15:
                if(type == 1){
                    mask = AV_RB16(msk);
                    msk += 2;
                }else{
                    type -= 10;
                    mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
                    msk++;
                }
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
                        mask <<= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 2: // fill block
                for(y = 0; y < 4; y++){
                    memset(tmp, data[0], 4);
                    tmp += stride;
                }
                data++;
                break;
            case 3: // raw block
                for(y = 0; y < 4; y++){
                    memcpy(tmp, data, 4);
                    data += 4;
                    tmp  += stride;
                }
                break;
            case 8: // subblocks - method 13 only
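                /*
                 * One mask byte controls the four 2x2 subblocks of this 4x4
                 * block, two bits each (top-left, top-right, bottom-left,
                 * bottom-right): 00 skip, 01 fill, 10 motion compensation,
                 * 11 raw.
                 */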
                mask = *msk++;
                for(k = 0; k < 4; k++){
                    d  = ((k & 1) << 1) + ((k & 2) * stride);
                    d2 = ((k & 1) << 1) + ((k & 2) * stride);
                    tmp2 = ref + i + d2;
                    switch(mask & 0xC0){
                    case 0x80: // motion compensation
                        x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                        y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                        if (i + 2*(k & 1) < -x || avctx->width  - i - 2*(k & 1) - 2 < x ||
                            j +   (k & 2) < -y || avctx->height - j -   (k & 2) - 2 < y) {
                            av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                            return AVERROR_INVALIDDATA;
                        }
                        tmp2 += x + y*stride;
                    case 0x00: // skip
                        tmp[d + 0         ] = tmp2[0];
                        tmp[d + 1         ] = tmp2[1];
                        tmp[d + 0 + stride] = tmp2[0 + stride];
                        tmp[d + 1 + stride] = tmp2[1 + stride];
                        break;
                    case 0x40: // fill
                        tmp[d + 0         ] = data[0];
                        tmp[d + 1         ] = data[0];
                        tmp[d + 0 + stride] = data[0];
                        tmp[d + 1 + stride] = data[0];
                        data++;
                        break;
                    case 0xC0: // raw
                        tmp[d + 0         ] = *data++;
                        tmp[d + 1         ] = *data++;
                        tmp[d + 0 + stride] = *data++;
                        tmp[d + 1 + stride] = *data++;
                        break;
                    }
                    mask <<= 2;
                }
                break;
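            /*
             * Vector quantization: "data" holds a small colour table and the
             * mask selects an entry per pixel - one bit per pixel for two
             * colours (case 32), two bits per pixel for three or four
             * colours (cases 33 and 34).
             */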
            case 32: // vector quantization - 2 colors
                mask = AV_RB16(msk);
                msk += 2;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 1];
                        mask >>= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += 2;
                break;
            case 33: // vector quantization - 3 or 4 colors
            case 34:
                mask = AV_RB32(msk);
                msk += 4;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 3];
                        mask >>= 2;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += type - 30;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride * 4;
        ref += stride * 4;
    }
    return 0;
}

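/*
 * As read by decode_frame() below, a packet starts with an optional 'CMAP'
 * chunk (the tag followed by 256 big-endian RGB triplets), then either a
 * 'NULL' tag, meaning "repeat the previous frame", or a frame chunk: a
 * 4-byte tag, one compression-type byte and, for all other types except 4,
 * a 4-byte field that is skipped here followed by a zlib stream which is
 * inflated into decomp_buf.
 */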
static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
                        int *got_frame, AVPacket *avpkt)
{
    DxaDecContext * const c = avctx->priv_data;
    uint8_t *outptr, *srcptr, *tmpptr;
    unsigned long dsize;
    int i, j, compr, ret;
    int stride;
    int pc = 0;
    GetByteContext gb;

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    /* make the palette available on the way out */
    if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
        bytestream2_skip(&gb, 4);
        for(i = 0; i < 256; i++){
            c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        pc = 1;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
    frame->palette_has_changed = pc;

    outptr = frame->data[0];
    srcptr = c->decomp_buf;
    tmpptr = c->prev->data[0];
    stride = frame->linesize[0];

    if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
        compr = -1;
    else
        compr = bytestream2_get_byte(&gb);

    dsize = c->dsize;
    if (compr != 4 && compr != -1) {
        bytestream2_skip(&gb, 4);
        if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
                       bytestream2_get_bytes_left(&gb)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
            return AVERROR_UNKNOWN;
        }
        memset(c->decomp_buf + dsize, 0, DECOMP_BUF_PADDING);
    }

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "compr:%2d, dsize:%d\n", compr, (int)dsize);

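    /*
     * Compression types handled below:
     *   -1      'NULL' tag: repeat the previous frame
     *    2, 4   intra frame, copied from the buffer row by row
     *    3, 5   delta frame, XORed against the previous frame
     *   12, 13  ScummVM block coding, handled by decode_13()
     */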
    switch(compr){
    case -1:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (c->prev->data[0])
            memcpy(frame->data[0], c->prev->data[0], frame->linesize[0] * avctx->height);
        else{ // Should happen only when first frame is 'NULL'
            memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
        }
        break;
    case 2:
    case 4:
        frame->key_frame = 1;
        frame->pict_type = AV_PICTURE_TYPE_I;
        for (j = 0; j < avctx->height; j++) {
            memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 3:
    case 5:
        if (!tmpptr) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            if (!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
                return AVERROR_INVALIDDATA;
        }
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        for (j = 0; j < avctx->height; j++) {
            if(tmpptr){
                for(i = 0; i < avctx->width; i++)
                    outptr[i] = srcptr[i] ^ tmpptr[i];
                tmpptr += stride;
            }else
                memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 12: // ScummVM coding
    case 13:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (!c->prev->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
            return AVERROR_INVALIDDATA;
        }
        decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, dsize, c->prev->data[0]);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
        return AVERROR_INVALIDDATA;
    }

    av_frame_unref(c->prev);
    if ((ret = av_frame_ref(c->prev, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    if (avctx->width%4 || avctx->height%4) {
        avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
        return AVERROR_INVALIDDATA;
    }

    c->prev = av_frame_alloc();
    if (!c->prev)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    c->dsize = avctx->width * avctx->height * 2;
    c->decomp_buf = av_malloc(c->dsize + DECOMP_BUF_PADDING);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    av_freep(&c->decomp_buf);
    av_frame_free(&c->prev);

    return 0;
}

const FFCodec ff_dxa_decoder = {
    .p.name         = "dxa",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_DXA,
    .priv_data_size = sizeof(DxaDecContext),
    .init           = decode_init,
    .close          = decode_end,
    FF_CODEC_DECODE_CB(decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};