/*
 * QPEG codec
 * Copyright (c) 2004 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * QPEG codec.
 */

#include "avcodec.h"
#include "bytestream.h"
#include "decode.h"
#include "internal.h"

typedef struct QpegContext{
    AVCodecContext *avctx;
    AVFrame *ref;
    uint32_t pal[256];
    GetByteContext buffer;
} QpegContext;

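/*
 * Decodes intra frames.  The input is a byte-oriented RLE stream that is
 * decoded bottom-up: dst starts at the last row and moves up one stride each
 * time a row is filled.  The first byte of every chunk selects the mode
 * (see the branches below): 0xFC ends the picture, 0xE0..0xFF encode runs of
 * a single pixel value of increasing length, and everything below 0xE0
 * encodes copies of literal bytes taken straight from the bitstream.
 */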
static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst,
                              int stride, int width, int height)
{
    int i;
    int code;
    int c0, c1;
    int run, copy;
    int filled = 0;
    int rows_to_go;

    rows_to_go = height;
    height--;
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (rows_to_go > 0)) {
        code = bytestream2_get_byte(&qctx->buffer);
        run = copy = 0;
        if(code == 0xFC) /* end-of-picture code */
            break;
        if(code >= 0xF8) { /* very long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0x7) << 16) + (c0 << 8) + c1 + 2;
        } else if (code >= 0xF0) { /* long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0xF) << 8) + c0 + 2;
        } else if (code >= 0xE0) { /* short run */
            run = (code & 0x1F) + 2;
        } else if (code >= 0xC0) { /* very long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x3F) << 16) + (c0 << 8) + c1 + 1;
        } else if (code >= 0x80) { /* long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x7F) << 8) + c0 + 1;
        } else { /* short copy */
            copy = code + 1;
        }

        /* perform actual run or copy */
        if(run) {
            int p;

            p = bytestream2_get_byte(&qctx->buffer);
            for(i = 0; i < run; i++) {
                int step = FFMIN(run - i, width - filled);
                memset(dst+filled, p, step);
                filled += step;
                i += step - 1;
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    rows_to_go--;
                    while (run - i > width && rows_to_go > 0) {
                        memset(dst, p, width);
                        dst -= stride;
                        rows_to_go--;
                        i += width;
                    }
                    if(rows_to_go <= 0)
                        break;
                }
            }
        } else {
            if (bytestream2_get_bytes_left(&qctx->buffer) < copy)
                copy = bytestream2_get_bytes_left(&qctx->buffer);
            while (copy > 0) {
                int step = FFMIN(copy, width - filled);
                bytestream2_get_bufferu(&qctx->buffer, dst + filled, step);
                filled += step;
                copy -= step;
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    rows_to_go--;
                    if(rows_to_go <= 0)
                        break;
                }
            }
        }
    }
}

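/*
 * Motion-compensation block dimensions for inter frames; the low nibble of a
 * 0xFx escape code indexes these two tables (see qpeg_decode_inter()).
 */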
static const uint8_t qpeg_table_h[16] =
 { 0x00, 0x20, 0x20, 0x20, 0x18, 0x10, 0x10, 0x20, 0x10, 0x08, 0x18, 0x08, 0x08, 0x18, 0x10, 0x04};
static const uint8_t qpeg_table_w[16] =
 { 0x00, 0x20, 0x18, 0x08, 0x18, 0x10, 0x20, 0x10, 0x08, 0x10, 0x20, 0x20, 0x08, 0x10, 0x18, 0x04};

/* Decodes delta frames */
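/*
 * Inter opcodes handled below, after the optional 0xFx motion-compensation
 * escapes that may precede them when the frame header enables deltas:
 *   0xE0         end-of-picture
 *   0xE1..0xFF   run of one pixel value, length = (code & 0x1F) + 1
 *   0xC0..0xDF   copy of literal bytes,  length = (code & 0x1F) + 1
 *   0x80..0xBF   skip (keep reference pixels); 0x80/0x81 take the skip
 *                length from the next byte plus 64 or 320 respectively
 *   0x01..0x7F   single pixel looked up in the 128-byte code table
 *   0x00         one-pixel skip
 */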
static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst,
                                          int stride, int width, int height,
                                          int delta, const uint8_t *ctable,
                                          uint8_t *refdata)
{
    int i, j;
    int code;
    int filled = 0;
    int orig_height;

    if (refdata) {
        /* copy prev frame */
        for (i = 0; i < height; i++)
            memcpy(dst + (i * stride), refdata + (i * stride), width);
    } else {
        refdata = dst;
    }

    orig_height = height;
    height--;
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (height >= 0)) {
        code = bytestream2_get_byte(&qctx->buffer);

        if(delta) {
            /* motion compensation */
            while(bytestream2_get_bytes_left(&qctx->buffer) > 0 && (code & 0xF0) == 0xF0) {
                if(delta == 1) {
                    int me_idx;
                    int me_w, me_h, me_x, me_y;
                    uint8_t *me_plane;
                    int corr, val;

                    /* get block size by index */
                    me_idx = code & 0xF;
                    me_w = qpeg_table_w[me_idx];
                    me_h = qpeg_table_h[me_idx];

                    /* extract motion vector */
                    corr = bytestream2_get_byte(&qctx->buffer);

                    val = corr >> 4;
                    if(val > 7)
                        val -= 16;
                    me_x = val;

                    val = corr & 0xF;
                    if(val > 7)
                        val -= 16;
                    me_y = val;

                    /* check motion vector */
                    if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
                        (height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
                        (filled + me_w > width) || (height - me_h < 0))
                        av_log(qctx->avctx, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
                               me_x, me_y, me_w, me_h, filled, height);
                    else {
                        /* do motion compensation */
                        me_plane = refdata + (filled + me_x) + (height - me_y) * stride;
                        for(j = 0; j < me_h; j++) {
                            for(i = 0; i < me_w; i++)
                                dst[filled + i - (j * stride)] = me_plane[i - (j * stride)];
                        }
                    }
                }
                code = bytestream2_get_byte(&qctx->buffer);
            }
        }

        if(code == 0xE0) /* end-of-picture code */
            break;
        if(code > 0xE0) { /* run code: 0xE1..0xFF */
            int p;

            code &= 0x1F;
            p = bytestream2_get_byte(&qctx->buffer);
            for(i = 0; i <= code; i++) {
                dst[filled++] = p;
                if(filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */
            code &= 0x1F;

            if(code + 1 > bytestream2_get_bytes_left(&qctx->buffer))
                break;

            for(i = 0; i <= code; i++) {
                dst[filled++] = bytestream2_get_byte(&qctx->buffer);
                if(filled >= width) {
                    filled = 0;
                    dst -= stride;
                    height--;
                    if (height < 0)
                        break;
                }
            }
        } else if(code >= 0x80) { /* skip code: 0x80..0xBF */
            int skip;

            code &= 0x3F;
            /* codes 0x80 and 0x81 are actually escape codes,
               skip value minus constant is in the next byte */
            if(!code)
                skip = bytestream2_get_byte(&qctx->buffer) + 64;
            else if(code == 1)
                skip = bytestream2_get_byte(&qctx->buffer) + 320;
            else
                skip = code;
            filled += skip;
            while( filled >= width) {
                filled -= width;
                dst -= stride;
                height--;
                if(height < 0)
                    break;
            }
        } else {
            /* zero code treated as one-pixel skip */
            if(code) {
                dst[filled++] = ctable[code & 0x7F];
            }
            else
                filled++;
            if(filled >= width) {
                filled = 0;
                dst -= stride;
                height--;
            }
        }
    }
}

static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    uint8_t ctable[128];
    QpegContext * const a = avctx->priv_data;
    AVFrame * const p = data;
    AVFrame * const ref = a->ref;
    uint8_t* outdata;
    int delta, intra, ret;

    if (avpkt->size < 0x86) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&a->buffer, avpkt->data, avpkt->size);

    if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    outdata = p->data[0];
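    /*
     * Packet layout as parsed below (offsets from the packet start), which
     * is also why packets shorter than 0x86 bytes are rejected above:
     *   0x00   4 bytes    skipped
     *   0x04   128 bytes  code table used by inter frames (ctable)
     *   0x84   1 byte     skipped
     *   0x85   1 byte     frame mode: 0x10 selects intra, anything else is
     *                     the "delta" level passed to qpeg_decode_inter()
     *   0x86   ...        RLE/MC data consumed by the decode functions
     */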
    bytestream2_skip(&a->buffer, 4);
    bytestream2_get_buffer(&a->buffer, ctable, 128);
    bytestream2_skip(&a->buffer, 1);

    delta = bytestream2_get_byte(&a->buffer);
    intra = delta == 0x10;
    if (intra) {
        qpeg_decode_intra(a, outdata, p->linesize[0], avctx->width, avctx->height);
    } else {
        qpeg_decode_inter(a, outdata, p->linesize[0], avctx->width, avctx->height, delta, ctable, ref->data[0]);
    }

    /* make the palette available on the way out */
    p->palette_has_changed = ff_copy_palette(a->pal, avpkt, avctx);
    memcpy(p->data[1], a->pal, AVPALETTE_SIZE);

    av_frame_unref(ref);
    if ((ret = av_frame_ref(ref, p)) < 0)
        return ret;

    p->key_frame = intra;
    p->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    *got_frame = 1;

    return avpkt->size;
}

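/*
 * Resets the decoder state: drops the stored reference frame and rebuilds the
 * palette from the tail of the extradata (up to 1024 bytes, i.e. 256 32-bit
 * entries) with the alpha byte forced to opaque.  decode_init() calls this to
 * set up the initial palette as well.
 */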
static void decode_flush(AVCodecContext *avctx){
    QpegContext * const a = avctx->priv_data;
    int i, pal_size;
    const uint8_t *pal_src;

    av_frame_unref(a->ref);

    pal_size = FFMIN(1024U, avctx->extradata_size);
    pal_src = avctx->extradata + avctx->extradata_size - pal_size;

    for (i=0; i<pal_size/4; i++)
        a->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i);
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    QpegContext * const a = avctx->priv_data;

    av_frame_free(&a->ref);

    return 0;
}

static av_cold int decode_init(AVCodecContext *avctx){
    QpegContext * const a = avctx->priv_data;

    a->avctx = avctx;
    avctx->pix_fmt= AV_PIX_FMT_PAL8;

    a->ref = av_frame_alloc();
    if (!a->ref)
        return AVERROR(ENOMEM);

    decode_flush(avctx);

    return 0;
}

const AVCodec ff_qpeg_decoder = {
    .name           = "qpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Q-team QPEG"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_QPEG,
    .priv_data_size = sizeof(QpegContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};