/*
 * QPEG codec
 * Copyright (c) 2004 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * QPEG codec.
 */
27 #include "avcodec.h"
28 #include "bytestream.h"
29 #include "internal.h"
30 
31 typedef struct QpegContext{
34  uint32_t pal[256];
36 } QpegContext;
37 
/**
 * Decode an intra frame.
 *
 * The bitstream is a byte-oriented RLE. The image is written bottom-up:
 * decoding starts at the last row and moves towards row 0 by subtracting
 * the stride. Opcodes (first byte):
 *   0xFC        - end of picture
 *   0xF8..0xFF  - very long run  (3 extra bits + 2 length bytes, then pixel)
 *   0xF0..0xF7  - long run       (4 extra bits + 1 length byte, then pixel)
 *   0xE0..0xEF  - short run      (5-bit length, then pixel)
 *   0xC0..0xDF  - very long copy (6 extra bits + 2 length bytes of literals)
 *   0x80..0xBF  - long copy      (7 extra bits + 1 length byte of literals)
 *   0x00..0x7F  - short copy     (code + 1 literal bytes)
 *
 * @param qctx   decoder context; its buffer member supplies the bitstream
 * @param dst    destination plane (top-left origin)
 * @param stride destination line size in bytes
 * @param width  picture width in pixels
 * @param height picture height in pixels
 */
static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst,
                              int stride, int width, int height)
{
    int i;
    int code;
    int c0, c1;
    int run, copy;
    int filled = 0;   /* pixels already written on the current row */
    int rows_to_go;

    rows_to_go = height;
    height--;
    /* bottom-up storage: start on the last row */
    dst = dst + height * stride;

    while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (rows_to_go > 0)) {
        code = bytestream2_get_byte(&qctx->buffer);
        run = copy = 0;
        if(code == 0xFC) /* end-of-picture code */
            break;
        if(code >= 0xF8) { /* very long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0x7) << 16) + (c0 << 8) + c1 + 2;
        } else if (code >= 0xF0) { /* long run */
            c0 = bytestream2_get_byte(&qctx->buffer);
            run = ((code & 0xF) << 8) + c0 + 2;
        } else if (code >= 0xE0) { /* short run */
            run = (code & 0x1F) + 2;
        } else if (code >= 0xC0) { /* very long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            c1 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x3F) << 16) + (c0 << 8) + c1 + 1;
        } else if (code >= 0x80) { /* long copy */
            c0 = bytestream2_get_byte(&qctx->buffer);
            copy = ((code & 0x7F) << 8) + c0 + 1;
        } else { /* short copy */
            copy = code + 1;
        }

        /* perform actual run or copy */
        if(run) {
            int p;

            p = bytestream2_get_byte(&qctx->buffer);
            for(i = 0; i < run; i++) {
                /* fill at most to the end of the current row, then wrap */
                int step = FFMIN(run - i, width - filled);
                memset(dst+filled, p, step);
                filled += step;
                i += step - 1; /* the for's i++ accounts for one pixel */
                if (filled >= width) {
                    filled = 0;
                    dst -= stride; /* move up one row (bottom-up image) */
                    rows_to_go--;
                    /* fast path: blast whole rows while the run spans them */
                    while (run - i > width && rows_to_go > 0) {
                        memset(dst, p, width);
                        dst -= stride;
                        rows_to_go--;
                        i += width;
                    }
                    if(rows_to_go <= 0)
                        break;
                }
            }
        } else {
            /* clamp literal count to the bytes actually left in the packet */
            if (bytestream2_get_bytes_left(&qctx->buffer) < copy)
                copy = bytestream2_get_bytes_left(&qctx->buffer);
            while (copy > 0) {
                int step = FFMIN(copy, width - filled);
                /* unchecked read is safe: copy was clamped above */
                bytestream2_get_bufferu(&qctx->buffer, dst + filled, step);
                filled += step;
                copy -= step;
                if (filled >= width) {
                    filled = 0;
                    dst -= stride;
                    rows_to_go--;
                    if(rows_to_go <= 0)
                        break;
                }
            }
        }
    }
}
120 
/* Motion-compensation block heights/widths in pixels, indexed by the low
 * nibble of an 0xF0-prefixed opcode in qpeg_decode_inter(). Index 0 is a
 * 0x0 placeholder (never a valid block). */
static const uint8_t qpeg_table_h[16] =
 { 0x00, 0x20, 0x20, 0x20, 0x18, 0x10, 0x10, 0x20, 0x10, 0x08, 0x18, 0x08, 0x08, 0x18, 0x10, 0x04};
static const uint8_t qpeg_table_w[16] =
 { 0x00, 0x20, 0x18, 0x08, 0x18, 0x10, 0x20, 0x10, 0x08, 0x10, 0x20, 0x20, 0x08, 0x10, 0x18, 0x04};
125 
126 /* Decodes delta frames */
128  int stride, int width, int height,
129  int delta, const uint8_t *ctable,
130  uint8_t *refdata)
131 {
132  int i, j;
133  int code;
134  int filled = 0;
135  int orig_height;
136 
137  if (refdata) {
138  /* copy prev frame */
139  for (i = 0; i < height; i++)
140  memcpy(dst + (i * stride), refdata + (i * stride), width);
141  } else {
142  refdata = dst;
143  }
144 
145  orig_height = height;
146  height--;
147  dst = dst + height * stride;
148 
149  while ((bytestream2_get_bytes_left(&qctx->buffer) > 0) && (height >= 0)) {
150  code = bytestream2_get_byte(&qctx->buffer);
151 
152  if(delta) {
153  /* motion compensation */
154  while(bytestream2_get_bytes_left(&qctx->buffer) > 0 && (code & 0xF0) == 0xF0) {
155  if(delta == 1) {
156  int me_idx;
157  int me_w, me_h, me_x, me_y;
158  uint8_t *me_plane;
159  int corr, val;
160 
161  /* get block size by index */
162  me_idx = code & 0xF;
163  me_w = qpeg_table_w[me_idx];
164  me_h = qpeg_table_h[me_idx];
165 
166  /* extract motion vector */
167  corr = bytestream2_get_byte(&qctx->buffer);
168 
169  val = corr >> 4;
170  if(val > 7)
171  val -= 16;
172  me_x = val;
173 
174  val = corr & 0xF;
175  if(val > 7)
176  val -= 16;
177  me_y = val;
178 
179  /* check motion vector */
180  if ((me_x + filled < 0) || (me_x + me_w + filled > width) ||
181  (height - me_y - me_h < 0) || (height - me_y >= orig_height) ||
182  (filled + me_w > width) || (height - me_h < 0))
183  av_log(qctx->avctx, AV_LOG_ERROR, "Bogus motion vector (%i,%i), block size %ix%i at %i,%i\n",
184  me_x, me_y, me_w, me_h, filled, height);
185  else {
186  /* do motion compensation */
187  me_plane = refdata + (filled + me_x) + (height - me_y) * stride;
188  for(j = 0; j < me_h; j++) {
189  for(i = 0; i < me_w; i++)
190  dst[filled + i - (j * stride)] = me_plane[i - (j * stride)];
191  }
192  }
193  }
194  code = bytestream2_get_byte(&qctx->buffer);
195  }
196  }
197 
198  if(code == 0xE0) /* end-of-picture code */
199  break;
200  if(code > 0xE0) { /* run code: 0xE1..0xFF */
201  int p;
202 
203  code &= 0x1F;
204  p = bytestream2_get_byte(&qctx->buffer);
205  for(i = 0; i <= code; i++) {
206  dst[filled++] = p;
207  if(filled >= width) {
208  filled = 0;
209  dst -= stride;
210  height--;
211  if (height < 0)
212  break;
213  }
214  }
215  } else if(code >= 0xC0) { /* copy code: 0xC0..0xDF */
216  code &= 0x1F;
217 
218  if(code + 1 > bytestream2_get_bytes_left(&qctx->buffer))
219  break;
220 
221  for(i = 0; i <= code; i++) {
222  dst[filled++] = bytestream2_get_byte(&qctx->buffer);
223  if(filled >= width) {
224  filled = 0;
225  dst -= stride;
226  height--;
227  if (height < 0)
228  break;
229  }
230  }
231  } else if(code >= 0x80) { /* skip code: 0x80..0xBF */
232  int skip;
233 
234  code &= 0x3F;
235  /* codes 0x80 and 0x81 are actually escape codes,
236  skip value minus constant is in the next byte */
237  if(!code)
238  skip = bytestream2_get_byte(&qctx->buffer) + 64;
239  else if(code == 1)
240  skip = bytestream2_get_byte(&qctx->buffer) + 320;
241  else
242  skip = code;
243  filled += skip;
244  while( filled >= width) {
245  filled -= width;
246  dst -= stride;
247  height--;
248  if(height < 0)
249  break;
250  }
251  } else {
252  /* zero code treated as one-pixel skip */
253  if(code) {
254  dst[filled++] = ctable[code & 0x7F];
255  }
256  else
257  filled++;
258  if(filled >= width) {
259  filled = 0;
260  dst -= stride;
261  height--;
262  }
263  }
264  }
265 }
266 
268  void *data, int *got_frame,
269  AVPacket *avpkt)
270 {
271  uint8_t ctable[128];
272  QpegContext * const a = avctx->priv_data;
273  AVFrame * const p = data;
274  AVFrame * const ref = a->ref;
275  uint8_t* outdata;
276  int delta, intra, ret;
277  int pal_size;
278  const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, &pal_size);
279 
280  if (avpkt->size < 0x86) {
281  av_log(avctx, AV_LOG_ERROR, "Packet is too small\n");
282  return AVERROR_INVALIDDATA;
283  }
284 
285  bytestream2_init(&a->buffer, avpkt->data, avpkt->size);
286 
287  if ((ret = ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF)) < 0)
288  return ret;
289  outdata = p->data[0];
290  bytestream2_skip(&a->buffer, 4);
291  bytestream2_get_buffer(&a->buffer, ctable, 128);
292  bytestream2_skip(&a->buffer, 1);
293 
294  delta = bytestream2_get_byte(&a->buffer);
295  intra = delta == 0x10;
296  if (intra) {
297  qpeg_decode_intra(a, outdata, p->linesize[0], avctx->width, avctx->height);
298  } else {
299  qpeg_decode_inter(a, outdata, p->linesize[0], avctx->width, avctx->height, delta, ctable, ref->data[0]);
300  }
301 
302  /* make the palette available on the way out */
303  if (pal && pal_size == AVPALETTE_SIZE) {
304  p->palette_has_changed = 1;
305  memcpy(a->pal, pal, AVPALETTE_SIZE);
306  } else if (pal) {
307  av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", pal_size);
308  }
309  memcpy(p->data[1], a->pal, AVPALETTE_SIZE);
310 
311  av_frame_unref(ref);
312  if ((ret = av_frame_ref(ref, p)) < 0)
313  return ret;
314 
315  p->key_frame = intra;
317 
318  *got_frame = 1;
319 
320  return avpkt->size;
321 }
322 
324  QpegContext * const a = avctx->priv_data;
325  int i, pal_size;
326  const uint8_t *pal_src;
327 
328  av_frame_unref(a->ref);
329 
330  pal_size = FFMIN(1024U, avctx->extradata_size);
331  pal_src = avctx->extradata + avctx->extradata_size - pal_size;
332 
333  for (i=0; i<pal_size/4; i++)
334  a->pal[i] = 0xFFU<<24 | AV_RL32(pal_src+4*i);
335 }
336 
338 {
339  QpegContext * const a = avctx->priv_data;
340 
341  av_frame_free(&a->ref);
342 
343  return 0;
344 }
345 
347  QpegContext * const a = avctx->priv_data;
348 
349  a->avctx = avctx;
350  avctx->pix_fmt= AV_PIX_FMT_PAL8;
351 
352  a->ref = av_frame_alloc();
353  if (!a->ref)
354  return AVERROR(ENOMEM);
355 
356  decode_flush(avctx);
357 
358  return 0;
359 }
360 
362  .name = "qpeg",
363  .long_name = NULL_IF_CONFIG_SMALL("Q-team QPEG"),
364  .type = AVMEDIA_TYPE_VIDEO,
365  .id = AV_CODEC_ID_QPEG,
366  .priv_data_size = sizeof(QpegContext),
367  .init = decode_init,
368  .close = decode_end,
369  .decode = decode_frame,
370  .flush = decode_flush,
371  .capabilities = AV_CODEC_CAP_DR1,
372  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
374 };
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static void copy(const float *p1, float *p2, const int length)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:308
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int size
Definition: packet.h:364
static const uint8_t qpeg_table_h[16]
Definition: qpeg.c:121
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
uint32_t pal[256]
Definition: qpeg.c:34
uint8_t run
Definition: svq3.c:204
AVCodec.
Definition: codec.h:190
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:273
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
uint8_t
#define av_cold
Definition: attributes.h:88
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
float delta
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
#define AVPALETTE_SIZE
Definition: pixfmt.h:32
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:456
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
#define height
uint8_t * data
Definition: packet.h:363
static const uint64_t c1
Definition: murmur3.c:49
static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst, int stride, int width, int height)
Definition: qpeg.c:38
#define av_log(a,...)
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
#define U(x)
Definition: vp56_arith.h:37
static av_cold int decode_init(AVCodecContext *avctx)
Definition: qpeg.c:346
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE bytes worth of palette...
Definition: packet.h:46
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:153
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:263
const char * name
Name of the codec implementation.
Definition: codec.h:197
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:391
static void decode_flush(AVCodecContext *avctx)
Definition: qpeg.c:323
#define FFMIN(a, b)
Definition: common.h:96
#define width
int width
picture width / height.
Definition: avcodec.h:699
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: qpeg.c:267
AVCodec ff_qpeg_decoder
Definition: qpeg.c:361
Libavcodec external API header.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:339
main external API structure.
Definition: avcodec.h:526
static void av_noinline qpeg_decode_inter(QpegContext *qctx, uint8_t *dst, int stride, int width, int height, int delta, const uint8_t *ctable, uint8_t *refdata)
Definition: qpeg.c:127
AVFrame * ref
Definition: qpeg.c:33
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1872
int extradata_size
Definition: avcodec.h:628
int palette_has_changed
Tell user application that palette has changed from previous frame.
Definition: frame.h:465
GetByteContext buffer
Definition: qpeg.c:35
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:566
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some it can consider them to be part of the FIFO and delay acknowledging a status change accordingly Example code
static const uint8_t qpeg_table_w[16]
Definition: qpeg.c:123
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:322
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:104
common internal api header.
AVCodecContext * avctx
Definition: qpeg.c:32
static av_cold int decode_end(AVCodecContext *avctx)
Definition: qpeg.c:337
void * priv_data
Definition: avcodec.h:553
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:386
#define av_noinline
Definition: attributes.h:68
#define stride
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
static double val(void *priv, double ch)
Definition: aeval.c:76
This structure stores compressed data.
Definition: packet.h:340
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:509
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:50
int i
Definition: input.c:407
Predicted.
Definition: avutil.h:275
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step