#define PALETTE_COUNT 256
#define VQA_HEADER_SIZE 0x2A

#define MAX_CODEBOOK_VECTORS 0xFF00
#define SOLID_PIXEL_VECTORS 0x100
#define MAX_VECTORS (MAX_CODEBOOK_VECTORS + SOLID_PIXEL_VECTORS)
#define MAX_CODEBOOK_SIZE (MAX_VECTORS * 4 * 4)

/* subchunk tags, stored as big-endian fourccs */
#define CBF0_TAG MKBETAG('C', 'B', 'F', '0')
#define CBFZ_TAG MKBETAG('C', 'B', 'F', 'Z')
#define CBP0_TAG MKBETAG('C', 'B', 'P', '0')
#define CBPZ_TAG MKBETAG('C', 'B', 'P', 'Z')
#define CPL0_TAG MKBETAG('C', 'P', 'L', '0')
#define CPLZ_TAG MKBETAG('C', 'P', 'L', 'Z')
#define VPTZ_TAG MKBETAG('V', 'P', 'T', 'Z')

    /* locals from vqa_decode_init() */
    int i, j, codebook_index, ret;
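The chunk tags are 32-bit big-endian fourccs, so a tag read with bytestream2_get_be32() can be compared against them directly, and MAX_CODEBOOK_SIZE reserves room for every regular vector plus the 256 solid-color vectors at 4x4 bytes each. A minimal standalone sketch of the same packing and sizing arithmetic (MKBETAG_LOCAL is a stand-in name for this example, not FFmpeg's macro):

#include <stdint.h>
#include <stdio.h>

/* same packing rule as FFmpeg's MKBETAG: 'a' lands in the high byte */
#define MKBETAG_LOCAL(a, b, c, d) \
    ((uint32_t)(d) | ((uint32_t)(c) << 8) | ((uint32_t)(b) << 16) | ((uint32_t)(a) << 24))

int main(void)
{
    /* a big-endian read of the bytes "VPTZ" yields exactly this value */
    printf("VPTZ tag = 0x%08X\n", (unsigned)MKBETAG_LOCAL('V', 'P', 'T', 'Z'));

    /* (0xFF00 + 0x100) vectors * 4 * 4 bytes = 1 MiB of codebook space */
    printf("MAX_CODEBOOK_SIZE = %u bytes\n", (0xFF00u + 0x100u) * 4u * 4u);
    return 0;
}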
    /* initialize the solid-color vectors: one per palette index, value 255 - i */
    if (s->vector_height == 4) {
        codebook_index = 0xFF00 * 16;   /* 4x4 vectors: 16 bytes each */
        for (i = 0; i < 256; i++)
            for (j = 0; j < 16; j++)
                s->codebook[codebook_index++] = 255 - i;
    } else {
        codebook_index = 0xF00 * 8;     /* 4x2 vectors: 8 bytes each */
        for (i = 0; i < 256; i++)
            for (j = 0; j < 8; j++)
                s->codebook[codebook_index++] = 255 - i;
    }
#define CHECK_COUNT() \
    if (dest_index + count > dest_size) { \
        av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: next op would overflow dest_index\n"); \
        av_log(s->avctx, AV_LOG_ERROR, "current dest_index = %d, count = %d, dest_size = %d\n", \
               dest_index, count, dest_size); \
        return AVERROR_INVALIDDATA; \
    }

#define CHECK_COPY(idx) \
    if (idx < 0 || idx + count > dest_size) { \
        av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: next op would overflow dest_index\n"); \
        av_log(s->avctx, AV_LOG_ERROR, "current src_pos = %d, count = %d, dest_size = %d\n", \
               src_pos, count, dest_size); \
        return AVERROR_INVALIDDATA; \
    }

static int decode_format80(VqaContext *s, int src_size,
                           unsigned char *dest, int dest_size,
                           int check_size) {

    int count, opcode, start;
    int src_pos;
    unsigned char color;
    int i;
    int dest_index = 0;

    start = bytestream2_tell(&s->gb);
    while (bytestream2_tell(&s->gb) - start < src_size) {

        opcode = bytestream2_get_byte(&s->gb);

        /* 0x80 means that the frame is finished */
        if (opcode == 0x80)
            break;

        if (dest_index >= dest_size) {
            av_log(s->avctx, AV_LOG_ERROR, "decode_format80 problem: dest_index (%d) exceeded dest_size (%d)\n",
                   dest_index, dest_size);
            return AVERROR_INVALIDDATA;
        }

        if (opcode == 0xFF) {

            /* (1) long copy from an absolute position in dest */
            count   = bytestream2_get_le16(&s->gb);
            src_pos = bytestream2_get_le16(&s->gb);
            ff_tlog(s->avctx, "(1) copy %X bytes from absolute pos %X\n", count, src_pos);
            CHECK_COUNT();
            CHECK_COPY(src_pos);
            for (i = 0; i < count; i++)
                dest[dest_index + i] = dest[src_pos + i];
            dest_index += count;

        } else if (opcode == 0xFE) {

            /* (2) long run of a single color */
            count = bytestream2_get_le16(&s->gb);
            color = bytestream2_get_byte(&s->gb);
            ff_tlog(s->avctx, "(2) set %X bytes to %02X\n", count, color);
            CHECK_COUNT();
            memset(&dest[dest_index], color, count);
            dest_index += count;

        } else if ((opcode & 0xC0) == 0xC0) {

            /* (3) short copy from an absolute position in dest */
            count   = (opcode & 0x3F) + 3;
            src_pos = bytestream2_get_le16(&s->gb);
            ff_tlog(s->avctx, "(3) copy %X bytes from absolute pos %X\n", count, src_pos);
            CHECK_COUNT();
            CHECK_COPY(src_pos);
            for (i = 0; i < count; i++)
                dest[dest_index + i] = dest[src_pos + i];
            dest_index += count;

        } else if (opcode > 0x80) {

            /* (4) literal copy from the source stream */
            count = opcode & 0x3F;
            ff_tlog(s->avctx, "(4) copy %X bytes from source to dest\n", count);
            CHECK_COUNT();
            bytestream2_get_buffer(&s->gb, &dest[dest_index], count);
            dest_index += count;

        } else {

            /* (5) short copy relative to the current write position */
            count   = ((opcode & 0x70) >> 4) + 3;
            src_pos = bytestream2_get_byte(&s->gb) | ((opcode & 0x0F) << 8);
            ff_tlog(s->avctx, "(5) copy %X bytes from relpos %X\n", count, src_pos);
            CHECK_COUNT();
            CHECK_COPY(dest_index - src_pos);
            for (i = 0; i < count; i++)
                dest[dest_index + i] = dest[dest_index - src_pos + i];
            dest_index += count;
        }
    }

    /* a frame map must be completely filled, so pad any shortfall with zeros;
     * partially filled codebooks are legal and are not checked */
    if (check_size)
        if (dest_index < dest_size) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "decode_format80 problem: decode finished with dest_index (%d) < dest_size (%d)\n",
                   dest_index, dest_size);
            memset(dest + dest_index, 0, dest_size - dest_index);
        }

    return 0;
}
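The opcode layout above is the classic Westwood "format80" scheme. As a concrete illustration, the following self-contained sketch decodes a tiny, made-up format80 stream using the same five rules; format80_demo is a name invented for this example, it works over plain arrays instead of a GetByteContext, and the bounds checks that CHECK_COUNT()/CHECK_COPY() perform in the real decoder are omitted for brevity.

#include <stdio.h>
#include <string.h>

/* decode a format80 stream from src into dest; returns bytes written */
static int format80_demo(const unsigned char *src, int src_size,
                         unsigned char *dest, int dest_size)
{
    int si = 0, di = 0;

    while (si < src_size && di < dest_size) {
        int opcode = src[si++], count, pos, i;

        if (opcode == 0x80) {                     /* end of stream */
            break;
        } else if (opcode == 0xFF) {              /* (1) long absolute copy */
            count = src[si] | (src[si + 1] << 8); si += 2;
            pos   = src[si] | (src[si + 1] << 8); si += 2;
            for (i = 0; i < count; i++)
                dest[di + i] = dest[pos + i];
            di += count;
        } else if (opcode == 0xFE) {              /* (2) run of one color */
            count = src[si] | (src[si + 1] << 8); si += 2;
            memset(dest + di, src[si++], count);
            di += count;
        } else if ((opcode & 0xC0) == 0xC0) {     /* (3) short absolute copy */
            count = (opcode & 0x3F) + 3;
            pos   = src[si] | (src[si + 1] << 8); si += 2;
            for (i = 0; i < count; i++)
                dest[di + i] = dest[pos + i];
            di += count;
        } else if (opcode > 0x80) {               /* (4) literal bytes */
            count = opcode & 0x3F;
            memcpy(dest + di, src + si, count);
            si += count;
            di += count;
        } else {                                  /* (5) short relative copy */
            count = ((opcode & 0x70) >> 4) + 3;
            pos   = src[si++] | ((opcode & 0x0F) << 8);
            for (i = 0; i < count; i++)
                dest[di + i] = dest[di - pos + i];
            di += count;
        }
    }
    return di;
}

int main(void)
{
    const unsigned char stream[] = {
        0x84, 0x01, 0x02, 0x03, 0x04,   /* (4) copy 4 literal bytes           */
        0xFE, 0x08, 0x00, 0xAA,         /* (2) set 8 bytes to 0xAA            */
        0x20, 0x05,                     /* (5) copy 5 bytes from 5 bytes back */
        0x80                            /* end of stream                      */
    };
    unsigned char out[32] = { 0 };
    int i, n = format80_demo(stream, sizeof(stream), out, sizeof(out));

    printf("decoded %d bytes:", n);
    for (i = 0; i < n; i++)
        printf(" %02X", out[i]);
    printf("\n");
    return 0;
}

With this input the output is the 4 literal bytes followed by 13 bytes of 0xAA: opcode 0xFE writes 8 of them and the relative copy at opcode 0x20 duplicates 5 more from 5 bytes behind the write position.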
static int vqa_decode_chunk(VqaContext *s, AVFrame *frame)
{
    unsigned int chunk_type;
    unsigned int chunk_size;
    int byte_skip = 0;
    unsigned int index = 0;
    /* offsets of the subchunks located while scanning the frame */
    int cbf0_chunk = -1, cbfz_chunk = -1, cbp0_chunk = -1, cbpz_chunk = -1;
    int cpl0_chunk = -1, cplz_chunk = -1, vptz_chunk = -1;
    unsigned char r, g, b;
    int vector_index = 0;
    /* ... additional loop and pixel locals elided ... */
    /* traverse the frame, recording the offset of each subchunk */
    while (bytestream2_get_bytes_left(&s->gb) >= 8) {
        chunk_type = bytestream2_get_be32u(&s->gb);
        index      = bytestream2_tell(&s->gb);
        chunk_size = bytestream2_get_be32u(&s->gb);
        switch (chunk_type) {
        /* ... each case stores `index` in the matching *_chunk variable ... */
        }
        /* subchunks are padded to an even number of bytes */
        byte_skip = chunk_size & 0x01;
        bytestream2_skip(&s->gb, chunk_size + byte_skip);
    }
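Each subchunk therefore starts with a 4-byte big-endian tag followed by a 4-byte big-endian payload size, and odd-sized payloads carry one padding byte, which is what the chunk_size & 0x01 skip accounts for. A small sketch with a made-up 5-byte CPL0 chunk (the buffer contents are invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* made-up chunk: tag "CPL0", big-endian size 5, then 5 payload bytes */
    const uint8_t buf[] = { 'C', 'P', 'L', '0', 0x00, 0x00, 0x00, 0x05,
                            10, 20, 30, 40, 50 };

    uint32_t tag  = (uint32_t)buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
    uint32_t size = (uint32_t)buf[4] << 24 | buf[5] << 16 | buf[6] << 8 | buf[7];
    uint32_t byte_skip = size & 0x01;   /* 1: the payload is odd-sized */

    printf("tag 0x%08X, payload %u bytes, %u padding byte(s)\n",
           (unsigned)tag, (unsigned)size, (unsigned)byte_skip);
    return 0;
}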
    /* next, deal with the palette */
    if ((cpl0_chunk != -1) && (cplz_chunk != -1)) {

        /* a frame should not carry both an uncompressed and a compressed palette */
        av_log(s->avctx, AV_LOG_ERROR, "problem: found both CPL0 and CPLZ chunks\n");
        return AVERROR_INVALIDDATA;
    }

    /* decompress the palette chunk */
    if (cplz_chunk != -1) {

        /* yet to be handled */

    }

    /* convert the RGB palette into the machine's endian format */
    if (cpl0_chunk != -1) {

        bytestream2_seek(&s->gb, cpl0_chunk, SEEK_SET);
        chunk_size = bytestream2_get_be32(&s->gb);
        /* ... sanity check on the palette size elided ... */
        for (i = 0; i < chunk_size / 3; i++) {
            /* scale by 4 to transform the 6-bit palette components to 8-bit */
            r = bytestream2_get_byteu(&s->gb) * 4;
            g = bytestream2_get_byteu(&s->gb) * 4;
            b = bytestream2_get_byteu(&s->gb) * 4;
            s->palette[i] = 0xFFU << 24 | r << 16 | g << 8 | b;
        }
    }
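VQA palettes use VGA-style 6-bit components (0..63), so the *4 above rescales them to the 8-bit range and the result is packed as an opaque 0xAARRGGBB word for the PAL8 palette. A tiny sketch of the same arithmetic on one made-up entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* one invented VGA palette entry: 6-bit components in 0..63 */
    unsigned char r6 = 63, g6 = 32, b6 = 0;

    /* x4 rescales 6-bit to 8-bit, then pack as opaque ARGB */
    uint32_t entry = 0xFFu << 24 | (r6 * 4) << 16 | (g6 * 4) << 8 | (b6 * 4);

    printf("palette entry = 0x%08X\n", (unsigned)entry);   /* 0xFFFC8000 */
    return 0;
}

Note that the plain x4 scaling leaves the two low bits clear, so the brightest component maps to 252 rather than 255.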
    /* a frame must not carry both a CBF0 and a CBFZ codebook */
    if ((cbf0_chunk != -1) && (cbfz_chunk != -1)) {
        av_log(s->avctx, AV_LOG_ERROR, "problem: found both CBF0 and CBFZ chunks\n");
        return AVERROR_INVALIDDATA;
    }
    if (cbfz_chunk != -1) {   /* decompress a format80-coded full codebook */
        bytestream2_seek(&s->gb, cbfz_chunk, SEEK_SET);
        chunk_size = bytestream2_get_be32(&s->gb);
        /* ... decode_format80() into s->codebook ... */
    }
    if (cbf0_chunk != -1) {   /* copy an uncompressed full codebook */
        bytestream2_seek(&s->gb, cbf0_chunk, SEEK_SET);
        chunk_size = bytestream2_get_be32(&s->gb);
        /* ... bounds-check against MAX_CODEBOOK_SIZE, then copy into s->codebook ... */
    }
    /* decode the frame: the VPTZ chunk (vector pointer table) is mandatory */
    if (vptz_chunk == -1) {
        av_log(s->avctx, AV_LOG_ERROR, "problem: no VPTZ chunk found\n");
        return AVERROR_INVALIDDATA;
    }

    bytestream2_seek(&s->gb, vptz_chunk, SEEK_SET);
    chunk_size = bytestream2_get_be32(&s->gb);
    /* ... decode_format80() the vector pointer table into s->decode_buffer ... */

    /* render the final PAL8 frame, one 4-pixel-wide vector at a time */
    if (s->vector_height == 4)
        index_shift = 4;
    else
        index_shift = 3;
    for (y = 0; y < s->height; y += s->vector_height) {

        for (x = 0; x < s->width; x += 4, lobytes++, hibytes++) {
            pixel_ptr = y * frame->linesize[0] + x;

            /* get the vector index; the packing differs between VQA versions */
            switch (s->vqa_version) {

            case 1:
                /* ... fetch lobyte/hibyte from the decode buffer ... */
                vector_index = ((hibyte << 8) | lobyte) >> 3;
                vector_index <<= index_shift;
                lines = s->vector_height;
                /* a high byte of 0xFF marks a solid-color block */
                if (hibyte == 0xFF) {
                    while (lines--) {
                        frame->data[0][pixel_ptr + 0] = 255 - lobyte;
                        frame->data[0][pixel_ptr + 1] = 255 - lobyte;
                        frame->data[0][pixel_ptr + 2] = 255 - lobyte;
                        frame->data[0][pixel_ptr + 3] = 255 - lobyte;
                        pixel_ptr += frame->linesize[0];
                    }
                    lines = 0;
                }
                break;

            case 2:
                /* ... fetch lobyte/hibyte from the decode buffer ... */
                vector_index = (hibyte << 8) | lobyte;
                vector_index <<= index_shift;
                lines = s->vector_height;
                break;
            }

            /* copy the vector's rows out of the codebook */
            while (lines--) {
                frame->data[0][pixel_ptr + 0] = s->codebook[vector_index++];
                frame->data[0][pixel_ptr + 1] = s->codebook[vector_index++];
                frame->data[0][pixel_ptr + 2] = s->codebook[vector_index++];
                frame->data[0][pixel_ptr + 3] = s->codebook[vector_index++];
                pixel_ptr += frame->linesize[0];
            }
        }
    }
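The << index_shift above converts a vector number from the pointer table into a byte offset into the flat codebook: 16 bytes per 4x4 vector (shift 4) or 8 bytes per 4x2 vector (shift 3). A short sketch of that arithmetic, using the VQA version-2 packing and made-up table bytes:

#include <stdio.h>

int main(void)
{
    /* assumptions for this sketch: 4x4 vectors, so index_shift == 4 */
    int index_shift = 4;

    /* an invented 16-bit entry from the vector pointer table */
    int lobyte = 0x34, hibyte = 0x12;

    int vector_number = (hibyte << 8) | lobyte;        /* 0x1234 = 4660      */
    int byte_offset   = vector_number << index_shift;  /* 4660 * 16 = 74560  */

    printf("vector %d starts at codebook byte %d\n", vector_number, byte_offset);

    /* the 256 solid-color vectors live right after the 0xFF00 regular ones,
     * i.e. from byte 0xFF00 * 16 on -- the same constant the init code uses */
    printf("solid-color vectors start at byte %d\n", 0xFF00 * 16);
    return 0;
}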
    /* partial codebooks (CBP0/CBPZ) accumulate across several frames */
    if ((cbp0_chunk != -1) && (cbpz_chunk != -1)) {
        av_log(s->avctx, AV_LOG_ERROR, "problem: found both CBP0 and CBPZ chunks\n");
        return AVERROR_INVALIDDATA;
    }
    if (cbp0_chunk != -1) {     /* uncompressed partial codebook */
        bytestream2_seek(&s->gb, cbp0_chunk, SEEK_SET);
        chunk_size = bytestream2_get_be32(&s->gb);
        /* ... append to s->next_codebook_buffer ... */
    }
    if (cbpz_chunk != -1) {     /* format80-compressed partial codebook */
        bytestream2_seek(&s->gb, cbpz_chunk, SEEK_SET);
        chunk_size = bytestream2_get_be32(&s->gb);
        /* ... append; decode_format80() once the codebook is complete ... */
    }
static int vqa_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    /* ... get a buffer, call vqa_decode_chunk(), export the palette ... */
}

static const AVCodecDefault vqa_defaults[] = {
    { "max_pixels", "320*240" },
    { NULL },
};