xan.c
/*
 * Wing Commander/Xan Video Decoder
 * Copyright (C) 2003 The FFmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Xan video decoder for Wing Commander III computer game
 * by Mario Brito (mbrito@student.dei.uc.pt)
 * and Mike Melanson (melanson@pcisys.net)
 *
 * The xan_wc3 decoder outputs PAL8 data.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"

#define BITSTREAM_READER_LE
#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "internal.h"

#define RUNTIME_GAMMA 0

#define VGA__TAG MKTAG('V', 'G', 'A', ' ')
#define PALT_TAG MKTAG('P', 'A', 'L', 'T')
#define SHOT_TAG MKTAG('S', 'H', 'O', 'T')
#define PALETTE_COUNT 256
#define PALETTE_SIZE (PALETTE_COUNT * 3)
#define PALETTES_MAX 256

typedef struct XanContext {

    AVCodecContext *avctx;
    AVFrame *last_frame;

    const uint8_t *buf;
    int size;

    /* scratch space */
    uint8_t *buffer1;
    int buffer1_size;
    uint8_t *buffer2;
    int buffer2_size;

    unsigned *palettes;
    int palettes_count;
    int cur_palette;

    int frame_size;

} XanContext;

static av_cold int xan_decode_end(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;

    av_frame_free(&s->last_frame);

    av_freep(&s->buffer1);
    av_freep(&s->buffer2);
    av_freep(&s->palettes);

    return 0;
}

static av_cold int xan_decode_init(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->frame_size = 0;

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    s->buffer1_size = avctx->width * avctx->height;
    s->buffer1 = av_malloc(s->buffer1_size);
    if (!s->buffer1)
        return AVERROR(ENOMEM);
    s->buffer2_size = avctx->width * avctx->height;
    s->buffer2 = av_malloc(s->buffer2_size + 130);
    if (!s->buffer2)
        return AVERROR(ENOMEM);

    s->last_frame = av_frame_alloc();
    if (!s->last_frame)
        return AVERROR(ENOMEM);

    return 0;
}

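/*
 * The Huffman segment decoded below starts with a node count N, followed by
 * a table of 2*N child entries and then an LSB-first bitstream.  Symbol
 * values below 0x16 are literal bytes, the value 0x16 terminates the stream,
 * and larger values index back into the child table.  (Layout inferred from
 * the decoder itself; the format has no official documentation.)
 */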
static int xan_huffman_decode(uint8_t *dest, int dest_len,
                              const uint8_t *src, int src_len)
{
    uint8_t byte = *src++;
    uint8_t ival = byte + 0x16;
    const uint8_t *ptr = src + byte * 2;
    int ptr_len = src_len - 1 - byte * 2;
    uint8_t val = ival;
    uint8_t *dest_end = dest + dest_len;
    uint8_t *dest_start = dest;
    int ret;
    GetBitContext gb;

    if ((ret = init_get_bits8(&gb, ptr, ptr_len)) < 0)
        return ret;

    while (val != 0x16) {
        unsigned idx;
        if (get_bits_left(&gb) < 1)
            return AVERROR_INVALIDDATA;
        idx = val - 0x17 + get_bits1(&gb) * byte;
        if (idx >= 2 * byte)
            return AVERROR_INVALIDDATA;
        val = src[idx];

        if (val < 0x16) {
            if (dest >= dest_end)
                return dest_len;
            *dest++ = val;
            val = ival;
        }
    }

    return dest - dest_start;
}

/**
 * unpack simple compression
 *
 * @param dest destination buffer of dest_len, must be padded with at least 130 bytes
 */
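/*
 * Opcode layout, as read from the implementation below (not from a spec):
 * opcodes below 0xe0 copy "size" literal bytes followed by a back-reference
 * of size2 bytes, the top bits selecting a 2-, 3- or 4-byte opcode form with
 * increasing offset and length range; opcodes 0xe0..0xfb copy
 * ((opcode & 0x1f) << 2) + 4 literal bytes; opcodes 0xfc..0xff copy the
 * final (opcode & 3) bytes and terminate the stream.
 */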
static void xan_unpack(uint8_t *dest, int dest_len,
                       const uint8_t *src, int src_len)
{
    uint8_t opcode;
    int size;
    uint8_t *dest_org = dest;
    uint8_t *dest_end = dest + dest_len;
    GetByteContext ctx;

    bytestream2_init(&ctx, src, src_len);
    while (dest < dest_end && bytestream2_get_bytes_left(&ctx)) {
        opcode = bytestream2_get_byte(&ctx);

        if (opcode < 0xe0) {
            int size2, back;
            if ((opcode & 0x80) == 0) {
                size = opcode & 3;

                back  = ((opcode & 0x60) << 3) + bytestream2_get_byte(&ctx) + 1;
                size2 = ((opcode & 0x1c) >> 2) + 3;
            } else if ((opcode & 0x40) == 0) {
                size = bytestream2_peek_byte(&ctx) >> 6;

                back  = (bytestream2_get_be16(&ctx) & 0x3fff) + 1;
                size2 = (opcode & 0x3f) + 4;
            } else {
                size = opcode & 3;

                back  = ((opcode & 0x10) << 12) + bytestream2_get_be16(&ctx) + 1;
                size2 = ((opcode & 0x0c) << 6) + bytestream2_get_byte(&ctx) + 5;
            }

            if (dest_end - dest < size + size2 ||
                dest + size - dest_org < back ||
                bytestream2_get_bytes_left(&ctx) < size)
                return;
            bytestream2_get_buffer(&ctx, dest, size);
            dest += size;
            av_memcpy_backptr(dest, back, size2);
            dest += size2;
        } else {
            int finish = opcode >= 0xfc;
            size = finish ? opcode & 3 : ((opcode & 0x1f) << 2) + 4;

            if (dest_end - dest < size || bytestream2_get_bytes_left(&ctx) < size)
                return;
            bytestream2_get_buffer(&ctx, dest, size);
            dest += size;
            if (finish)
                return;
        }
    }
}

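/* Copy a linear run of freshly decoded pixels into the PAL8 plane, wrapping
 * to the next row whenever the run crosses the right edge of the image. */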
static void xan_wc3_output_pixel_run(XanContext *s, AVFrame *frame,
                                     const uint8_t *pixel_buffer,
                                     int x, int y, int pixel_count)
{
    int stride;
    int line_inc;
    int index;
    int current_x;
    int width = s->avctx->width;
    uint8_t *palette_plane;

    palette_plane = frame->data[0];
    stride        = frame->linesize[0];
    line_inc      = stride - width;
    index         = y * stride + x;
    current_x     = x;
    while (pixel_count && index < s->frame_size) {
        int count = FFMIN(pixel_count, width - current_x);
        memcpy(palette_plane + index, pixel_buffer, count);
        pixel_count  -= count;
        index        += count;
        pixel_buffer += count;
        current_x    += count;

        if (current_x >= width) {
            index += line_inc;
            current_x = 0;
        }
    }
}

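/* Copy a run of pixels from the previous frame, displaced by the given
 * motion vector; when the previous frame has no data yet, the copy falls
 * back to the current plane. */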
static void xan_wc3_copy_pixel_run(XanContext *s, AVFrame *frame,
                                   int x, int y,
                                   int pixel_count, int motion_x,
                                   int motion_y)
{
    int stride;
    int line_inc;
    int curframe_index, prevframe_index;
    int curframe_x, prevframe_x;
    int width = s->avctx->width;
    uint8_t *palette_plane, *prev_palette_plane;

    if (y + motion_y < 0 || y + motion_y >= s->avctx->height ||
        x + motion_x < 0 || x + motion_x >= s->avctx->width)
        return;

    palette_plane = frame->data[0];
    prev_palette_plane = s->last_frame->data[0];
    if (!prev_palette_plane)
        prev_palette_plane = palette_plane;
    stride          = frame->linesize[0];
    line_inc        = stride - width;
    curframe_index  = y * stride + x;
    curframe_x      = x;
    prevframe_index = (y + motion_y) * stride + x + motion_x;
    prevframe_x     = x + motion_x;

    if (prev_palette_plane == palette_plane && FFABS(motion_x + width*motion_y) < pixel_count) {
        avpriv_request_sample(s->avctx, "Overlapping copy");
        return;
    }

    while (pixel_count &&
           curframe_index  < s->frame_size &&
           prevframe_index < s->frame_size) {
        int count = FFMIN3(pixel_count, width - curframe_x,
                           width - prevframe_x);

        memcpy(palette_plane + curframe_index,
               prev_palette_plane + prevframe_index, count);
        pixel_count     -= count;
        curframe_index  += count;
        prevframe_index += count;
        curframe_x      += count;
        prevframe_x     += count;

        if (curframe_x >= width) {
            curframe_index += line_inc;
            curframe_x = 0;
        }

        if (prevframe_x >= width) {
            prevframe_index += line_inc;
            prevframe_x = 0;
        }
    }
}

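/*
 * Decode one frame chunk.  The chunk begins with four little-endian 16-bit
 * offsets to the Huffman-coded opcode stream, the size segment, the motion
 * vector segment and the image data (which is itself xan_unpack-compressed
 * when its first byte is 2).  Opcodes below 12 toggle a flag that alternates
 * between copying a run from the previous frame and emitting new pixels from
 * the image data; opcodes 1-8 and 12-18 carry the run length inline, while
 * 9-11 and 19-21 read an 8/16/24-bit length from the size segment.  Opcodes
 * 12 and above copy a motion-compensated run using a 4-bit vector pair from
 * the vector segment.  (Summary derived from the code below.)
 */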
static int xan_wc3_decode_frame(XanContext *s, AVFrame *frame)
{

    int width  = s->avctx->width;
    int height = s->avctx->height;
    int total_pixels = width * height;
    uint8_t opcode;
    uint8_t flag = 0;
    int size = 0;
    int motion_x, motion_y;
    int x, y, ret;

    uint8_t *opcode_buffer = s->buffer1;
    uint8_t *opcode_buffer_end = s->buffer1 + s->buffer1_size;
    int opcode_buffer_size = s->buffer1_size;
    const uint8_t *imagedata_buffer = s->buffer2;

    /* pointers to segments inside the compressed chunk */
    const uint8_t *huffman_segment;
    GetByteContext size_segment;
    GetByteContext vector_segment;
    const uint8_t *imagedata_segment;
    int huffman_offset, size_offset, vector_offset, imagedata_offset,
        imagedata_size;

    if (s->size < 8)
        return AVERROR_INVALIDDATA;

    huffman_offset   = AV_RL16(&s->buf[0]);
    size_offset      = AV_RL16(&s->buf[2]);
    vector_offset    = AV_RL16(&s->buf[4]);
    imagedata_offset = AV_RL16(&s->buf[6]);

    if (huffman_offset   >= s->size ||
        size_offset      >= s->size ||
        vector_offset    >= s->size ||
        imagedata_offset >= s->size)
        return AVERROR_INVALIDDATA;

    huffman_segment = s->buf + huffman_offset;
    bytestream2_init(&size_segment,   s->buf + size_offset,   s->size - size_offset);
    bytestream2_init(&vector_segment, s->buf + vector_offset, s->size - vector_offset);
    imagedata_segment = s->buf + imagedata_offset;

    if ((ret = xan_huffman_decode(opcode_buffer, opcode_buffer_size,
                                  huffman_segment, s->size - huffman_offset)) < 0)
        return AVERROR_INVALIDDATA;
    opcode_buffer_end = opcode_buffer + ret;

    if (imagedata_segment[0] == 2) {
        xan_unpack(s->buffer2, s->buffer2_size,
                   &imagedata_segment[1], s->size - imagedata_offset - 1);
        imagedata_size = s->buffer2_size;
    } else {
        imagedata_size = s->size - imagedata_offset - 1;
        imagedata_buffer = &imagedata_segment[1];
    }

    /* use the decoded data segments to build the frame */
    x = y = 0;
    while (total_pixels && opcode_buffer < opcode_buffer_end) {

        opcode = *opcode_buffer++;
        size = 0;

        switch (opcode) {

        case 0:
            flag ^= 1;
            continue;

        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
            size = opcode;
            break;

        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
        case 17:
        case 18:
            size += (opcode - 10);
            break;

        case 9:
        case 19:
            if (bytestream2_get_bytes_left(&size_segment) < 1) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_byte(&size_segment);
            break;

        case 10:
        case 20:
            if (bytestream2_get_bytes_left(&size_segment) < 2) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_be16(&size_segment);
            break;

        case 11:
        case 21:
            if (bytestream2_get_bytes_left(&size_segment) < 3) {
                av_log(s->avctx, AV_LOG_ERROR, "size_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            size = bytestream2_get_be24(&size_segment);
            break;
        }

        if (size > total_pixels)
            break;

        if (opcode < 12) {
            flag ^= 1;
            if (flag) {
                /* run of (size) pixels is unchanged from last frame */
                xan_wc3_copy_pixel_run(s, frame, x, y, size, 0, 0);
            } else {
                /* output a run of pixels from imagedata_buffer */
                if (imagedata_size < size)
                    break;
                xan_wc3_output_pixel_run(s, frame, imagedata_buffer, x, y, size);
                imagedata_buffer += size;
                imagedata_size -= size;
            }
        } else {
            uint8_t vector;
            if (bytestream2_get_bytes_left(&vector_segment) <= 0) {
                av_log(s->avctx, AV_LOG_ERROR, "vector_segment overread\n");
                return AVERROR_INVALIDDATA;
            }
            /* run-based motion compensation from last frame */
            vector = bytestream2_get_byte(&vector_segment);
            motion_x = sign_extend(vector >> 4, 4);
            motion_y = sign_extend(vector & 0xF, 4);

            /* copy a run of pixels from the previous frame */
            xan_wc3_copy_pixel_run(s, frame, x, y, size, motion_x, motion_y);

            flag = 0;
        }

        /* coordinate accounting */
        total_pixels -= size;
        y += (x + size) / width;
        x  = (x + size) % width;
    }
    return 0;
}

#if RUNTIME_GAMMA
static inline unsigned mul(unsigned a, unsigned b)
{
    return (a * b) >> 16;
}

static inline unsigned pow4(unsigned a)
{
    unsigned square = mul(a, a);
    return mul(square, square);
}

static inline unsigned pow5(unsigned a)
{
    return mul(pow4(a), a);
}

static uint8_t gamma_corr(uint8_t in) {
    unsigned lo, hi = 0xff40, target;
    int i = 15;
    in = (in << 2) | (in >> 6);
    /* equivalent float code:
    if (in >= 252)
        return 253;
    return round(pow(in / 256.0, 0.8) * 256);
    */
    lo = target = in << 8;
    do {
        unsigned mid = (lo + hi) >> 1;
        unsigned pow = pow5(mid);
        if (pow > target) hi = mid;
        else lo = mid;
    } while (--i);
    return (pow4((lo + hi) >> 1) + 0x80) >> 8;
}
#else
/**
 * This is a gamma correction that xan3 applies to all palette entries.
 *
 * There is a peculiarity, namely that the values are clamped to 253 -
 * it seems likely that this table was calculated by a buggy fixed-point
 * implementation, the one above under RUNTIME_GAMMA behaves like this for
 * example.
 * The exponent value of 0.8 can be explained by this as well, since 0.8 = 4/5
 * and thus pow(x, 0.8) is still easy to calculate.
 * Also, the input values are first rotated to the left by 2.
 */
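/*
 * Worked example (illustrative, not part of the original source): raw value
 * 1 rotates left by 2 to give 4, and round(pow(4 / 256.0, 0.8) * 256) == 9,
 * matching gamma_lookup[1] = 0x09; raw value 63 rotates to 252 and hits the
 * clamp, giving 0xFD (253).
 */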
static const uint8_t gamma_lookup[256] = {
    0x00, 0x09, 0x10, 0x16, 0x1C, 0x21, 0x27, 0x2C,
    0x31, 0x35, 0x3A, 0x3F, 0x43, 0x48, 0x4C, 0x50,
    0x54, 0x59, 0x5D, 0x61, 0x65, 0x69, 0x6D, 0x71,
    0x75, 0x79, 0x7D, 0x80, 0x84, 0x88, 0x8C, 0x8F,
    0x93, 0x97, 0x9A, 0x9E, 0xA2, 0xA5, 0xA9, 0xAC,
    0xB0, 0xB3, 0xB7, 0xBA, 0xBE, 0xC1, 0xC5, 0xC8,
    0xCB, 0xCF, 0xD2, 0xD5, 0xD9, 0xDC, 0xDF, 0xE3,
    0xE6, 0xE9, 0xED, 0xF0, 0xF3, 0xF6, 0xFA, 0xFD,
    0x03, 0x0B, 0x12, 0x18, 0x1D, 0x23, 0x28, 0x2D,
    0x32, 0x36, 0x3B, 0x40, 0x44, 0x49, 0x4D, 0x51,
    0x56, 0x5A, 0x5E, 0x62, 0x66, 0x6A, 0x6E, 0x72,
    0x76, 0x7A, 0x7D, 0x81, 0x85, 0x89, 0x8D, 0x90,
    0x94, 0x98, 0x9B, 0x9F, 0xA2, 0xA6, 0xAA, 0xAD,
    0xB1, 0xB4, 0xB8, 0xBB, 0xBF, 0xC2, 0xC5, 0xC9,
    0xCC, 0xD0, 0xD3, 0xD6, 0xDA, 0xDD, 0xE0, 0xE4,
    0xE7, 0xEA, 0xED, 0xF1, 0xF4, 0xF7, 0xFA, 0xFD,
    0x05, 0x0D, 0x13, 0x19, 0x1F, 0x24, 0x29, 0x2E,
    0x33, 0x38, 0x3C, 0x41, 0x45, 0x4A, 0x4E, 0x52,
    0x57, 0x5B, 0x5F, 0x63, 0x67, 0x6B, 0x6F, 0x73,
    0x77, 0x7B, 0x7E, 0x82, 0x86, 0x8A, 0x8D, 0x91,
    0x95, 0x99, 0x9C, 0xA0, 0xA3, 0xA7, 0xAA, 0xAE,
    0xB2, 0xB5, 0xB9, 0xBC, 0xBF, 0xC3, 0xC6, 0xCA,
    0xCD, 0xD0, 0xD4, 0xD7, 0xDA, 0xDE, 0xE1, 0xE4,
    0xE8, 0xEB, 0xEE, 0xF1, 0xF5, 0xF8, 0xFB, 0xFD,
    0x07, 0x0E, 0x15, 0x1A, 0x20, 0x25, 0x2A, 0x2F,
    0x34, 0x39, 0x3D, 0x42, 0x46, 0x4B, 0x4F, 0x53,
    0x58, 0x5C, 0x60, 0x64, 0x68, 0x6C, 0x70, 0x74,
    0x78, 0x7C, 0x7F, 0x83, 0x87, 0x8B, 0x8E, 0x92,
    0x96, 0x99, 0x9D, 0xA1, 0xA4, 0xA8, 0xAB, 0xAF,
    0xB2, 0xB6, 0xB9, 0xBD, 0xC0, 0xC4, 0xC7, 0xCB,
    0xCE, 0xD1, 0xD5, 0xD8, 0xDB, 0xDF, 0xE2, 0xE5,
    0xE9, 0xEC, 0xEF, 0xF2, 0xF6, 0xF9, 0xFC, 0xFD
};
#endif

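/*
 * Top-level decode callback: walk the chunk tags (PALT stores a new
 * gamma-corrected palette, SHOT selects one of the stored palettes, VGA_
 * introduces the frame data), then hand the remaining bytes to
 * xan_wc3_decode_frame() and keep a reference to the result for use as the
 * next frame's prediction source.
 */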
static int xan_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int ret, buf_size = avpkt->size;
    XanContext *s = avctx->priv_data;
    GetByteContext ctx;
    int tag = 0;

    bytestream2_init(&ctx, buf, buf_size);
    while (bytestream2_get_bytes_left(&ctx) > 8 && tag != VGA__TAG) {
        unsigned *tmpptr;
        uint32_t new_pal;
        int size;
        int i;
        tag  = bytestream2_get_le32(&ctx);
        size = bytestream2_get_be32(&ctx);
        if (size < 0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid tag size %d\n", size);
            return AVERROR_INVALIDDATA;
        }
        size = FFMIN(size, bytestream2_get_bytes_left(&ctx));
        switch (tag) {
        case PALT_TAG:
            if (size < PALETTE_SIZE)
                return AVERROR_INVALIDDATA;
            if (s->palettes_count >= PALETTES_MAX)
                return AVERROR_INVALIDDATA;
            tmpptr = av_realloc_array(s->palettes,
                                      s->palettes_count + 1, AVPALETTE_SIZE);
            if (!tmpptr)
                return AVERROR(ENOMEM);
            s->palettes = tmpptr;
            tmpptr += s->palettes_count * AVPALETTE_COUNT;
            for (i = 0; i < PALETTE_COUNT; i++) {
#if RUNTIME_GAMMA
                int r = gamma_corr(bytestream2_get_byteu(&ctx));
                int g = gamma_corr(bytestream2_get_byteu(&ctx));
                int b = gamma_corr(bytestream2_get_byteu(&ctx));
#else
                int r = gamma_lookup[bytestream2_get_byteu(&ctx)];
                int g = gamma_lookup[bytestream2_get_byteu(&ctx)];
                int b = gamma_lookup[bytestream2_get_byteu(&ctx)];
#endif
                *tmpptr++ = (0xFFU << 24) | (r << 16) | (g << 8) | b;
            }
            s->palettes_count++;
            break;
        case SHOT_TAG:
            if (size < 4)
                return AVERROR_INVALIDDATA;
            new_pal = bytestream2_get_le32(&ctx);
            if (new_pal < s->palettes_count) {
                s->cur_palette = new_pal;
            } else
                av_log(avctx, AV_LOG_ERROR, "Invalid palette selected\n");
            break;
        case VGA__TAG:
            break;
        default:
            bytestream2_skip(&ctx, size);
            break;
        }
    }
    buf_size = bytestream2_get_bytes_left(&ctx);

    if (s->palettes_count <= 0) {
        av_log(s->avctx, AV_LOG_ERROR, "No palette found\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    if (!s->frame_size)
        s->frame_size = frame->linesize[0] * s->avctx->height;

    memcpy(frame->data[1],
           s->palettes + s->cur_palette * AVPALETTE_COUNT, AVPALETTE_SIZE);

    s->buf = ctx.buffer;
    s->size = buf_size;

    if (xan_wc3_decode_frame(s, frame) < 0)
        return AVERROR_INVALIDDATA;

    av_frame_unref(s->last_frame);
    if ((ret = av_frame_ref(s->last_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}

const FFCodec ff_xan_wc3_decoder = {
    .p.name         = "xan_wc3",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Wing Commander III / Xan"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_XAN_WC3,
    .priv_data_size = sizeof(XanContext),
    .init           = xan_decode_init,
    .close          = xan_decode_end,
    FF_CODEC_DECODE_CB(xan_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};