FFmpeg
argo.c
/*
 * Argonaut Games Video decoder
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

typedef struct ArgoContext {
    GetByteContext gb;

    int bpp;
    int key;
    int mv0[128][2];
    int mv1[16][2];
    uint32_t pal[256];
    AVFrame *frame;
} ArgoContext;

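/*
 * Palette chunk: a little-endian start index and entry count, followed by
 * "count" big-endian 24-bit colour values (alpha forced to 0xFF), stored
 * into the 256-entry palette kept in ArgoContext.pal.
 */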
static int decode_pal8(AVCodecContext *avctx, uint32_t *pal)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int start, count;

    start = bytestream2_get_le16(gb);
    count = bytestream2_get_le16(gb);

    if (start + count > 256)
        return AVERROR_INVALIDDATA;

    if (bytestream2_get_bytes_left(gb) < 3 * count)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < count; i++)
        pal[start + i] = (0xFFU << 24) | bytestream2_get_be24u(gb);

    return 0;
}

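/*
 * AVCF: intra frame for the 8-bit (PAL8) mode. The chunk starts with a
 * 1024-byte codebook of 256 2x2-pixel cells, followed by one codebook
 * index per 2x2 block of the frame.
 */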
static int decode_avcf(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int l = frame->linesize[0];
    const uint8_t *map = gb->buffer;
    uint8_t *dst = frame->data[0];

    if (bytestream2_get_bytes_left(gb) < 1024 + (frame->width / 2) * (frame->height / 2))
        return AVERROR_INVALIDDATA;

    bytestream2_skipu(gb, 1024);
    for (int y = 0; y < frame->height; y += 2) {
        for (int x = 0; x < frame->width; x += 2) {
            int index = bytestream2_get_byteu(gb);
            const uint8_t *block = map + index * 4;

            dst[x+0]   = block[0];
            dst[x+1]   = block[1];
            dst[x+l]   = block[2];
            dst[x+l+1] = block[3];
        }

        dst += frame->linesize[0] * 2;
    }

    return 0;
}

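/*
 * ALCD: conditional-update variant of AVCF. After the 1024-byte codebook
 * comes a bitmap with one bit per 2x2 block (MSB first); only blocks whose
 * bit is set read a codebook index, the rest keep the previous frame's
 * pixels.
 */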
static int decode_alcd(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext sb;
    const int l = frame->linesize[0];
    const uint8_t *map = gb->buffer;
    uint8_t *dst = frame->data[0];
    uint8_t codes = 0;
    int count = 0;

    if (bytestream2_get_bytes_left(gb) < 1024 + (((frame->width / 2) * (frame->height / 2) + 7) >> 3))
        return AVERROR_INVALIDDATA;

    bytestream2_skipu(gb, 1024);
    sb = *gb;
    bytestream2_skipu(gb, ((frame->width / 2) * (frame->height / 2) + 7) >> 3);

    for (int y = 0; y < frame->height; y += 2) {
        for (int x = 0; x < frame->width; x += 2) {
            const uint8_t *block;
            int index;

            if (count == 0) {
                codes = bytestream2_get_byteu(&sb);
                count = 8;
            }

            if (codes & 0x80) {
                index = bytestream2_get_byte(gb);
                block = map + index * 4;

                dst[x+0]   = block[0];
                dst[x+1]   = block[1];
                dst[x+l]   = block[2];
                dst[x+l+1] = block[3];
            }

            codes <<= 1;
            count--;
        }

        dst += frame->linesize[0] * 2;
    }

    return 0;
}

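/*
 * MAD1, 8-bit path: a sequence of typed sub-chunks terminated by 0xFF.
 *   8 - fill the frame with per-8x8-block solid colours
 *   7 - motion-compensated block copies within the current frame
 *   6 - raw full-frame copy
 *   5 - fill the frame with per-2x2-block solid colours
 *   3 - not implemented yet (AVERROR_PATCHWELCOME)
 *   2 - skip/update coding with 2-bit modes per pixel:
 *       keep, copy left, copy above, or read a literal byte
 */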
static int decode_mad1(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int w = frame->width;
    const int h = frame->height;
    const int l = frame->linesize[0];

    while (bytestream2_get_bytes_left(gb) > 0) {
        int size, type, pos, dy;
        uint8_t *dst;

        type = bytestream2_get_byte(gb);
        if (type == 0xFF)
            break;

        switch (type) {
        case 8:
            dst = frame->data[0];
            for (int y = 0; y < h; y += 8) {
                for (int x = 0; x < w; x += 8) {
                    int fill = bytestream2_get_byte(gb);
                    uint8_t *ddst = dst + x;

                    for (int by = 0; by < 8; by++) {
                        memset(ddst, fill, 8);
                        ddst += l;
                    }
                }

                dst += 8 * l;
            }
            break;
        case 7:
            while (bytestream2_get_bytes_left(gb) > 0) {
                int bsize = bytestream2_get_byte(gb);
                uint8_t *src;
                int count;

                if (!bsize)
                    break;

                count = bytestream2_get_be16(gb);
                while (count > 0) {
                    int mvx, mvy, a, b, c, mx, my;
                    int bsize_w, bsize_h;

                    bsize_w = bsize_h = bsize;
                    if (bytestream2_get_bytes_left(gb) < 4)
                        return AVERROR_INVALIDDATA;
                    mvx = bytestream2_get_byte(gb) * bsize;
                    mvy = bytestream2_get_byte(gb) * bsize;
                    a = bytestream2_get_byte(gb);
                    b = bytestream2_get_byte(gb);
                    c = ((a & 0x3F) << 8) + b;
                    mx = mvx + (c & 0x7F) - 64;
                    my = mvy + (c >> 7) - 64;

                    if (mvy < 0 || mvy >= h)
                        return AVERROR_INVALIDDATA;

                    if (mvx < 0 || mvx >= w)
                        return AVERROR_INVALIDDATA;

                    if (my < 0 || my >= h)
                        return AVERROR_INVALIDDATA;

                    if (mx < 0 || mx >= w)
                        return AVERROR_INVALIDDATA;

                    dst = frame->data[0] + mvx + l * mvy;
                    src = frame->data[0] + mx + l * my;

                    bsize_w = FFMIN3(bsize_w, w - mvx, w - mx);
                    bsize_h = FFMIN3(bsize_h, h - mvy, h - my);

                    if (mvy >= my && (mvy != my || mvx >= mx)) {
                        src += (bsize_h - 1) * l;
                        dst += (bsize_h - 1) * l;
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w);
                            src -= l;
                            dst -= l;
                        }
                    } else {
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w);
                            src += l;
                            dst += l;
                        }
                    }

                    count--;
                }
            }
            break;
        case 6:
            dst = frame->data[0];
            if (bytestream2_get_bytes_left(gb) < w * h)
                return AVERROR_INVALIDDATA;
            for (int y = 0; y < h; y++) {
                bytestream2_get_bufferu(gb, dst, w);
                dst += l;
            }
            break;
        case 5:
            dst = frame->data[0];
            for (int y = 0; y < h; y += 2) {
                for (int x = 0; x < w; x += 2) {
                    int fill = bytestream2_get_byte(gb);
                    uint8_t *ddst = dst + x;

                    fill = (fill << 8) | fill;
                    for (int by = 0; by < 2; by++) {
                        AV_WN16(ddst, fill);

                        ddst += l;
                    }
                }

                dst += 2 * l;
            }
            break;
        case 3:
            size = bytestream2_get_le16(gb);
            if (size > 0) {
                int x = bytestream2_get_byte(gb) * 4;
                int y = bytestream2_get_byte(gb) * 4;
                int count = bytestream2_get_byte(gb);
                int fill = bytestream2_get_byte(gb);

                av_log(avctx, AV_LOG_DEBUG, "%d %d %d %d\n", x, y, count, fill);
                for (int i = 0; i < count; i++)
                    ;
                return AVERROR_PATCHWELCOME;
            }
            break;
        case 2:
            dst = frame->data[0];
            pos = 0;
            dy  = 0;
            while (bytestream2_get_bytes_left(gb) > 0) {
                int count = bytestream2_get_byteu(gb);
                int skip = count & 0x3F;

                count = count >> 6;
                if (skip == 0x3F) {
                    pos += 0x3E;
                    while (pos >= w) {
                        pos -= w;
                        dst += l;
                        dy++;
                        if (dy >= h)
                            return 0;
                    }
                } else {
                    pos += skip;
                    while (pos >= w) {
                        pos -= w;
                        dst += l;
                        dy++;
                        if (dy >= h)
                            return 0;
                    }
                    while (count >= 0) {
                        int bits = bytestream2_get_byte(gb);

                        for (int i = 0; i < 4; i++) {
                            switch (bits & 3) {
                            case 0:
                                break;
                            case 1:
                                if (dy < 1 && !pos)
                                    return AVERROR_INVALIDDATA;
                                else
                                    dst[pos] = pos ? dst[pos - 1] : dst[-l + w - 1];
                                break;
                            case 2:
                                if (dy < 1)
                                    return AVERROR_INVALIDDATA;
                                dst[pos] = dst[pos - l];
                                break;
                            case 3:
                                dst[pos] = bytestream2_get_byte(gb);
                                break;
                            }

                            pos++;
                            if (pos >= w) {
                                pos -= w;
                                dst += l;
                                dy++;
                                if (dy >= h)
                                    return 0;
                            }
                            bits >>= 2;
                        }
                        count--;
                    }
                }
            }
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}

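/*
 * MAD1, 24-bit (BGR0) path: same sub-chunk framing as decode_mad1().
 *   8  - fill 12x12 blocks with a 24-bit colour
 *   7  - motion-compensated block copies (4 bytes per pixel)
 *   12 - 4x4 tiles, traversed column by column and selected by a leading
 *        bitmap; each tile row uses 2-bit modes (keep, copy left, copy
 *        above, or fetch a pixel via the mv0/mv1 motion-vector tables or
 *        read a literal colour)
 */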
static int decode_mad1_24(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int w = frame->width;
    const int h = frame->height;
    const int l = frame->linesize[0] / 4;

    while (bytestream2_get_bytes_left(gb) > 0) {
        int osize, type, pos, dy, di, bcode, value, v14;
        const uint8_t *bits;
        uint32_t *dst;

        type = bytestream2_get_byte(gb);
        if (type == 0xFF)
            return 0;

        switch (type) {
        case 8:
            dst = (uint32_t *)frame->data[0];
            for (int y = 0; y + 12 <= h; y += 12) {
                for (int x = 0; x + 12 <= w; x += 12) {
                    int fill = bytestream2_get_be24(gb);
                    uint32_t *dstp = dst + x;

                    for (int by = 0; by < 12; by++) {
                        for (int bx = 0; bx < 12; bx++)
                            dstp[bx] = fill;

                        dstp += l;
                    }
                }

                dst += 12 * l;
            }
            break;
        case 7:
            while (bytestream2_get_bytes_left(gb) > 0) {
                int bsize = bytestream2_get_byte(gb);
                uint32_t *src;
                int count;

                if (!bsize)
                    break;

                count = bytestream2_get_be16(gb);
                while (count > 0) {
                    int mvx, mvy, a, b, c, mx, my;
                    int bsize_w, bsize_h;

                    bsize_w = bsize_h = bsize;
                    if (bytestream2_get_bytes_left(gb) < 4)
                        return AVERROR_INVALIDDATA;
                    mvx = bytestream2_get_byte(gb) * bsize;
                    mvy = bytestream2_get_byte(gb) * bsize;
                    a = bytestream2_get_byte(gb);
                    b = bytestream2_get_byte(gb);
                    c = ((a & 0x3F) << 8) + b;
                    mx = mvx + (c & 0x7F) - 64;
                    my = mvy + (c >> 7) - 64;

                    if (mvy < 0 || mvy >= h)
                        return AVERROR_INVALIDDATA;

                    if (mvx < 0 || mvx >= w)
                        return AVERROR_INVALIDDATA;

                    if (my < 0 || my >= h)
                        return AVERROR_INVALIDDATA;

                    if (mx < 0 || mx >= w)
                        return AVERROR_INVALIDDATA;

                    dst = (uint32_t *)frame->data[0] + mvx + l * mvy;
                    src = (uint32_t *)frame->data[0] + mx + l * my;

                    bsize_w = FFMIN3(bsize_w, w - mvx, w - mx);
                    bsize_h = FFMIN3(bsize_h, h - mvy, h - my);

                    if (mvy >= my && (mvy != my || mvx >= mx)) {
                        src += (bsize_h - 1) * l;
                        dst += (bsize_h - 1) * l;
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w * 4);
                            src -= l;
                            dst -= l;
                        }
                    } else {
                        for (int by = 0; by < bsize_h; by++) {
                            memmove(dst, src, bsize_w * 4);
                            src += l;
                            dst += l;
                        }
                    }

                    count--;
                }
            }
            break;
        case 12:
            osize = ((h + 3) / 4) * ((w + 3) / 4) + 7;
            bits  = gb->buffer;
            di    = 0;
            bcode = v14 = 0;
            if (bytestream2_get_bytes_left(gb) < osize >> 3)
                return AVERROR_INVALIDDATA;
            bytestream2_skip(gb, osize >> 3);
            for (int x = 0; x < w; x += 4) {
                for (int y = 0; y < h; y += 4) {
                    int astate = 0;

                    if (bits[di >> 3] & (1 << (di & 7))) {
                        int codes = bytestream2_get_byte(gb);

                        for (int count = 0; count < 4; count++) {
                            uint32_t *src = (uint32_t *)frame->data[0];
                            size_t src_size = l * (h - 1) + (w - 1);
                            int nv, v, code = codes & 3;

                            pos = x;
                            dy  = y + count;
                            dst = (uint32_t *)frame->data[0] + pos + dy * l;
                            if (code & 1)
                                bcode = bytestream2_get_byte(gb);
                            if (code == 3) {
                                for (int j = 0; j < 4; j++) {
                                    switch (bcode & 3) {
                                    case 0:
                                        break;
                                    case 1:
                                        if (dy < 1 && !pos)
                                            return AVERROR_INVALIDDATA;
                                        dst[0] = dst[-1];
                                        break;
                                    case 2:
                                        if (dy < 1)
                                            return AVERROR_INVALIDDATA;
                                        dst[0] = dst[-l];
                                        break;
                                    case 3:
                                        if (astate) {
                                            nv = value >> 4;
                                        } else {
                                            value = bytestream2_get_byte(gb);
                                            nv = value & 0xF;
                                        }
                                        astate ^= 1;
                                        dst[0] = src[av_clip(l * (dy + s->mv1[nv][1]) + pos +
                                                             s->mv1[nv][0], 0, src_size)];
                                        break;
                                    }

                                    bcode >>= 2;
                                    dst++;
                                    pos++;
                                }
                            } else if (code) {
                                if (code == 1)
                                    v14 = bcode;
                                else
                                    bcode = v14;
                                for (int j = 0; j < 4; j++) {
                                    switch (bcode & 3) {
                                    case 0:
                                        break;
                                    case 1:
                                        if (dy < 1 && !pos)
                                            return AVERROR_INVALIDDATA;
                                        dst[0] = dst[-1];
                                        break;
                                    case 2:
                                        if (dy < 1)
                                            return AVERROR_INVALIDDATA;
                                        dst[0] = dst[-l];
                                        break;
                                    case 3:
                                        v = bytestream2_get_byte(gb);
                                        if (v < 128) {
                                            dst[0] = src[av_clip(l * (dy + s->mv0[v][1]) + pos +
                                                                 s->mv0[v][0], 0, src_size)];
                                        } else {
                                            dst[0] = ((v & 0x7F) << 17) | bytestream2_get_be16(gb);
                                        }
                                        break;
                                    }

                                    bcode >>= 2;
                                    dst++;
                                    pos++;
                                }
                            }

                            codes >>= 2;
                        }
                    }

                    di++;
                }
            }
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
    }

    return AVERROR_INVALIDDATA;
}

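/*
 * RLEF/RLED: plain run-length coding for the 8-bit mode. Each pair of
 * bytes is (count, value); a count of zero means "skip value pixels"
 * instead, leaving the previous frame's content in place.
 */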
static int decode_rle(AVCodecContext *avctx, AVFrame *frame)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    const int w = frame->width;
    const int h = frame->height;
    const int l = frame->linesize[0];
    uint8_t *dst = frame->data[0];
    int pos = 0, y = 0;

    while (bytestream2_get_bytes_left(gb) > 0) {
        int count = bytestream2_get_byte(gb);
        int pixel = bytestream2_get_byte(gb);

        if (!count) {
            pos += pixel;
            while (pos >= w) {
                pos -= w;
                y++;
                if (y >= h)
                    return 0;
            }
        } else {
            while (count > 0) {
                dst[pos + y * l] = pixel;
                count--;
                pos++;
                if (pos >= w) {
                    pos = 0;
                    y++;
                    if (y >= h)
                        return 0;
                }
            }
        }
    }

    return 0;
}

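/*
 * Each packet starts with a four-character chunk tag:
 *   PAL8        - palette update; clears the picture, no frame is returned
 *   MAD1        - intra/inter frame, 8-bit or 24-bit depending on pix_fmt
 *   AVCF / RLEF - keyframes, 8-bit mode only
 *   ALCD / RLED - delta frames, 8-bit mode only
 * The case fallthroughs below are intentional: when the pixel format does
 * not match, the tag ends up reported as an unknown chunk.
 */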
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    ArgoContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    AVFrame *frame = s->frame;
    uint32_t chunk;
    int ret;

    if (avpkt->size < 4)
        return AVERROR_INVALIDDATA;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if ((ret = ff_reget_buffer(avctx, frame, 0)) < 0)
        return ret;

    chunk = bytestream2_get_be32(gb);
    switch (chunk) {
    case MKBETAG('P', 'A', 'L', '8'):
        for (int y = 0; y < frame->height; y++)
            memset(frame->data[0] + y * frame->linesize[0], 0, frame->width * s->bpp);
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
            memset(frame->data[1], 0, AVPALETTE_SIZE);
        return decode_pal8(avctx, s->pal);
    case MKBETAG('M', 'A', 'D', '1'):
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
            ret = decode_mad1(avctx, frame);
        else
            ret = decode_mad1_24(avctx, frame);
        break;
    case MKBETAG('A', 'V', 'C', 'F'):
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            s->key = 1;
            ret = decode_avcf(avctx, frame);
            break;
        }
    case MKBETAG('A', 'L', 'C', 'D'):
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            s->key = 0;
            ret = decode_alcd(avctx, frame);
            break;
        }
    case MKBETAG('R', 'L', 'E', 'F'):
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            s->key = 1;
            ret = decode_rle(avctx, frame);
            break;
        }
    case MKBETAG('R', 'L', 'E', 'D'):
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            s->key = 0;
            ret = decode_rle(avctx, frame);
            break;
        }
    default:
        av_log(avctx, AV_LOG_DEBUG, "unknown chunk 0x%X\n", chunk);
        break;
    }

    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(frame->data[1], s->pal, AVPALETTE_SIZE);

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;

    frame->pict_type = s->key ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    frame->key_frame = s->key;
    *got_frame = 1;

    return avpkt->size;
}

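/*
 * decode_init() maps the coded depth to a pixel format (8 -> PAL8,
 * 24 -> BGR0) and precomputes the motion-vector tables used by the
 * 24-bit MAD1 mode: mv0 holds 128 vectors with dx in [-14,1] and
 * dy in [-4,3], mv1 holds 16 vectors with odd dx,dy in [-5,1].
 */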
static av_cold int decode_init(AVCodecContext *avctx)
{
    ArgoContext *s = avctx->priv_data;

    switch (avctx->bits_per_coded_sample) {
    case 8:  s->bpp = 1;
             avctx->pix_fmt = AV_PIX_FMT_PAL8; break;
    case 24: s->bpp = 4;
             avctx->pix_fmt = AV_PIX_FMT_BGR0; break;
    default: avpriv_request_sample(s, "depth == %u", avctx->bits_per_coded_sample);
             return AVERROR_PATCHWELCOME;
    }

    if (avctx->width % 2 || avctx->height % 2) {
        avpriv_request_sample(s, "Odd dimensions\n");
        return AVERROR_PATCHWELCOME;
    }

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    for (int n = 0, i = -4; i < 4; i++) {
        for (int j = -14; j < 2; j++) {
            s->mv0[n][0] = j;
            s->mv0[n++][1] = i;
        }
    }

    for (int n = 0, i = -5; i <= 1; i += 2) {
        int j = -5;

        while (j <= 1) {
            s->mv1[n][0] = j;
            s->mv1[n++][1] = i;
            j += 2;
        }
    }

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    ArgoContext *s = avctx->priv_data;

    av_frame_unref(s->frame);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ArgoContext *s = avctx->priv_data;

    av_frame_free(&s->frame);

    return 0;
}

const AVCodec ff_argo_decoder = {
    .name           = "argo",
    .long_name      = NULL_IF_CONFIG_SMALL("Argonaut Games Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_ARGO,
    .priv_data_size = sizeof(ArgoContext),
    .init           = decode_init,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .close          = decode_close,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
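
Illustrative usage (not part of argo.c): a minimal sketch of how this decoder could be driven through the public libavcodec API. The helper name decode_one_argo_packet is hypothetical, and the width, height and bit depth are assumed to be known to the caller; in a real program they normally come from the demuxer via avcodec_parameters_to_context(). Error handling is abbreviated, and meaningful output generally requires feeding the palette and keyframe packets first.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode one ARGO video packet into "out".
 * depth is bits_per_coded_sample: 8 selects PAL8 output, 24 selects BGR0. */
static int decode_one_argo_packet(const AVPacket *pkt, int width, int height,
                                  int depth, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_ARGO);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ctx->width                 = width;   /* must be even, see decode_init() */
    ctx->height                = height;
    ctx->bits_per_coded_sample = depth;

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret < 0)
        goto end;

    ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        goto end;

    ret = avcodec_receive_frame(ctx, out);

end:
    avcodec_free_context(&ctx);
    return ret;
}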