FFmpeg
magicyuv.c
1 /*
2  * MagicYUV decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdlib.h>
23 #include <string.h>
24 
25 #define CACHED_BITSTREAM_READER !ARCH_X86_32
26 
27 #include "libavutil/pixdesc.h"
28 
29 #include "avcodec.h"
30 #include "bytestream.h"
31 #include "codec_internal.h"
32 #include "decode.h"
33 #include "get_bits.h"
34 #include "lossless_videodsp.h"
35 #include "thread.h"
36 
37 #define VLC_BITS 12
38 
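/* Byte range of one plane slice inside the packet: 'start' is the offset of
 * the slice bitstream (header included) and 'size' its length in bytes. */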
39 typedef struct Slice {
40  uint32_t start;
41  uint32_t size;
42 } Slice;
43 
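/* Spatial predictors signalled per slice: LEFT adds the previous sample,
 * GRADIENT adds left + top - top-left, and MEDIAN adds the median of left,
 * top and the gradient value. The slice decoders below switch on this. */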
44 typedef enum Prediction {
45  LEFT = 1,
46  GRADIENT,
47  MEDIAN,
48 } Prediction;
49 
50 typedef struct HuffEntry {
51  uint8_t len;
52  uint16_t sym;
53 } HuffEntry;
54 
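/* Per-decoder state. It is filled while parsing the frame header in
 * magy_decode_frame() and afterwards only read by the slice threads,
 * so it can be shared between them without locking. */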
55 typedef struct MagicYUVContext {
56  AVFrame *p;
57  int max;
58  int bps;
59  int slice_height;
60  int nb_slices;
61  int planes; // number of encoded planes in bitstream
62  int decorrelate; // postprocessing work
63  int color_matrix; // video color matrix
64  int flags;
65  int interlaced; // video is interlaced
66  const uint8_t *buf; // pointer to AVPacket->data
67  int hshift[4];
68  int vshift[4];
69  Slice *slices[4]; // slice bitstream positions for each plane
70  unsigned int slices_size[4]; // slice sizes for each plane
71  VLC vlc[4]; // VLC for each plane
72  VLC_MULTI multi[4]; // Buffer for joint VLC data
73  int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
74  int j, int threadnr);
75  LLVidDSPContext llviddsp;
76 } MagicYUVContext;
77 
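/* Build the per-plane VLC tables from canonical Huffman code lengths.
 * codes_pos[] arrives as a histogram of code lengths; converting it to
 * suffix sums gives each length its bucket, so he[] comes out grouped by
 * code length before ff_vlc_init_multi_from_lengths() is called. The
 * "multi" table lets READ_PLANE() decode several symbols per lookup. */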
78 static int huff_build(const uint8_t len[], uint16_t codes_pos[33],
79  VLC *vlc, VLC_MULTI *multi, int nb_elems, void *logctx)
80 {
81  HuffEntry he[4096];
82 
83  for (int i = 31; i > 0; i--)
84  codes_pos[i] += codes_pos[i + 1];
85 
86  for (unsigned i = nb_elems; i-- > 0;)
87  he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };
88 
89  ff_vlc_free(vlc);
90  ff_vlc_free_multi(multi);
91  return ff_vlc_init_multi_from_lengths(vlc, multi, FFMIN(he[0].len, VLC_BITS), nb_elems, nb_elems,
92  &he[0].len, sizeof(he[0]),
93  &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
94  0, 0, logctx);
95 }
96 
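/* 16-bit median predictor: each output sample is the median of left, top
 * and (left + top - top-left) plus the decoded residual, masked to the
 * active bit depth ('max' is (1 << bps) - 1 here). */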
97 static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
98  const uint16_t *diff, intptr_t w,
99  int *left, int *left_top, int max)
100 {
101  int i;
102  uint16_t l, lt;
103 
104  l = *left;
105  lt = *left_top;
106 
107  for (i = 0; i < w; i++) {
108  l = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
109  l &= max;
110  lt = src1[i];
111  dst[i] = l;
112  }
113 
114  *left = l;
115  *left_top = lt;
116 }
117 
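/* Decode one row of 'width' samples for the current plane. With the cached
 * bitstream reader, get_vlc_multi() can emit several samples per call; the
 * loop stops 'c' samples short of the row end so a multi-symbol hit cannot
 * overrun the row, then the tail is decoded one symbol at a time with
 * get_vlc2(). 'b' is the sample size in bytes. */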
118 #define READ_PLANE(dst, plane, b, c) \
119 { \
120  x = 0; \
121  for (; CACHED_BITSTREAM_READER && x < width-c && get_bits_left(&gb) > 0;) {\
122  ret = get_vlc_multi(&gb, (uint8_t *)dst + x * b, multi, \
123  vlc, vlc_bits, 3); \
124  if (ret > 0) \
125  x += ret; \
126  if (ret <= 0) \
127  return AVERROR_INVALIDDATA; \
128  } \
129  for (; x < width && get_bits_left(&gb) > 0; x++) \
130  dst[x] = get_vlc2(&gb, vlc, vlc_bits, 3); \
131  dst += stride; \
132 }
133 
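/* Slice decoder for the >8-bit formats. Per plane it reads a flags byte and
 * a predictor byte, then either raw 'bps'-bit samples (flags & 1) or
 * VLC-coded residuals via READ_PLANE, applies the selected predictor
 * (masking to s->max - 1), and finally undoes the G/B/R decorrelation for
 * the GBR pixel formats. */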
134 static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
135  int j, int threadnr)
136 {
137  const MagicYUVContext *s = avctx->priv_data;
138  int interlaced = s->interlaced;
139  const int bps = s->bps;
140  const int max = s->max - 1;
141  AVFrame *p = s->p;
142  int i, k, x;
143  GetBitContext gb;
144  uint16_t *dst;
145 
146  for (i = 0; i < s->planes; i++) {
147  int left, lefttop, top;
148  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
149  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
150  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
151  ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
152  ptrdiff_t stride = p->linesize[i] / 2;
153  const VLC_MULTI_ELEM *const multi = s->multi[i].table;
154  const VLCElem *const vlc = s->vlc[i].table;
155  const int vlc_bits = s->vlc[i].bits;
156  int flags, pred;
157  int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
158  s->slices[i][j].size);
159 
160  if (ret < 0)
161  return ret;
162 
163  flags = get_bits(&gb, 8);
164  pred = get_bits(&gb, 8);
165 
166  dst = (uint16_t *)p->data[i] + j * sheight * stride;
167  if (flags & 1) {
168  if (get_bits_left(&gb) < bps * width * height)
169  return AVERROR_INVALIDDATA;
170  for (k = 0; k < height; k++) {
171  for (x = 0; x < width; x++)
172  dst[x] = get_bits(&gb, bps);
173 
174  dst += stride;
175  }
176  } else {
177  for (k = 0; k < height; k++)
178  READ_PLANE(dst, i, 2, 3)
179  }
180 
181  switch (pred) {
182  case LEFT:
183  dst = (uint16_t *)p->data[i] + j * sheight * stride;
184  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
185  dst += stride;
186  if (interlaced) {
187  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
188  dst += stride;
189  }
190  for (k = 1 + interlaced; k < height; k++) {
191  s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
192  dst += stride;
193  }
194  break;
195  case GRADIENT:
196  dst = (uint16_t *)p->data[i] + j * sheight * stride;
197  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
198  dst += stride;
199  if (interlaced) {
200  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
201  dst += stride;
202  }
203  for (k = 1 + interlaced; k < height; k++) {
204  top = dst[-fake_stride];
205  left = top + dst[0];
206  dst[0] = left & max;
207  for (x = 1; x < width; x++) {
208  top = dst[x - fake_stride];
209  lefttop = dst[x - (fake_stride + 1)];
210  left += top - lefttop + dst[x];
211  dst[x] = left & max;
212  }
213  dst += stride;
214  }
215  break;
216  case MEDIAN:
217  dst = (uint16_t *)p->data[i] + j * sheight * stride;
218  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
219  dst += stride;
220  if (interlaced) {
221  s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
222  dst += stride;
223  }
224  lefttop = left = dst[0];
225  for (k = 1 + interlaced; k < height; k++) {
226  magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
227  lefttop = left = dst[0];
228  dst += stride;
229  }
230  break;
231  default:
232  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
233  }
234  }
235 
236  if (s->decorrelate) {
237  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
238  int width = avctx->coded_width;
239  uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
240  uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
241  uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
242 
243  for (i = 0; i < height; i++) {
244  for (k = 0; k < width; k++) {
245  b[k] = (b[k] + g[k]) & max;
246  r[k] = (r[k] + g[k]) & max;
247  }
248  b += p->linesize[0] / 2;
249  g += p->linesize[1] / 2;
250  r += p->linesize[2] / 2;
251  }
252  }
253 
254  return 0;
255 }
256 
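/* 8-bit counterpart of magy_decode_slice10(). Raw rows are copied straight
 * from the packet, residual rows go through READ_PLANE, and the predictors
 * are applied with the LLVidDSP helpers (add_left_pred, add_gradient_pred,
 * add_median_pred); decorrelation uses add_bytes per row. */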
257 static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
258  int j, int threadnr)
259 {
260  const MagicYUVContext *s = avctx->priv_data;
261  int interlaced = s->interlaced;
262  AVFrame *p = s->p;
263  int i, k, x, min_width;
264  GetBitContext gb;
265  uint8_t *dst;
266 
267  for (i = 0; i < s->planes; i++) {
268  int left, lefttop, top;
269  int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
270  int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
271  int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
272  ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
273  ptrdiff_t stride = p->linesize[i];
274  const uint8_t *slice = s->buf + s->slices[i][j].start;
275  const VLC_MULTI_ELEM *const multi = s->multi[i].table;
276  const VLCElem *const vlc = s->vlc[i].table;
277  const int vlc_bits = s->vlc[i].bits;
278  int flags, pred;
279 
280  flags = bytestream_get_byte(&slice);
281  pred = bytestream_get_byte(&slice);
282 
283  dst = p->data[i] + j * sheight * stride;
284  if (flags & 1) {
285  if (s->slices[i][j].size - 2 < width * height)
286  return AVERROR_INVALIDDATA;
287  for (k = 0; k < height; k++) {
288  bytestream_get_buffer(&slice, dst, width);
289  dst += stride;
290  }
291  } else {
292  int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);
293 
294  if (ret < 0)
295  return ret;
296 
297  for (k = 0; k < height; k++)
298  READ_PLANE(dst, i, 1, 5)
299  }
300 
301  switch (pred) {
302  case LEFT:
303  dst = p->data[i] + j * sheight * stride;
304  s->llviddsp.add_left_pred(dst, dst, width, 0);
305  dst += stride;
306  if (interlaced) {
307  s->llviddsp.add_left_pred(dst, dst, width, 0);
308  dst += stride;
309  }
310  for (k = 1 + interlaced; k < height; k++) {
311  s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
312  dst += stride;
313  }
314  break;
315  case GRADIENT:
316  dst = p->data[i] + j * sheight * stride;
317  s->llviddsp.add_left_pred(dst, dst, width, 0);
318  dst += stride;
319  if (interlaced) {
320  s->llviddsp.add_left_pred(dst, dst, width, 0);
321  dst += stride;
322  }
323  min_width = FFMIN(width, 32);
324  for (k = 1 + interlaced; k < height; k++) {
325  top = dst[-fake_stride];
326  left = top + dst[0];
327  dst[0] = left;
328  for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
329  top = dst[x - fake_stride];
330  lefttop = dst[x - (fake_stride + 1)];
331  left += top - lefttop + dst[x];
332  dst[x] = left;
333  }
334  if (width > 32)
335  s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
336  dst += stride;
337  }
338  break;
339  case MEDIAN:
340  dst = p->data[i] + j * sheight * stride;
341  s->llviddsp.add_left_pred(dst, dst, width, 0);
342  dst += stride;
343  if (interlaced) {
344  s->llviddsp.add_left_pred(dst, dst, width, 0);
345  dst += stride;
346  }
347  lefttop = left = dst[0];
348  for (k = 1 + interlaced; k < height; k++) {
349  s->llviddsp.add_median_pred(dst, dst - fake_stride,
350  dst, width, &left, &lefttop);
351  lefttop = left = dst[0];
352  dst += stride;
353  }
354  break;
355  default:
356  avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
357  }
358  }
359 
360  if (s->decorrelate) {
361  int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
362  int width = avctx->coded_width;
363  uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
364  uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
365  uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];
366 
367  for (i = 0; i < height; i++) {
368  s->llviddsp.add_bytes(b, g, width);
369  s->llviddsp.add_bytes(r, g, width);
370  b += p->linesize[0];
371  g += p->linesize[1];
372  r += p->linesize[2];
373  }
374  }
375 
376  return 0;
377 }
378 
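/* Parse the run-length coded Huffman length table that follows the slice
 * offsets. Each byte carries a code length in its low 7 bits; if the high
 * bit is set, the next byte extends the run. One table of 'max' (1 << bps)
 * lengths is expected per coded plane. */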
379 static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
380  int table_size, int max)
381 {
382  MagicYUVContext *s = avctx->priv_data;
383  GetByteContext gb;
384  uint8_t len[4096];
385  uint16_t length_count[33] = { 0 };
386  int i = 0, j = 0, k;
387 
388  bytestream2_init(&gb, table, table_size);
389 
390  while (bytestream2_get_bytes_left(&gb) > 0) {
391  int b = bytestream2_peek_byteu(&gb) & 0x80;
392  int x = bytestream2_get_byteu(&gb) & ~0x80;
393  int l = 1;
394 
395  if (b) {
396  if (bytestream2_get_bytes_left(&gb) <= 0)
397  break;
398  l += bytestream2_get_byteu(&gb);
399  }
400  k = j + l;
401  if (k > max || x == 0 || x > 32) {
402  av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
403  return AVERROR_INVALIDDATA;
404  }
405 
406  length_count[x] += l;
407  for (; j < k; j++)
408  len[j] = x;
409 
410  if (j == max) {
411  j = 0;
412  if (huff_build(len, length_count, &s->vlc[i], &s->multi[i], max, avctx)) {
413  av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
414  return AVERROR_INVALIDDATA;
415  }
416  i++;
417  if (i == s->planes) {
418  break;
419  }
420  memset(length_count, 0, sizeof(length_count));
421  }
422  }
423 
424  if (i != s->planes) {
425  av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
426  return AVERROR_INVALIDDATA;
427  }
428 
429  return 0;
430 }
431 
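/* Top-level frame decoder: validates the 'MAGY' header, maps the format
 * byte to a pixel format, reads the per-plane slice offset tables, builds
 * the Huffman tables, and then decodes all slices in parallel through
 * avctx->execute2(). For the GBR formats the first two planes are swapped
 * afterwards to match the plane order of the GBR pixel formats. */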
432 static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p,
433  int *got_frame, AVPacket *avpkt)
434 {
435  MagicYUVContext *s = avctx->priv_data;
436  GetByteContext gb;
437  uint32_t first_offset, offset, next_offset, header_size, slice_width;
438  int width, height, format, version, table_size;
439  int ret, i, j;
440 
441  if (avpkt->size < 36)
442  return AVERROR_INVALIDDATA;
443 
444  bytestream2_init(&gb, avpkt->data, avpkt->size);
445  if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
446  return AVERROR_INVALIDDATA;
447 
448  header_size = bytestream2_get_le32u(&gb);
449  if (header_size < 32 || header_size >= avpkt->size) {
450  av_log(avctx, AV_LOG_ERROR,
451  "header or packet too small %"PRIu32"\n", header_size);
452  return AVERROR_INVALIDDATA;
453  }
454 
455  version = bytestream2_get_byteu(&gb);
456  if (version != 7) {
457  avpriv_request_sample(avctx, "Version %d", version);
458  return AVERROR_PATCHWELCOME;
459  }
460 
461  s->hshift[1] =
462  s->vshift[1] =
463  s->hshift[2] =
464  s->vshift[2] = 0;
465  s->decorrelate = 0;
466  s->bps = 8;
467 
468  format = bytestream2_get_byteu(&gb);
469  switch (format) {
470  case 0x65:
471  avctx->pix_fmt = AV_PIX_FMT_GBRP;
472  s->decorrelate = 1;
473  break;
474  case 0x66:
475  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
476  s->decorrelate = 1;
477  break;
478  case 0x67:
479  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
480  break;
481  case 0x68:
482  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
483  s->hshift[1] =
484  s->hshift[2] = 1;
485  break;
486  case 0x69:
487  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
488  s->hshift[1] =
489  s->vshift[1] =
490  s->hshift[2] =
491  s->vshift[2] = 1;
492  break;
493  case 0x6a:
494  avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
495  break;
496  case 0x6b:
497  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
498  break;
499  case 0x6c:
500  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
501  s->hshift[1] =
502  s->hshift[2] = 1;
503  s->bps = 10;
504  break;
505  case 0x76:
506  avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
507  s->bps = 10;
508  break;
509  case 0x6d:
510  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
511  s->decorrelate = 1;
512  s->bps = 10;
513  break;
514  case 0x6e:
515  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
516  s->decorrelate = 1;
517  s->bps = 10;
518  break;
519  case 0x6f:
520  avctx->pix_fmt = AV_PIX_FMT_GBRP12;
521  s->decorrelate = 1;
522  s->bps = 12;
523  break;
524  case 0x70:
525  avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
526  s->decorrelate = 1;
527  s->bps = 12;
528  break;
529  case 0x73:
530  avctx->pix_fmt = AV_PIX_FMT_GRAY10;
531  s->bps = 10;
532  break;
533  case 0x7b:
534  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
535  s->hshift[1] =
536  s->vshift[1] =
537  s->hshift[2] =
538  s->vshift[2] = 1;
539  s->bps = 10;
540  break;
541  default:
542  avpriv_request_sample(avctx, "Format 0x%X", format);
543  return AVERROR_PATCHWELCOME;
544  }
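 /* Derived from the format byte above: sample range, bit-depth specific
  * slice decoder and plane count. */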
545  s->max = 1 << s->bps;
546  s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
547  s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);
548 
549  bytestream2_skipu(&gb, 1);
550  s->color_matrix = bytestream2_get_byteu(&gb);
551  s->flags = bytestream2_get_byteu(&gb);
552  s->interlaced = !!(s->flags & 2);
553  bytestream2_skipu(&gb, 3);
554 
555  width = bytestream2_get_le32u(&gb);
556  height = bytestream2_get_le32u(&gb);
557  ret = ff_set_dimensions(avctx, width, height);
558  if (ret < 0)
559  return ret;
560 
561  slice_width = bytestream2_get_le32u(&gb);
562  if (slice_width != avctx->coded_width) {
563  avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
564  return AVERROR_PATCHWELCOME;
565  }
566  s->slice_height = bytestream2_get_le32u(&gb);
567  if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
568  av_log(avctx, AV_LOG_ERROR,
569  "invalid slice height: %d\n", s->slice_height);
570  return AVERROR_INVALIDDATA;
571  }
572 
573  bytestream2_skipu(&gb, 4);
574 
575  s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
576  if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
577  av_log(avctx, AV_LOG_ERROR,
578  "invalid number of slices: %d\n", s->nb_slices);
579  return AVERROR_INVALIDDATA;
580  }
581 
582  if (s->interlaced) {
583  if ((s->slice_height >> s->vshift[1]) < 2) {
584  av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
585  return AVERROR_INVALIDDATA;
586  }
587  if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
588  av_log(avctx, AV_LOG_ERROR, "impossible height\n");
589  return AVERROR_INVALIDDATA;
590  }
591  }
592 
593  if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
594  return AVERROR_INVALIDDATA;
595  for (i = 0; i < s->planes; i++) {
596  av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
597  if (!s->slices[i])
598  return AVERROR(ENOMEM);
599 
600  offset = bytestream2_get_le32u(&gb);
601  if (offset >= avpkt->size - header_size)
602  return AVERROR_INVALIDDATA;
603 
604  if (i == 0)
605  first_offset = offset;
606 
607  for (j = 0; j < s->nb_slices - 1; j++) {
608  s->slices[i][j].start = offset + header_size;
609 
610  next_offset = bytestream2_get_le32u(&gb);
611  if (next_offset <= offset || next_offset >= avpkt->size - header_size)
612  return AVERROR_INVALIDDATA;
613 
614  s->slices[i][j].size = next_offset - offset;
615  if (s->slices[i][j].size < 2)
616  return AVERROR_INVALIDDATA;
617  offset = next_offset;
618  }
619 
620  s->slices[i][j].start = offset + header_size;
621  s->slices[i][j].size = avpkt->size - s->slices[i][j].start;
622 
623  if (s->slices[i][j].size < 2)
624  return AVERROR_INVALIDDATA;
625  }
626 
627  if (bytestream2_get_byteu(&gb) != s->planes)
628  return AVERROR_INVALIDDATA;
629 
630  bytestream2_skipu(&gb, s->nb_slices * s->planes);
631 
632  table_size = header_size + first_offset - bytestream2_tell(&gb);
633  if (table_size < 2)
634  return AVERROR_INVALIDDATA;
635 
636  ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
637  table_size, s->max);
638  if (ret < 0)
639  return ret;
640 
641  p->pict_type = AV_PICTURE_TYPE_I;
642  p->flags |= AV_FRAME_FLAG_KEY;
643 
644  if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
645  return ret;
646 
647  s->buf = avpkt->data;
648  s->p = p;
649  avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);
650 
651  if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
652  avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
653  avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
654  avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
655  avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
656  avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
657  FFSWAP(uint8_t*, p->data[0], p->data[1]);
658  FFSWAP(int, p->linesize[0], p->linesize[1]);
659  } else {
660  switch (s->color_matrix) {
661  case 1:
662  p->colorspace = AVCOL_SPC_BT470BG;
663  break;
664  case 2:
665  p->colorspace = AVCOL_SPC_BT709;
666  break;
667  }
668  p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
669  }
670 
671  *got_frame = 1;
672 
673  return avpkt->size;
674 }
675 
676 static av_cold int magy_decode_init(AVCodecContext *avctx)
677 {
678  MagicYUVContext *s = avctx->priv_data;
679  ff_llviddsp_init(&s->llviddsp);
680  return 0;
681 }
682 
683 static av_cold int magy_decode_end(AVCodecContext *avctx)
684 {
685  MagicYUVContext * const s = avctx->priv_data;
686  int i;
687 
688  for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
689  av_freep(&s->slices[i]);
690  s->slices_size[i] = 0;
691  ff_vlc_free(&s->vlc[i]);
692  ff_vlc_free_multi(&s->multi[i]);
693  }
694 
695  return 0;
696 }
697 
698 const FFCodec ff_magicyuv_decoder = {
699  .p.name = "magicyuv",
700  CODEC_LONG_NAME("MagicYUV video"),
701  .p.type = AVMEDIA_TYPE_VIDEO,
702  .p.id = AV_CODEC_ID_MAGICYUV,
703  .priv_data_size = sizeof(MagicYUVContext),
704  .init = magy_decode_init,
705  .close = magy_decode_end,
706  FF_CODEC_DECODE_CB(magy_decode_frame),
707  .p.capabilities = AV_CODEC_CAP_DR1 |
708  AV_CODEC_CAP_FRAME_THREADS |
709  AV_CODEC_CAP_SLICE_THREADS,
710 };
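
For readers coming from the public API side, here is a minimal, illustrative sketch (not part of this file) of feeding one demuxed packet to this decoder through libavcodec's send/receive interface. The helper name decode_magicyuv_packet is hypothetical, error handling is reduced to the essentials, and the packet is assumed to come from libavformat.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode a single demuxed MagicYUV packet into 'frame'.
 * Returns 0 on success or a negative AVERROR code. */
static int decode_magicyuv_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MAGICYUV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    /* No extradata is required: the MAGY header is carried in every packet
     * and parsed by magy_decode_frame() above. */
    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, frame);

    avcodec_free_context(&ctx);
    return ret < 0 ? ret : 0;
}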