FFmpeg
magicyuv.c
/*
 * MagicYUV decoder
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#define CACHED_BITSTREAM_READER !ARCH_X86_32

#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "bytestream.h"
#include "codec_internal.h"
#include "get_bits.h"
#include "huffyuvdsp.h"
#include "internal.h"
#include "lossless_videodsp.h"
#include "thread.h"

typedef struct Slice {
    uint32_t start;
    uint32_t size;
} Slice;

typedef enum Prediction {
    LEFT     = 1,
    GRADIENT = 2,
    MEDIAN   = 3,
} Prediction;

typedef struct HuffEntry {
    uint8_t  len;
    uint16_t sym;
} HuffEntry;

typedef struct MagicYUVContext {
    AVFrame          *p;
    int               max;
    int               bps;
    int               slice_height;
    int               nb_slices;
    int               planes;         // number of encoded planes in bitstream
    int               decorrelate;    // postprocessing work
    int               color_matrix;   // video color matrix
    int               flags;
    int               interlaced;     // video is interlaced
    const uint8_t    *buf;            // pointer to AVPacket->data
    int               hshift[4];
    int               vshift[4];
    Slice            *slices[4];      // slice bitstream positions for each plane
    unsigned int      slices_size[4]; // slice sizes for each plane
    VLC               vlc[4];         // VLC for each plane
    int (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr);
    LLVidDSPContext   llviddsp;
} MagicYUVContext;

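/*
 * Build the VLC table for one plane from the code lengths collected by
 * build_huffman(): codes_pos[] (per-length symbol counts) is turned into
 * bucket offsets, the symbols are grouped by code length (longest codes
 * first), and any table left over from a previous frame is freed before
 * calling ff_init_vlc_from_lengths().
 */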
static int huff_build(const uint8_t len[], uint16_t codes_pos[33],
                      VLC *vlc, int nb_elems, void *logctx)
{
    HuffEntry he[4096];

    for (int i = 31; i > 0; i--)
        codes_pos[i] += codes_pos[i + 1];

    for (unsigned i = nb_elems; i-- > 0;)
        he[--codes_pos[len[i]]] = (HuffEntry){ len[i], i };

    ff_free_vlc(vlc);
    return ff_init_vlc_from_lengths(vlc, FFMIN(he[0].len, 12), nb_elems,
                                    &he[0].len, sizeof(he[0]),
                                    &he[0].sym, sizeof(he[0]), sizeof(he[0].sym),
                                    0, 0, logctx);
}

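/*
 * Median predictor for samples wider than 8 bits; same scheme as the
 * LLVidDSP add_median_pred() used in the 8-bit path, but operating on
 * uint16_t and masking every output sample with the per-plane maximum.
 */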
static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
                                   const uint16_t *diff, intptr_t w,
                                   int *left, int *left_top, int max)
{
    int i;
    uint16_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
        l     &= max;
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}

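/*
 * Decode one slice of a >8-bit frame (run per slice through execute2()).
 * Each plane's slice begins with a flags byte and a prediction byte; the
 * samples are either stored raw (flags & 1) or Huffman-coded, and are then
 * passed through the LEFT, GRADIENT or MEDIAN predictor.
 */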
static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
                               int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    const int bps = s->bps;
    const int max = s->max - 1;
    AVFrame *p = s->p;
    int i, k, x;
    GetBitContext gb;
    uint16_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i] / 2;
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred  = get_bits(&gb, 8);

        dst = (uint16_t *)p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (get_bits_left(&gb) < bps * width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, bps);

                dst += stride;
            }
        } else {
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        switch (pred) {
        case LEFT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left & max;
                for (x = 1; x < width; x++) {
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left & max;
                }
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = (uint16_t *)p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

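    /* Undo inter-plane decorrelation for the GBR formats: two of the planes
     * are stored as differences against the third (the plane that becomes G
     * after the final plane swap), so add it back here. */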
    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
        uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
        uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;

        for (i = 0; i < height; i++) {
            for (k = 0; k < width; k++) {
                b[k] = (b[k] + g[k]) & max;
                r[k] = (r[k] + g[k]) & max;
            }
            b += p->linesize[0] / 2;
            g += p->linesize[1] / 2;
            r += p->linesize[2] / 2;
        }
    }

    return 0;
}

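/*
 * 8-bit counterpart of magy_decode_slice10(): raw slices are copied with
 * bytestream_get_buffer(), Huffman-coded slices are read with get_vlc2(),
 * and the LEFT/GRADIENT/MEDIAN predictors use the LLVidDSP helpers.
 */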
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        const uint8_t *slice = s->buf + s->slices[i][j].start;
        int flags, pred;

        flags = bytestream_get_byte(&slice);
        pred  = bytestream_get_byte(&slice);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            if (s->slices[i][j].size - 2 < width * height)
                return AVERROR_INVALIDDATA;
            for (k = 0; k < height; k++) {
                bytestream_get_buffer(&slice, dst, width);
                dst += stride;
            }
        } else {
            int ret = init_get_bits8(&gb, slice, s->slices[i][j].size - 2);

            if (ret < 0)
                return ret;

            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;

                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;

                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }

    return 0;
}

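/*
 * Parse the run-length coded Huffman tables that precede the slice data:
 * each byte carries a code length in its low 7 bits; if the high bit is set,
 * the following byte extends the run. One VLC is built per coded plane via
 * huff_build().
 */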
static int build_huffman(AVCodecContext *avctx, const uint8_t *table,
                         int table_size, int max)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    uint8_t len[4096];
    uint16_t length_count[33] = { 0 };
    int i = 0, j = 0, k;

    bytestream2_init(&gb, table, table_size);

    while (bytestream2_get_bytes_left(&gb) > 0) {
        int b = bytestream2_peek_byteu(&gb) & 0x80;
        int x = bytestream2_get_byteu(&gb) & ~0x80;
        int l = 1;

        if (b) {
            if (bytestream2_get_bytes_left(&gb) <= 0)
                break;
            l += bytestream2_get_byteu(&gb);
        }
        k = j + l;
        if (k > max || x == 0 || x > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
            return AVERROR_INVALIDDATA;
        }

        length_count[x] += l;
        for (; j < k; j++)
            len[j] = x;

        if (j == max) {
            j = 0;
            if (huff_build(len, length_count, &s->vlc[i], max, avctx)) {
                av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
                return AVERROR_INVALIDDATA;
            }
            i++;
            if (i == s->planes) {
                break;
            }
            memset(length_count, 0, sizeof(length_count));
        }
    }

    if (i != s->planes) {
        av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

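/*
 * Top-level frame decoding. Each packet starts with a 'MAGY' header carrying
 * the header size, codec version, a format byte (pixel format / bit depth),
 * the color matrix, flags (0x2: interlaced, 0x4: full range), frame and
 * slice dimensions, and per-plane slice offset tables, followed by the
 * Huffman tables and the slice payloads. Slices are decoded in parallel
 * through avctx->execute2().
 */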
static int magy_decode_frame(AVCodecContext *avctx, AVFrame *p,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    GetByteContext gb;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    if (avpkt->size < 36)
        return AVERROR_INVALIDDATA;

    bytestream2_init(&gb, avpkt->data, avpkt->size);
    if (bytestream2_get_le32u(&gb) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32u(&gb);
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byteu(&gb);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->bps = 8;

    format = bytestream2_get_byteu(&gb);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->bps = 10;
        break;
    case 0x76:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        s->bps = 10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6f:
        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x70:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->bps = 10;
        break;
    case 0x7b:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        s->bps = 10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }
    s->max = 1 << s->bps;
    s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skipu(&gb, 1);
    s->color_matrix = bytestream2_get_byteu(&gb);
    s->flags        = bytestream2_get_byteu(&gb);
    s->interlaced   = !!(s->flags & 2);
    bytestream2_skipu(&gb, 3);

    width  = bytestream2_get_le32u(&gb);
    height = bytestream2_get_le32u(&gb);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    slice_width = bytestream2_get_le32u(&gb);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32u(&gb);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skipu(&gb, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / FFMAX(sizeof(Slice), 4 * 5)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        if ((s->slice_height >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
            return AVERROR_INVALIDDATA;
        }
        if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible height\n");
            return AVERROR_INVALIDDATA;
        }
    }

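    /* Read the per-plane slice offset tables; offsets are relative to the end
     * of the header, and consecutive offsets give each slice's size. */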
    if (bytestream2_get_bytes_left(&gb) <= s->nb_slices * s->planes * 5)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32u(&gb);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32u(&gb);
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            if (s->slices[i][j].size < 2)
                return AVERROR_INVALIDDATA;
            offset = next_offset;
        }

        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;

        if (s->slices[i][j].size < 2)
            return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_byteu(&gb) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skipu(&gb, s->nb_slices * s->planes);

    table_size = header_size + first_offset - bytestream2_tell(&gb);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = build_huffman(avctx, avpkt->data + bytestream2_tell(&gb),
                        table_size, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP   ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP  ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
        avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    } else {
        switch (s->color_matrix) {
        case 1:
            p->colorspace = AVCOL_SPC_BT470BG;
            break;
        case 2:
            p->colorspace = AVCOL_SPC_BT709;
            break;
        }
        p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int magy_decode_init(AVCodecContext *avctx)
{
    MagicYUVContext *s = avctx->priv_data;
    ff_llviddsp_init(&s->llviddsp);
    return 0;
}

static av_cold int magy_decode_end(AVCodecContext *avctx)
{
    MagicYUVContext * const s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
        av_freep(&s->slices[i]);
        s->slices_size[i] = 0;
        ff_free_vlc(&s->vlc[i]);
    }

    return 0;
}

const FFCodec ff_magicyuv_decoder = {
    .p.name           = "magicyuv",
    .p.long_name      = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .p.type           = AVMEDIA_TYPE_VIDEO,
    .p.id             = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .init             = magy_decode_init,
    .close            = magy_decode_end,
    FF_CODEC_DECODE_CB(magy_decode_frame),
    .p.capabilities   = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS |
                        AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE,
};
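
A minimal sketch of driving this decoder through the public libavcodec API. This is not part of magicyuv.c; the packet is assumed to come from a demuxer (e.g. av_read_frame()), and error handling is kept to the essentials.

#include <libavcodec/avcodec.h>

/* Hypothetical helper: decode one already-demuxed MagicYUV packet into 'out'. */
static int decode_magicyuv_packet(const AVPacket *pkt, AVFrame *out)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_MAGICYUV);
    AVCodecContext *ctx;
    int ret;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    ret = avcodec_open2(ctx, codec, NULL);
    if (ret >= 0)
        ret = avcodec_send_packet(ctx, pkt);
    if (ret >= 0)
        ret = avcodec_receive_frame(ctx, out); /* intra-only codec: one frame per packet */

    avcodec_free_context(&ctx);
    return ret;
}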