FFmpeg
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
32 
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/mem.h"
35 #include "libavutil/pixdesc.h"
36 #include "avcodec.h"
37 #include "bswapdsp.h"
38 #include "bytestream.h"
39 #include "codec_internal.h"
40 #include "get_bits.h"
41 #include "lossless_videodsp.h"
42 #include "thread.h"
43 #include "utvideo.h"
44 #include "utvideodsp.h"
45 
46 typedef struct UtvideoContext {
51 
53  int planes;
54  int slices;
58  int pro;
59  int pack;
60 
61  uint8_t *slice_bits;
63  void *buffer;
64 
65  const uint8_t *packed_stream[4][256];
66  size_t packed_stream_size[4][256];
67  const uint8_t *control_stream[4][256];
68  size_t control_stream_size[4][256];
70 
/* One entry of a per-plane Huffman table: code length plus symbol value.
 * Entries are sorted by build_huff() into UtVideo's canonical order
 * (longest codes first, symbols descending within a length). */
typedef struct HuffEntry {
    uint8_t len;   /* code length in bits; 0 means the symbol is unused */
    uint16_t sym;  /* symbol value (plane sample, up to 10 bits) */
} HuffEntry;
75 
76 static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
77  VLC_MULTI *multi, int *fsym, unsigned nb_elems)
78 {
79  int i;
80  HuffEntry he[1024];
81  uint8_t bits[1024];
82  uint16_t codes_count[33] = { 0 };
83 
84  *fsym = -1;
85  for (i = 0; i < nb_elems; i++) {
86  if (src[i] == 0) {
87  *fsym = i;
88  return 0;
89  } else if (src[i] == 255) {
90  bits[i] = 0;
91  } else if (src[i] <= 32) {
92  bits[i] = src[i];
93  } else
94  return AVERROR_INVALIDDATA;
95 
96  codes_count[bits[i]]++;
97  }
98  if (codes_count[0] == nb_elems)
99  return AVERROR_INVALIDDATA;
100 
101  /* For Ut Video, longer codes are to the left of the tree and
102  * for codes with the same length the symbol is descending from
103  * left to right. So after the next loop --codes_count[i] will
104  * be the index of the first (lowest) symbol of length i when
105  * indexed by the position in the tree with left nodes being first. */
106  for (int i = 31; i >= 0; i--)
107  codes_count[i] += codes_count[i + 1];
108 
109  for (unsigned i = 0; i < nb_elems; i++)
110  he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };
111 
112 #define VLC_BITS 11
113  return ff_vlc_init_multi_from_lengths(vlc, multi, VLC_BITS, nb_elems, codes_count[0],
114  &he[0].len, sizeof(*he),
115  &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
116 }
117 
/* Decode one line of `width` samples from the slice bitstream.
 * b   = bytes per sample (1 for 8-bit planes, 2 for 10-bit planes);
 * end = margin kept for the scalar tail loop when the cached-reader
 *       multi-symbol fast path is active.
 * When use_pred is set, samples are decoded into c->buffer and then
 * left prediction is applied into dest; otherwise decoding goes
 * straight into dest.  `prev` carries the predictor across lines.
 * NOTE(review): this macro expects locals buf, i, ret, gb, prev, dest,
 * stride, width, use_pred, vlc, multi and a `fail:` label to exist at
 * the expansion site; on a VLC error it jumps to that label. */
#define READ_PLANE(b, end) \
{ \
    buf = !use_pred ? dest : c->buffer; \
    i = 0; \
    for (; CACHED_BITSTREAM_READER && i < width-end && get_bits_left(&gb) > 0;) {\
        ret = get_vlc_multi(&gb, (uint8_t *)buf + i * b, multi.table, \
                            vlc.table, VLC_BITS, 3, b); \
        if (ret > 0) \
            i += ret; \
        if (ret <= 0) \
            goto fail; \
    } \
    for (; i < width && get_bits_left(&gb) > 0; i++) \
        buf[i] = get_vlc2(&gb, vlc.table, VLC_BITS, 3); \
    if (use_pred) { \
        if (b == 2) \
            c->llviddsp.add_left_pred_int16((uint16_t *)dest, (const uint16_t *)buf, 0x3ff, width, prev); \
        else \
            c->llviddsp.add_left_pred((uint8_t *)dest, (const uint8_t *)buf, width, prev); \
    } \
    prev = dest[width-1]; \
    dest += stride; \
}
141 
/* Decode one 10-bit plane, slice by slice.
 * src points at the slice-offset table (c->slices little-endian 32-bit
 * end offsets) followed by the slice data; huff is the plane's
 * 1024-entry code-length table.  use_pred enables left prediction.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff(c, huff, &vlc, &multi, &fsym, 1024)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            /* slice rows [sstart, send) */
            sstart = send;
            send   = (height * (slice + 1) / c->slices);
            dest   = dst + sstart * stride;

            prev = 0x200;   /* mid-range predictor seed for 10-bit samples */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF;   /* wrap within the 10-bit range */
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        /* no VLC tables were allocated on this path, nothing to free */
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices);
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* byte-swap the slice into the scratch buffer (bitstream is stored
         * as little-endian 32-bit words) and zero the read-ahead padding */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200;
        for (j = sstart; j < send; j++)
            READ_PLANE(2, 3)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
228 
229 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
230 {
231  const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
232 
233  if (interlaced)
234  return ~(1 + 2 * is_luma);
235 
236  return ~is_luma;
237 }
238 
/* Decode one 8-bit plane, slice by slice.
 * For "pack" variants (UM??) the per-slice packed/control streams set up
 * by the frame parser are consumed directly; otherwise src points at the
 * 256-entry code-length table followed by the slice-offset table and
 * slice data.  use_pred enables left prediction.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC_MULTI multi;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (c->pack) {
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            /* control stream: one 3-bit field per group of 8 samples */
            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            /* packed stream: the (bits+1)-wide sample payloads */
            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            /* each 8-sample group needs 3 control bits; bail out early if
             * the control stream cannot cover the whole slice */
            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    /* whole group is zero; NOTE(review): assumes dest rows
                     * are 8-byte aligned/padded for this 64-bit store */
                    *(uint64_t *) p = 0;
                } else {
                    /* sub/add re-center the (bits+1)-bit value around 0 */
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {

                        p[k] = get_bits_le(&pbit, bits + 1);
                        add  = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(c, src, &vlc, &multi, &fsym, 256)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80;   /* mid-range predictor seed for 8-bit samples */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += (unsigned)pix;   /* unsigned: avoid signed overflow UB */
                        pix   = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        /* no VLC tables were allocated on this path, nothing to free */
        return 0;
    }

    src += 256;   /* skip the code-length table to reach the slice offsets */

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest, *buf;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* byte-swap the slice into the scratch buffer (bitstream is stored
         * as little-endian 32-bit words) and zero the read-ahead padding */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;
        for (j = sstart; j < send; j++)
            READ_PLANE(1, 5)
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);

    return 0;
fail:
    ff_vlc_free(&vlc);
    ff_vlc_free_multi(&multi);
    return AVERROR_INVALIDDATA;
}
373 
374 #undef A
375 #undef B
376 #undef C
377 
/* Undo median prediction for a progressive plane, slice by slice.
 * Within each slice: the first line uses left prediction, the first
 * pixel of the second line uses top prediction and the rest of that
 * line uses median prediction; all further lines use median
 * prediction continuously.  rmode requests even slice boundaries. */
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;            /* left, top, top-left neighbours */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;   /* predictor seed is mid-range, not a neighbour */
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
                                        bsrc + 16, width - 16, &A, &B);

        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
                                        bsrc, width, &A, &B);
            bsrc += stride;
        }
    }
}
425 
426 /* UtVideo interlaced mode treats every two lines as a single one,
427  * so restoring function should take care of possible padding between
428  * two parts of the same "line".
429  */
/* Interlaced variant of restore_median_planar(): every two stored rows
 * form one logical line (two fields), so prediction neighbours are
 * addressed with stride2 = 2 * stride and the predictor state carries
 * from the first half-row into the second. */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;            /* left, top, top-left neighbours */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);   /* slice bounds on field pairs */
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1;   /* count logical (two-row) lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;   /* predictor seed is mid-range, not a neighbour */
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second half of the logical line continues from the first half */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        /* second half-row: top neighbour is the previous stored row */
        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
484 
485 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
486  int width, int height, int slices, int rmode)
487 {
488  int i, j, slice;
489  int A, B, C;
490  uint8_t *bsrc;
491  int slice_start, slice_height;
492  const int cmask = ~rmode;
493  int min_width = FFMIN(width, 32);
494 
495  for (slice = 0; slice < slices; slice++) {
496  slice_start = ((slice * height) / slices) & cmask;
497  slice_height = ((((slice + 1) * height) / slices) & cmask) -
498  slice_start;
499 
500  if (!slice_height)
501  continue;
502  bsrc = src + slice_start * stride;
503 
504  // first line - left neighbour prediction
505  bsrc[0] += 0x80;
506  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
507  bsrc += stride;
508  if (slice_height <= 1)
509  continue;
510  for (j = 1; j < slice_height; j++) {
511  // second line - first element has top prediction, the rest uses gradient
512  bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
513  for (i = 1; i < min_width; i++) { /* dsp need align 32 */
514  A = bsrc[i - stride];
515  B = bsrc[i - (stride + 1)];
516  C = bsrc[i - 1];
517  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
518  }
519  if (width > 32)
520  c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
521  bsrc += stride;
522  }
523  }
524 }
525 
/* Interlaced variant of restore_gradient_planar(): every two stored
 * rows form one logical line, so vertical neighbours for the first
 * half-row are addressed with stride2 = 2 * stride, while the second
 * half-row treats the first half-row as its top neighbour and wraps
 * its left context from the end of the previous half-row. */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;            /* top, top-left, left neighbours */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);   /* slice bounds on field pairs */
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start  = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1;   /* count logical (two-row) lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;   /* predictor seed is mid-range, not a neighbour */
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second half of the logical line continues from the first half */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            /* second half-row, first pixel: top is directly above, top-left
             * wraps to the last pixel of the row above the previous half-row */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
580 
582  int *got_frame, AVPacket *avpkt)
583 {
584  const uint8_t *buf = avpkt->data;
585  int buf_size = avpkt->size;
586  UtvideoContext *c = avctx->priv_data;
587  int i, j;
588  const uint8_t *plane_start[5];
589  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
590  int ret;
591  GetByteContext gb;
592 
593  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
594  return ret;
595 
596  /* parse plane structure to get frame flags and validate slice offsets */
597  bytestream2_init(&gb, buf, buf_size);
598 
599  if (c->pack) {
600  const uint8_t *packed_stream;
601  const uint8_t *control_stream;
602  GetByteContext pb;
603  uint32_t nb_cbs;
604  int left;
605 
606  c->frame_info = PRED_GRADIENT << 8;
607 
608  if (bytestream2_get_byte(&gb) != 1)
609  return AVERROR_INVALIDDATA;
610  bytestream2_skip(&gb, 3);
611  c->offset = bytestream2_get_le32(&gb);
612 
613  if (buf_size <= c->offset + 8LL)
614  return AVERROR_INVALIDDATA;
615 
616  bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
617 
618  nb_cbs = bytestream2_get_le32(&pb);
619  if (nb_cbs > c->offset)
620  return AVERROR_INVALIDDATA;
621 
622  packed_stream = buf + 8;
623  control_stream = packed_stream + (c->offset - nb_cbs);
624  left = control_stream - packed_stream;
625 
626  for (i = 0; i < c->planes; i++) {
627  for (j = 0; j < c->slices; j++) {
628  c->packed_stream[i][j] = packed_stream;
629  c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
630  if (c->packed_stream_size[i][j] > left)
631  return AVERROR_INVALIDDATA;
632  left -= c->packed_stream_size[i][j];
633  packed_stream += c->packed_stream_size[i][j];
634  }
635  }
636 
637  left = buf + buf_size - control_stream;
638 
639  for (i = 0; i < c->planes; i++) {
640  for (j = 0; j < c->slices; j++) {
641  c->control_stream[i][j] = control_stream;
642  c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
643  if (c->control_stream_size[i][j] > left)
644  return AVERROR_INVALIDDATA;
645  left -= c->control_stream_size[i][j];
646  control_stream += c->control_stream_size[i][j];
647  }
648  }
649  } else if (c->pro) {
650  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
651  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
652  return AVERROR_INVALIDDATA;
653  }
654  c->frame_info = bytestream2_get_le32u(&gb);
655  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
656  for (i = 0; i < c->planes; i++) {
657  plane_start[i] = gb.buffer;
658  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
659  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
660  return AVERROR_INVALIDDATA;
661  }
662  slice_start = 0;
663  slice_end = 0;
664  for (j = 0; j < c->slices; j++) {
665  slice_end = bytestream2_get_le32u(&gb);
666  if (slice_end < 0 || slice_end < slice_start ||
667  bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
668  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
669  return AVERROR_INVALIDDATA;
670  }
671  slice_size = slice_end - slice_start;
673  max_slice_size = FFMAX(max_slice_size, slice_size);
674  }
675  plane_size = slice_end;
676  bytestream2_skipu(&gb, plane_size);
677  bytestream2_skipu(&gb, 1024);
678  }
679  plane_start[c->planes] = gb.buffer;
680  } else {
681  for (i = 0; i < c->planes; i++) {
682  plane_start[i] = gb.buffer;
683  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
684  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
685  return AVERROR_INVALIDDATA;
686  }
687  bytestream2_skipu(&gb, 256);
688  slice_start = 0;
689  slice_end = 0;
690  for (j = 0; j < c->slices; j++) {
691  slice_end = bytestream2_get_le32u(&gb);
692  if (slice_end < 0 || slice_end < slice_start ||
694  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
695  return AVERROR_INVALIDDATA;
696  }
697  slice_size = slice_end - slice_start;
699  max_slice_size = FFMAX(max_slice_size, slice_size);
700  }
701  plane_size = slice_end;
702  bytestream2_skipu(&gb, plane_size);
703  }
704  plane_start[c->planes] = gb.buffer;
705  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
706  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
707  return AVERROR_INVALIDDATA;
708  }
709  c->frame_info = bytestream2_get_le32u(&gb);
710  }
711  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
712  c->frame_info);
713 
714  c->frame_pred = (c->frame_info >> 8) & 3;
715 
716  max_slice_size += 4*avctx->width;
717 
718  if (!c->pack) {
719  av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
720  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
721 
722  if (!c->slice_bits) {
723  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
724  return AVERROR(ENOMEM);
725  }
726  }
727 
728  switch (c->avctx->pix_fmt) {
729  case AV_PIX_FMT_GBRP:
730  case AV_PIX_FMT_GBRAP:
731  for (i = 0; i < c->planes; i++) {
732  ret = decode_plane(c, i, frame->data[i],
733  frame->linesize[i], avctx->width,
734  avctx->height, plane_start[i],
735  c->frame_pred == PRED_LEFT);
736  if (ret)
737  return ret;
738  if (c->frame_pred == PRED_MEDIAN) {
739  if (!c->interlaced) {
740  restore_median_planar(c, frame->data[i],
741  frame->linesize[i], avctx->width,
742  avctx->height, c->slices, 0);
743  } else {
745  frame->linesize[i],
746  avctx->width, avctx->height, c->slices,
747  0);
748  }
749  } else if (c->frame_pred == PRED_GRADIENT) {
750  if (!c->interlaced) {
752  frame->linesize[i], avctx->width,
753  avctx->height, c->slices, 0);
754  } else {
756  frame->linesize[i],
757  avctx->width, avctx->height, c->slices,
758  0);
759  }
760  }
761  }
762  c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
763  frame->linesize[2], frame->linesize[0], frame->linesize[1],
764  avctx->width, avctx->height);
765  break;
766  case AV_PIX_FMT_GBRAP10:
767  case AV_PIX_FMT_GBRP10:
768  for (i = 0; i < c->planes; i++) {
769  ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
770  frame->linesize[i] / 2, avctx->width,
771  avctx->height, plane_start[i],
772  plane_start[i + 1] - 1024,
773  c->frame_pred == PRED_LEFT);
774  if (ret)
775  return ret;
776  }
777  c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
778  frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
779  avctx->width, avctx->height);
780  break;
781  case AV_PIX_FMT_YUV420P:
782  for (i = 0; i < 3; i++) {
783  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
784  avctx->width >> !!i, avctx->height >> !!i,
785  plane_start[i], c->frame_pred == PRED_LEFT);
786  if (ret)
787  return ret;
788  if (c->frame_pred == PRED_MEDIAN) {
789  if (!c->interlaced) {
790  restore_median_planar(c, frame->data[i], frame->linesize[i],
791  avctx->width >> !!i, avctx->height >> !!i,
792  c->slices, !i);
793  } else {
794  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
795  avctx->width >> !!i,
796  avctx->height >> !!i,
797  c->slices, !i);
798  }
799  } else if (c->frame_pred == PRED_GRADIENT) {
800  if (!c->interlaced) {
801  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
802  avctx->width >> !!i, avctx->height >> !!i,
803  c->slices, !i);
804  } else {
805  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
806  avctx->width >> !!i,
807  avctx->height >> !!i,
808  c->slices, !i);
809  }
810  }
811  }
812  break;
813  case AV_PIX_FMT_YUV422P:
814  for (i = 0; i < 3; i++) {
815  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
816  avctx->width >> !!i, avctx->height,
817  plane_start[i], c->frame_pred == PRED_LEFT);
818  if (ret)
819  return ret;
820  if (c->frame_pred == PRED_MEDIAN) {
821  if (!c->interlaced) {
822  restore_median_planar(c, frame->data[i], frame->linesize[i],
823  avctx->width >> !!i, avctx->height,
824  c->slices, 0);
825  } else {
826  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
827  avctx->width >> !!i, avctx->height,
828  c->slices, 0);
829  }
830  } else if (c->frame_pred == PRED_GRADIENT) {
831  if (!c->interlaced) {
832  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
833  avctx->width >> !!i, avctx->height,
834  c->slices, 0);
835  } else {
836  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
837  avctx->width >> !!i, avctx->height,
838  c->slices, 0);
839  }
840  }
841  }
842  break;
843  case AV_PIX_FMT_YUV444P:
844  for (i = 0; i < 3; i++) {
845  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
846  avctx->width, avctx->height,
847  plane_start[i], c->frame_pred == PRED_LEFT);
848  if (ret)
849  return ret;
850  if (c->frame_pred == PRED_MEDIAN) {
851  if (!c->interlaced) {
852  restore_median_planar(c, frame->data[i], frame->linesize[i],
853  avctx->width, avctx->height,
854  c->slices, 0);
855  } else {
856  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
857  avctx->width, avctx->height,
858  c->slices, 0);
859  }
860  } else if (c->frame_pred == PRED_GRADIENT) {
861  if (!c->interlaced) {
862  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
863  avctx->width, avctx->height,
864  c->slices, 0);
865  } else {
866  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
867  avctx->width, avctx->height,
868  c->slices, 0);
869  }
870  }
871  }
872  break;
874  for (i = 0; i < 3; i++) {
875  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
876  avctx->width >> !!i, avctx->height >> !!i,
877  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
878  if (ret)
879  return ret;
880  }
881  break;
883  for (i = 0; i < 3; i++) {
884  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
885  avctx->width >> !!i, avctx->height,
886  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
887  if (ret)
888  return ret;
889  }
890  break;
891  }
892 
893  if (c->interlaced)
895 
896  *got_frame = 1;
897 
898  /* always report that the buffer was completely consumed */
899  return buf_size;
900 }
901 
903 {
904  UtvideoContext * const c = avctx->priv_data;
905  int h_shift, v_shift;
906 
907  c->avctx = avctx;
908 
909  ff_utvideodsp_init(&c->utdsp);
910  ff_bswapdsp_init(&c->bdsp);
911  ff_llviddsp_init(&c->llviddsp);
912 
913  c->slice_bits_size = 0;
914 
915  switch (avctx->codec_tag) {
916  case MKTAG('U', 'L', 'R', 'G'):
917  c->planes = 3;
918  avctx->pix_fmt = AV_PIX_FMT_GBRP;
919  break;
920  case MKTAG('U', 'L', 'R', 'A'):
921  c->planes = 4;
922  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
923  break;
924  case MKTAG('U', 'L', 'Y', '0'):
925  c->planes = 3;
926  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
927  avctx->colorspace = AVCOL_SPC_BT470BG;
928  break;
929  case MKTAG('U', 'L', 'Y', '2'):
930  c->planes = 3;
931  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
932  avctx->colorspace = AVCOL_SPC_BT470BG;
933  break;
934  case MKTAG('U', 'L', 'Y', '4'):
935  c->planes = 3;
936  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
937  avctx->colorspace = AVCOL_SPC_BT470BG;
938  break;
939  case MKTAG('U', 'Q', 'Y', '0'):
940  c->planes = 3;
941  c->pro = 1;
942  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
943  break;
944  case MKTAG('U', 'Q', 'Y', '2'):
945  c->planes = 3;
946  c->pro = 1;
947  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
948  break;
949  case MKTAG('U', 'Q', 'R', 'G'):
950  c->planes = 3;
951  c->pro = 1;
952  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
953  break;
954  case MKTAG('U', 'Q', 'R', 'A'):
955  c->planes = 4;
956  c->pro = 1;
957  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
958  break;
959  case MKTAG('U', 'L', 'H', '0'):
960  c->planes = 3;
961  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
962  avctx->colorspace = AVCOL_SPC_BT709;
963  break;
964  case MKTAG('U', 'L', 'H', '2'):
965  c->planes = 3;
966  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
967  avctx->colorspace = AVCOL_SPC_BT709;
968  break;
969  case MKTAG('U', 'L', 'H', '4'):
970  c->planes = 3;
971  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
972  avctx->colorspace = AVCOL_SPC_BT709;
973  break;
974  case MKTAG('U', 'M', 'Y', '2'):
975  c->planes = 3;
976  c->pack = 1;
977  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
978  avctx->colorspace = AVCOL_SPC_BT470BG;
979  break;
980  case MKTAG('U', 'M', 'H', '2'):
981  c->planes = 3;
982  c->pack = 1;
983  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
984  avctx->colorspace = AVCOL_SPC_BT709;
985  break;
986  case MKTAG('U', 'M', 'Y', '4'):
987  c->planes = 3;
988  c->pack = 1;
989  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
990  avctx->colorspace = AVCOL_SPC_BT470BG;
991  break;
992  case MKTAG('U', 'M', 'H', '4'):
993  c->planes = 3;
994  c->pack = 1;
995  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
996  avctx->colorspace = AVCOL_SPC_BT709;
997  break;
998  case MKTAG('U', 'M', 'R', 'G'):
999  c->planes = 3;
1000  c->pack = 1;
1001  avctx->pix_fmt = AV_PIX_FMT_GBRP;
1002  break;
1003  case MKTAG('U', 'M', 'R', 'A'):
1004  c->planes = 4;
1005  c->pack = 1;
1006  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1007  break;
1008  default:
1009  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
1010  avctx->codec_tag);
1011  return AVERROR_INVALIDDATA;
1012  }
1013 
1014  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
1015  if ((avctx->width & ((1<<h_shift)-1)) ||
1016  (avctx->height & ((1<<v_shift)-1))) {
1017  avpriv_request_sample(avctx, "Odd dimensions");
1018  return AVERROR_PATCHWELCOME;
1019  }
1020 
1021  if (c->pack && avctx->extradata_size >= 16) {
1022  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1023  avctx->extradata[3], avctx->extradata[2],
1024  avctx->extradata[1], avctx->extradata[0]);
1025  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1026  AV_RB32(avctx->extradata + 4));
1027  c->compression = avctx->extradata[8];
1028  if (c->compression != 2)
1029  avpriv_request_sample(avctx, "Unknown compression type");
1030  c->slices = avctx->extradata[9] + 1;
1031  } else if (!c->pro && avctx->extradata_size >= 16) {
1032  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1033  avctx->extradata[3], avctx->extradata[2],
1034  avctx->extradata[1], avctx->extradata[0]);
1035  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1036  AV_RB32(avctx->extradata + 4));
1037  c->frame_info_size = AV_RL32(avctx->extradata + 8);
1038  c->flags = AV_RL32(avctx->extradata + 12);
1039 
1040  if (c->frame_info_size != 4)
1041  avpriv_request_sample(avctx, "Frame info not 4 bytes");
1042  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1043  c->slices = (c->flags >> 24) + 1;
1044  c->compression = c->flags & 1;
1045  c->interlaced = c->flags & 0x800;
1046  } else if (c->pro && avctx->extradata_size == 8) {
1047  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1048  avctx->extradata[3], avctx->extradata[2],
1049  avctx->extradata[1], avctx->extradata[0]);
1050  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1051  AV_RB32(avctx->extradata + 4));
1052  c->interlaced = 0;
1053  c->frame_info_size = 4;
1054  } else {
1055  av_log(avctx, AV_LOG_ERROR,
1056  "Insufficient extradata size %d, should be at least 16\n",
1057  avctx->extradata_size);
1058  return AVERROR_INVALIDDATA;
1059  }
1060 
1061  c->buffer = av_calloc(avctx->width + 8, c->pro?2:1);
1062  if (!c->buffer)
1063  return AVERROR(ENOMEM);
1064 
1065  return 0;
1066 }
1067 
{
    UtvideoContext * const c = avctx->priv_data;

    /* free the VLC slice scratch (av_fast_malloc'd during decode) and the
     * left-prediction line buffer allocated in init */
    av_freep(&c->slice_bits);
    av_freep(&c->buffer);

    return 0;
}
1077 
1079  .p.name = "utvideo",
1080  CODEC_LONG_NAME("Ut Video"),
1081  .p.type = AVMEDIA_TYPE_VIDEO,
1082  .p.id = AV_CODEC_ID_UTVIDEO,
1083  .priv_data_size = sizeof(UtvideoContext),
1084  .init = decode_init,
1085  .close = decode_end,
1087  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1088 };
UTVideoDSPContext
Definition: utvideodsp.h:27
A
#define A(x)
Definition: vpx_arith.h:28
utvideo.h
bswapdsp.h
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:695
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:685
restore_gradient_planar_il
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:526
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:902
UtvideoContext::buffer
void * buffer
Definition: utvideodec.c:63
GetByteContext
Definition: bytestream.h:33
HuffEntry::len
uint8_t len
Definition: exr.c:96
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
UtvideoContext::llviddsp
LLVidDSPContext llviddsp
Definition: utvideodec.c:50
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:1068
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
pixdesc.h
AVPacket::data
uint8_t * data
Definition: packet.h:524
compute_cmask
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
Definition: utvideodec.c:229
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:478
UtvideoContext::offset
uint32_t offset
Definition: utvideodec.c:52
FFCodec
Definition: codec_internal.h:126
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
restore_gradient_planar
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:485
UtvideoContext::slices
int slices
Definition: utvideodec.c:54
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:514
thread.h
decode_plane10
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:142
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:130
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:212
fail
#define fail()
Definition: checkasm.h:182
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:494
GetBitContext
Definition: get_bits.h:108
READ_PLANE
#define READ_PLANE(b, end)
Definition: utvideodec.c:118
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2993
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:35
LLVidDSPContext
Definition: lossless_videodsp.h:28
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:498
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:524
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:286
intreadwrite.h
VLC_MULTI
Definition: vlc.h:51
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demux_decode.c:41
ff_thread_get_buffer
int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
Definition: pthread_frame.c:986
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:1730
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
ff_vlc_free_multi
void ff_vlc_free_multi(VLC_MULTI *vlc)
Definition: vlc.c:575
HuffEntry::sym
uint16_t sym
Definition: exr.c:97
bits
uint8_t bits
Definition: vp3data.h:128
B
#define B
Definition: huffyuv.h:42
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:356
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:205
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
get_bits.h
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
CODEC_LONG_NAME
#define CODEC_LONG_NAME(str)
Definition: codec_internal.h:271
UtvideoContext::interlaced
int interlaced
Definition: utvideodec.c:56
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:110
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
UtvideoContext::control_stream
const uint8_t * control_stream[4][256]
Definition: utvideodec.c:67
UtvideoContext::slice_bits
uint8_t * slice_bits
Definition: utvideodec.c:61
UtvideoContext::frame_pred
int frame_pred
Definition: utvideodec.c:57
ff_vlc_init_multi_from_lengths
int ff_vlc_init_multi_from_lengths(VLC *vlc, VLC_MULTI *multi, int nb_bits, int nb_elems, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc_multi()
Definition: vlc.c:517
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:479
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
UtvideoContext::frame_info
uint32_t frame_info
Definition: utvideodec.c:52
init
int(* init)(AVBSFContext *ctx)
Definition: dts2pts.c:366
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:525
codec_internal.h
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:581
UtvideoContext::packed_stream
const uint8_t * packed_stream[4][256]
Definition: utvideodec.c:65
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
VLC_BITS
#define VLC_BITS
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
restore_median_planar_il
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:430
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:36
interlaced
uint8_t interlaced
Definition: mxfenc.c:2264
restore_median_planar
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:378
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:523
ff_utvideo_decoder
const FFCodec ff_utvideo_decoder
Definition: utvideodec.c:1078
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:194
UtvideoContext
Definition: utvideodec.c:46
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:618
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:657
AV_FRAME_FLAG_INTERLACED
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
Definition: frame.h:633
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:264
UtvideoContext::pack
int pack
Definition: utvideodec.c:59
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
build_huff
static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc, VLC_MULTI *multi, int *fsym, unsigned nb_elems)
Definition: utvideodec.c:76
mid_pred
#define mid_pred
Definition: mathops.h:98
ff_vlc_free
void ff_vlc_free(VLC *vlc)
Definition: vlc.c:580
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
UtvideoContext::frame_info_size
uint32_t frame_info_size
Definition: utvideodec.c:52
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
AVCodecContext
main external API structure.
Definition: avcodec.h:445
VLC
Definition: vlc.h:36
HuffEntry
Definition: exr.c:95
slice_start
static int slice_start(SliceContext *sc, VVCContext *s, VVCFrameContext *fc, const CodedBitstreamUnit *unit, const int is_first_slice)
Definition: dec.c:699
UtvideoContext::compression
int compression
Definition: utvideodec.c:55
UtvideoContext::flags
uint32_t flags
Definition: utvideodec.c:52
decode_plane
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:239
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:78
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:165
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:77
mem.h
lossless_videodsp.h
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
init_get_bits8_le
static int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer, int byte_size)
Definition: get_bits.h:553
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:470
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:472
AVPacket
This structure stores compressed data.
Definition: packet.h:501
UtvideoContext::slice_bits_size
int slice_bits_size
Definition: utvideodec.c:62
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
utvideodsp.h
ff_utvideodsp_init
av_cold void ff_utvideodsp_init(UTVideoDSPContext *c)
Definition: utvideodsp.c:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:618
UtvideoContext::bdsp
BswapDSPContext bdsp
Definition: utvideodec.c:49
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:34
UtvideoContext::pro
int pro
Definition: utvideodec.c:58
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
BswapDSPContext
Definition: bswapdsp.h:24
UtvideoContext::avctx
AVCodecContext * avctx
Definition: utvideodec.c:47
UtvideoContext::packed_stream_size
size_t packed_stream_size[4][256]
Definition: utvideodec.c:66
UtvideoContext::control_stream_size
size_t control_stream_size[4][256]
Definition: utvideodec.c:68
UtvideoContext::utdsp
UTVideoDSPContext utdsp
Definition: utvideodec.c:48
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
UtvideoContext::planes
int planes
Definition: utvideodec.c:53