FFmpeg
utvideodec.c
Go to the documentation of this file.
1 /*
2  * Ut Video decoder
3  * Copyright (c) 2011 Konstantin Shishkov
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video decoder
25  */
26 
27 #include <inttypes.h>
28 #include <stdlib.h>
29 
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
32 
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/pixdesc.h"
35 #include "avcodec.h"
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "codec_internal.h"
39 #include "get_bits.h"
40 #include "thread.h"
41 #include "utvideo.h"
42 
/**
 * One entry of a plane's Huffman table: the transmitted code length
 * paired with the symbol it decodes to (filled in by build_huff()).
 */
typedef struct HuffEntry {
    uint8_t  len; /**< code length in bits */
    uint16_t sym; /**< symbol value this code decodes to */
} HuffEntry;
47 
48 static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc,
49  int *fsym, unsigned nb_elems)
50 {
51  int i;
52  HuffEntry he[1024];
53  uint8_t bits[1024];
54  uint16_t codes_count[33] = { 0 };
55 
56  *fsym = -1;
57  for (i = 0; i < nb_elems; i++) {
58  if (src[i] == 0) {
59  *fsym = i;
60  return 0;
61  } else if (src[i] == 255) {
62  bits[i] = 0;
63  } else if (src[i] <= 32) {
64  bits[i] = src[i];
65  } else
66  return AVERROR_INVALIDDATA;
67 
68  codes_count[bits[i]]++;
69  }
70  if (codes_count[0] == nb_elems)
71  return AVERROR_INVALIDDATA;
72 
73  /* For Ut Video, longer codes are to the left of the tree and
74  * for codes with the same length the symbol is descending from
75  * left to right. So after the next loop --codes_count[i] will
76  * be the index of the first (lowest) symbol of length i when
77  * indexed by the position in the tree with left nodes being first. */
78  for (int i = 31; i >= 0; i--)
79  codes_count[i] += codes_count[i + 1];
80 
81  for (unsigned i = 0; i < nb_elems; i++)
82  he[--codes_count[bits[i]]] = (HuffEntry) { bits[i], i };
83 
84 #define VLC_BITS 11
85  return ff_init_vlc_from_lengths(vlc, VLC_BITS, codes_count[0],
86  &he[0].len, sizeof(*he),
87  &he[0].sym, sizeof(*he), 2, 0, 0, c->avctx);
88 }
89 
/**
 * Decode one 10-bit plane.
 *
 * The plane is split into c->slices horizontal slices, each an
 * independently Huffman-coded bitstream with optional left prediction.
 *
 * @param c        decoder context (slice count, bswap DSP, scratch buffer)
 * @param plane_no plane index (not used by this function)
 * @param dst      destination plane, 10-bit samples stored in uint16_t
 * @param stride   destination stride in samples, not bytes
 * @param width    plane width in samples
 * @param height   plane height in lines
 * @param src      per-slice end-offset table (c->slices little-endian
 *                 32-bit values) followed by the concatenated slice data
 * @param huff     1024 code lengths for building the plane's Huffman table
 * @param use_pred non-zero if left (previous sample) prediction is applied
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff(c, huff, &vlc, &fsym, 1024)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC was built in this case, so nothing to free on return */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices);
            dest   = dst + sstart * stride;

            /* predictor starts at mid-range of the 10-bit scale */
            prev = 0x200;
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF;  /* wrap to 10 bits */
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices);
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* zero the padding after the slice so the unchecked bit reader
         * never sees uninitialized bytes */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        /* byte-swap the slice's 32-bit words into the scratch buffer;
         * slice data starts after the c->slices * 4 byte offset table */
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200;  /* mid-range predictor seed, as in the fsym path */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    prev &= 0x3FF;  /* wrap to 10 bits */
                    pix = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
            /* overread check once per line (reader itself is unchecked) */
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
        }
        /* more than one 32-bit word of trailing data is suspicious */
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
192 
193 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
194 {
195  const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
196 
197  if (interlaced)
198  return ~(1 + 2 * is_luma);
199 
200  return ~is_luma;
201 }
202 
/**
 * Decode one 8-bit plane.
 *
 * Handles two layouts: the "pack" variant (per-8-pixel control codes that
 * select a reduced bit width for each group of samples) and the classic
 * Huffman-coded variant with optional left prediction.
 *
 * @param c        decoder context
 * @param plane_no plane index (selects the packed/control streams and
 *                 affects the slice boundary mask)
 * @param dst      destination plane buffer
 * @param stride   destination stride in bytes
 * @param width    plane width in samples
 * @param height   plane height in lines
 * @param src      for the Huffman variant: 256-byte code-length table,
 *                 then the per-slice end-offset table, then slice data
 * @param use_pred non-zero if left prediction is applied
 * @return 0 on success, a negative AVERROR code on failure
 */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (c->pack) {
        /* packed variant: a 3-bit control code per group of 8 samples
         * gives the reduced bit width used for that group */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            /* need 3 control bits for every (rounded-up) group of 8 bytes */
            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    /* control code 0: the whole group is zero.
                     * NOTE(review): 8-byte store assumes the frame plane
                     * rows are at least 8-byte aligned/padded — confirm
                     * against the frame buffer allocator. */
                    *(uint64_t *) p = 0;
                } else {
                    /* sub is the sign-bias of a (bits + 1)-bit value */
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    /* 8 samples of (bits + 1) bits each must be available */
                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {

                        /* NOTE(review): maps the reduced-width value back
                         * onto the full 8-bit range around mid-grey —
                         * verify against the Ut Video "pack" format spec */
                        p[k] = get_bits_le(&pbit, bits + 1);
                        add = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(c, src, &vlc, &fsym, 256)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* no VLC was built in this case, so nothing to free on return */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send   = (height * (slice + 1) / c->slices) & cmask;
            dest   = dst + sstart * stride;

            prev = 0x80;  /* mid-range predictor seed for 8-bit samples */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        /* unsigned add avoids signed-overflow UB; the
                         * store below truncates to 8 bits anyway */
                        prev += (unsigned)pix;
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    /* skip the 256-byte code-length table consumed by build_huff() */
    src += 256;

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send   = (height * (slice + 1) / c->slices) & cmask;
        dest   = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end   = AV_RL32(src + slice * 4);
        slice_size       = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* zero the padding after the slice so the unchecked bit reader
         * never sees uninitialized bytes */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        /* byte-swap the slice's 32-bit words into the scratch buffer;
         * slice data starts after the c->slices * 4 byte offset table */
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80;  /* mid-range predictor seed, as in the fsym path */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    pix = prev;
                }
                dest[i] = pix;
            }
            /* overread check once per line (reader itself is unchecked) */
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
            dest += stride;
        }
        /* more than one 32-bit word of trailing data is suspicious */
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
352 
353 #undef A
354 #undef B
355 #undef C
356 
357 static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
358  int width, int height, int slices, int rmode)
359 {
360  int i, j, slice;
361  int A, B, C;
362  uint8_t *bsrc;
363  int slice_start, slice_height;
364  const int cmask = ~rmode;
365 
366  for (slice = 0; slice < slices; slice++) {
367  slice_start = ((slice * height) / slices) & cmask;
368  slice_height = ((((slice + 1) * height) / slices) & cmask) -
369  slice_start;
370 
371  if (!slice_height)
372  continue;
373  bsrc = src + slice_start * stride;
374 
375  // first line - left neighbour prediction
376  bsrc[0] += 0x80;
377  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
378  bsrc += stride;
379  if (slice_height <= 1)
380  continue;
381  // second line - first element has top prediction, the rest uses median
382  C = bsrc[-stride];
383  bsrc[0] += C;
384  A = bsrc[0];
385  for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
386  B = bsrc[i - stride];
387  bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
388  C = B;
389  A = bsrc[i];
390  }
391  if (width > 16)
392  c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
393  bsrc + 16, width - 16, &A, &B);
394 
395  bsrc += stride;
396  // the rest of lines use continuous median prediction
397  for (j = 2; j < slice_height; j++) {
398  c->llviddsp.add_median_pred(bsrc, bsrc - stride,
399  bsrc, width, &A, &B);
400  bsrc += stride;
401  }
402  }
403 }
404 
405 /* UtVideo interlaced mode treats every two lines as a single one,
406  * so restoring function should take care of possible padding between
407  * two parts of the same "line".
408  */
/**
 * Undo median prediction on an interlaced plane.
 *
 * Each pair of lines (one per field) is treated as a single logical line,
 * so the prediction state carries from the first line of a pair into the
 * second, and "top" neighbours are two real lines (stride2) above.
 */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;  /* A = left, B = top, C = top-left predictors */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask   = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start    = ((slice * height) / slices) & cmask;
        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
        slice_height >>= 1;  /* count line *pairs*, not lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        /* the left predictor continues from the end of the first line
         * of the pair into the second line (one logical long line) */
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        /* second field line of the pair: top neighbours come from the
         * previous pair's second field line (bsrc - stride) */
        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
463 
464 static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
465  int width, int height, int slices, int rmode)
466 {
467  int i, j, slice;
468  int A, B, C;
469  uint8_t *bsrc;
470  int slice_start, slice_height;
471  const int cmask = ~rmode;
472  int min_width = FFMIN(width, 32);
473 
474  for (slice = 0; slice < slices; slice++) {
475  slice_start = ((slice * height) / slices) & cmask;
476  slice_height = ((((slice + 1) * height) / slices) & cmask) -
477  slice_start;
478 
479  if (!slice_height)
480  continue;
481  bsrc = src + slice_start * stride;
482 
483  // first line - left neighbour prediction
484  bsrc[0] += 0x80;
485  c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
486  bsrc += stride;
487  if (slice_height <= 1)
488  continue;
489  for (j = 1; j < slice_height; j++) {
490  // second line - first element has top prediction, the rest uses gradient
491  bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
492  for (i = 1; i < min_width; i++) { /* dsp need align 32 */
493  A = bsrc[i - stride];
494  B = bsrc[i - (stride + 1)];
495  C = bsrc[i - 1];
496  bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
497  }
498  if (width > 32)
499  c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
500  bsrc += stride;
501  }
502  }
503 }
504 
/**
 * Undo gradient prediction on an interlaced plane.
 *
 * Each pair of field lines is treated as one logical line of double
 * length, so "top" neighbours are two real lines (stride2) above and the
 * left neighbour of a second-field line's first pixel wraps to the end
 * of the first-field line.
 */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;  /* A = top, B = top-left, C = left predictors */
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start    = ((slice * height) / slices) & cmask;
        slice_height   = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
        slice_height >>= 1;  /* count line *pairs*, not lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        /* left predictor runs across both field lines of the first pair */
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            /* first pixel of the pair's second field line: its "left"
             * neighbour is the last pixel of the first field line, and
             * the index below resolves to bsrc[width - 1 - stride2] for
             * the top-left — NOTE(review): consistent with treating the
             * pair as one double-length line; verify against the
             * progressive path above */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
559 
561  int *got_frame, AVPacket *avpkt)
562 {
563  const uint8_t *buf = avpkt->data;
564  int buf_size = avpkt->size;
565  UtvideoContext *c = avctx->priv_data;
566  int i, j;
567  const uint8_t *plane_start[5];
568  int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
569  int ret;
570  GetByteContext gb;
571 
572  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
573  return ret;
574 
575  /* parse plane structure to get frame flags and validate slice offsets */
576  bytestream2_init(&gb, buf, buf_size);
577 
578  if (c->pack) {
579  const uint8_t *packed_stream;
580  const uint8_t *control_stream;
581  GetByteContext pb;
582  uint32_t nb_cbs;
583  int left;
584 
585  c->frame_info = PRED_GRADIENT << 8;
586 
587  if (bytestream2_get_byte(&gb) != 1)
588  return AVERROR_INVALIDDATA;
589  bytestream2_skip(&gb, 3);
590  c->offset = bytestream2_get_le32(&gb);
591 
592  if (buf_size <= c->offset + 8LL)
593  return AVERROR_INVALIDDATA;
594 
595  bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);
596 
597  nb_cbs = bytestream2_get_le32(&pb);
598  if (nb_cbs > c->offset)
599  return AVERROR_INVALIDDATA;
600 
601  packed_stream = buf + 8;
602  control_stream = packed_stream + (c->offset - nb_cbs);
603  left = control_stream - packed_stream;
604 
605  for (i = 0; i < c->planes; i++) {
606  for (j = 0; j < c->slices; j++) {
607  c->packed_stream[i][j] = packed_stream;
608  c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
609  if (c->packed_stream_size[i][j] > left)
610  return AVERROR_INVALIDDATA;
611  left -= c->packed_stream_size[i][j];
612  packed_stream += c->packed_stream_size[i][j];
613  }
614  }
615 
616  left = buf + buf_size - control_stream;
617 
618  for (i = 0; i < c->planes; i++) {
619  for (j = 0; j < c->slices; j++) {
620  c->control_stream[i][j] = control_stream;
621  c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
622  if (c->control_stream_size[i][j] > left)
623  return AVERROR_INVALIDDATA;
624  left -= c->control_stream_size[i][j];
625  control_stream += c->control_stream_size[i][j];
626  }
627  }
628  } else if (c->pro) {
629  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
630  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
631  return AVERROR_INVALIDDATA;
632  }
633  c->frame_info = bytestream2_get_le32u(&gb);
634  c->slices = ((c->frame_info >> 16) & 0xff) + 1;
635  for (i = 0; i < c->planes; i++) {
636  plane_start[i] = gb.buffer;
637  if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
638  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
639  return AVERROR_INVALIDDATA;
640  }
641  slice_start = 0;
642  slice_end = 0;
643  for (j = 0; j < c->slices; j++) {
644  slice_end = bytestream2_get_le32u(&gb);
645  if (slice_end < 0 || slice_end < slice_start ||
646  bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
647  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
648  return AVERROR_INVALIDDATA;
649  }
650  slice_size = slice_end - slice_start;
651  slice_start = slice_end;
652  max_slice_size = FFMAX(max_slice_size, slice_size);
653  }
654  plane_size = slice_end;
655  bytestream2_skipu(&gb, plane_size);
656  bytestream2_skipu(&gb, 1024);
657  }
658  plane_start[c->planes] = gb.buffer;
659  } else {
660  for (i = 0; i < c->planes; i++) {
661  plane_start[i] = gb.buffer;
662  if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
663  av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
664  return AVERROR_INVALIDDATA;
665  }
666  bytestream2_skipu(&gb, 256);
667  slice_start = 0;
668  slice_end = 0;
669  for (j = 0; j < c->slices; j++) {
670  slice_end = bytestream2_get_le32u(&gb);
671  if (slice_end < 0 || slice_end < slice_start ||
673  av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
674  return AVERROR_INVALIDDATA;
675  }
676  slice_size = slice_end - slice_start;
677  slice_start = slice_end;
678  max_slice_size = FFMAX(max_slice_size, slice_size);
679  }
680  plane_size = slice_end;
681  bytestream2_skipu(&gb, plane_size);
682  }
683  plane_start[c->planes] = gb.buffer;
684  if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
685  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
686  return AVERROR_INVALIDDATA;
687  }
688  c->frame_info = bytestream2_get_le32u(&gb);
689  }
690  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
691  c->frame_info);
692 
693  c->frame_pred = (c->frame_info >> 8) & 3;
694 
695  max_slice_size += 4*avctx->width;
696 
697  if (!c->pack) {
698  av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
699  max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);
700 
701  if (!c->slice_bits) {
702  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
703  return AVERROR(ENOMEM);
704  }
705  }
706 
707  switch (c->avctx->pix_fmt) {
708  case AV_PIX_FMT_GBRP:
709  case AV_PIX_FMT_GBRAP:
710  for (i = 0; i < c->planes; i++) {
711  ret = decode_plane(c, i, frame->data[i],
712  frame->linesize[i], avctx->width,
713  avctx->height, plane_start[i],
714  c->frame_pred == PRED_LEFT);
715  if (ret)
716  return ret;
717  if (c->frame_pred == PRED_MEDIAN) {
718  if (!c->interlaced) {
719  restore_median_planar(c, frame->data[i],
720  frame->linesize[i], avctx->width,
721  avctx->height, c->slices, 0);
722  } else {
724  frame->linesize[i],
725  avctx->width, avctx->height, c->slices,
726  0);
727  }
728  } else if (c->frame_pred == PRED_GRADIENT) {
729  if (!c->interlaced) {
731  frame->linesize[i], avctx->width,
732  avctx->height, c->slices, 0);
733  } else {
735  frame->linesize[i],
736  avctx->width, avctx->height, c->slices,
737  0);
738  }
739  }
740  }
741  c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
742  frame->linesize[2], frame->linesize[0], frame->linesize[1],
743  avctx->width, avctx->height);
744  break;
745  case AV_PIX_FMT_GBRAP10:
746  case AV_PIX_FMT_GBRP10:
747  for (i = 0; i < c->planes; i++) {
748  ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
749  frame->linesize[i] / 2, avctx->width,
750  avctx->height, plane_start[i],
751  plane_start[i + 1] - 1024,
752  c->frame_pred == PRED_LEFT);
753  if (ret)
754  return ret;
755  }
756  c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
757  frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
758  avctx->width, avctx->height);
759  break;
760  case AV_PIX_FMT_YUV420P:
761  for (i = 0; i < 3; i++) {
762  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
763  avctx->width >> !!i, avctx->height >> !!i,
764  plane_start[i], c->frame_pred == PRED_LEFT);
765  if (ret)
766  return ret;
767  if (c->frame_pred == PRED_MEDIAN) {
768  if (!c->interlaced) {
769  restore_median_planar(c, frame->data[i], frame->linesize[i],
770  avctx->width >> !!i, avctx->height >> !!i,
771  c->slices, !i);
772  } else {
773  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
774  avctx->width >> !!i,
775  avctx->height >> !!i,
776  c->slices, !i);
777  }
778  } else if (c->frame_pred == PRED_GRADIENT) {
779  if (!c->interlaced) {
780  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
781  avctx->width >> !!i, avctx->height >> !!i,
782  c->slices, !i);
783  } else {
784  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
785  avctx->width >> !!i,
786  avctx->height >> !!i,
787  c->slices, !i);
788  }
789  }
790  }
791  break;
792  case AV_PIX_FMT_YUV422P:
793  for (i = 0; i < 3; i++) {
794  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
795  avctx->width >> !!i, avctx->height,
796  plane_start[i], c->frame_pred == PRED_LEFT);
797  if (ret)
798  return ret;
799  if (c->frame_pred == PRED_MEDIAN) {
800  if (!c->interlaced) {
801  restore_median_planar(c, frame->data[i], frame->linesize[i],
802  avctx->width >> !!i, avctx->height,
803  c->slices, 0);
804  } else {
805  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
806  avctx->width >> !!i, avctx->height,
807  c->slices, 0);
808  }
809  } else if (c->frame_pred == PRED_GRADIENT) {
810  if (!c->interlaced) {
811  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
812  avctx->width >> !!i, avctx->height,
813  c->slices, 0);
814  } else {
815  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
816  avctx->width >> !!i, avctx->height,
817  c->slices, 0);
818  }
819  }
820  }
821  break;
822  case AV_PIX_FMT_YUV444P:
823  for (i = 0; i < 3; i++) {
824  ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
825  avctx->width, avctx->height,
826  plane_start[i], c->frame_pred == PRED_LEFT);
827  if (ret)
828  return ret;
829  if (c->frame_pred == PRED_MEDIAN) {
830  if (!c->interlaced) {
831  restore_median_planar(c, frame->data[i], frame->linesize[i],
832  avctx->width, avctx->height,
833  c->slices, 0);
834  } else {
835  restore_median_planar_il(c, frame->data[i], frame->linesize[i],
836  avctx->width, avctx->height,
837  c->slices, 0);
838  }
839  } else if (c->frame_pred == PRED_GRADIENT) {
840  if (!c->interlaced) {
841  restore_gradient_planar(c, frame->data[i], frame->linesize[i],
842  avctx->width, avctx->height,
843  c->slices, 0);
844  } else {
845  restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
846  avctx->width, avctx->height,
847  c->slices, 0);
848  }
849  }
850  }
851  break;
853  for (i = 0; i < 3; i++) {
854  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
855  avctx->width >> !!i, avctx->height >> !!i,
856  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
857  if (ret)
858  return ret;
859  }
860  break;
862  for (i = 0; i < 3; i++) {
863  ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
864  avctx->width >> !!i, avctx->height,
865  plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
866  if (ret)
867  return ret;
868  }
869  break;
870  }
871 
872  frame->key_frame = 1;
873  frame->pict_type = AV_PICTURE_TYPE_I;
874  frame->interlaced_frame = !!c->interlaced;
875 
876  *got_frame = 1;
877 
878  /* always report that the buffer was completely consumed */
879  return buf_size;
880 }
881 
883 {
884  UtvideoContext * const c = avctx->priv_data;
885  int h_shift, v_shift;
886 
887  c->avctx = avctx;
888 
889  ff_utvideodsp_init(&c->utdsp);
890  ff_bswapdsp_init(&c->bdsp);
891  ff_llviddsp_init(&c->llviddsp);
892 
893  c->slice_bits_size = 0;
894 
895  switch (avctx->codec_tag) {
896  case MKTAG('U', 'L', 'R', 'G'):
897  c->planes = 3;
898  avctx->pix_fmt = AV_PIX_FMT_GBRP;
899  break;
900  case MKTAG('U', 'L', 'R', 'A'):
901  c->planes = 4;
902  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
903  break;
904  case MKTAG('U', 'L', 'Y', '0'):
905  c->planes = 3;
906  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
907  avctx->colorspace = AVCOL_SPC_BT470BG;
908  break;
909  case MKTAG('U', 'L', 'Y', '2'):
910  c->planes = 3;
911  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
912  avctx->colorspace = AVCOL_SPC_BT470BG;
913  break;
914  case MKTAG('U', 'L', 'Y', '4'):
915  c->planes = 3;
916  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
917  avctx->colorspace = AVCOL_SPC_BT470BG;
918  break;
919  case MKTAG('U', 'Q', 'Y', '0'):
920  c->planes = 3;
921  c->pro = 1;
922  avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
923  break;
924  case MKTAG('U', 'Q', 'Y', '2'):
925  c->planes = 3;
926  c->pro = 1;
927  avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
928  break;
929  case MKTAG('U', 'Q', 'R', 'G'):
930  c->planes = 3;
931  c->pro = 1;
932  avctx->pix_fmt = AV_PIX_FMT_GBRP10;
933  break;
934  case MKTAG('U', 'Q', 'R', 'A'):
935  c->planes = 4;
936  c->pro = 1;
937  avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
938  break;
939  case MKTAG('U', 'L', 'H', '0'):
940  c->planes = 3;
941  avctx->pix_fmt = AV_PIX_FMT_YUV420P;
942  avctx->colorspace = AVCOL_SPC_BT709;
943  break;
944  case MKTAG('U', 'L', 'H', '2'):
945  c->planes = 3;
946  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
947  avctx->colorspace = AVCOL_SPC_BT709;
948  break;
949  case MKTAG('U', 'L', 'H', '4'):
950  c->planes = 3;
951  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
952  avctx->colorspace = AVCOL_SPC_BT709;
953  break;
954  case MKTAG('U', 'M', 'Y', '2'):
955  c->planes = 3;
956  c->pack = 1;
957  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
958  avctx->colorspace = AVCOL_SPC_BT470BG;
959  break;
960  case MKTAG('U', 'M', 'H', '2'):
961  c->planes = 3;
962  c->pack = 1;
963  avctx->pix_fmt = AV_PIX_FMT_YUV422P;
964  avctx->colorspace = AVCOL_SPC_BT709;
965  break;
966  case MKTAG('U', 'M', 'Y', '4'):
967  c->planes = 3;
968  c->pack = 1;
969  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
970  avctx->colorspace = AVCOL_SPC_BT470BG;
971  break;
972  case MKTAG('U', 'M', 'H', '4'):
973  c->planes = 3;
974  c->pack = 1;
975  avctx->pix_fmt = AV_PIX_FMT_YUV444P;
976  avctx->colorspace = AVCOL_SPC_BT709;
977  break;
978  case MKTAG('U', 'M', 'R', 'G'):
979  c->planes = 3;
980  c->pack = 1;
981  avctx->pix_fmt = AV_PIX_FMT_GBRP;
982  break;
983  case MKTAG('U', 'M', 'R', 'A'):
984  c->planes = 4;
985  c->pack = 1;
986  avctx->pix_fmt = AV_PIX_FMT_GBRAP;
987  break;
988  default:
989  av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
990  avctx->codec_tag);
991  return AVERROR_INVALIDDATA;
992  }
993 
994  av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
995  if ((avctx->width & ((1<<h_shift)-1)) ||
996  (avctx->height & ((1<<v_shift)-1))) {
997  avpriv_request_sample(avctx, "Odd dimensions");
998  return AVERROR_PATCHWELCOME;
999  }
1000 
1001  if (c->pack && avctx->extradata_size >= 16) {
1002  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1003  avctx->extradata[3], avctx->extradata[2],
1004  avctx->extradata[1], avctx->extradata[0]);
1005  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1006  AV_RB32(avctx->extradata + 4));
1007  c->compression = avctx->extradata[8];
1008  if (c->compression != 2)
1009  avpriv_request_sample(avctx, "Unknown compression type");
1010  c->slices = avctx->extradata[9] + 1;
1011  } else if (!c->pro && avctx->extradata_size >= 16) {
1012  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1013  avctx->extradata[3], avctx->extradata[2],
1014  avctx->extradata[1], avctx->extradata[0]);
1015  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1016  AV_RB32(avctx->extradata + 4));
1017  c->frame_info_size = AV_RL32(avctx->extradata + 8);
1018  c->flags = AV_RL32(avctx->extradata + 12);
1019 
1020  if (c->frame_info_size != 4)
1021  avpriv_request_sample(avctx, "Frame info not 4 bytes");
1022  av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1023  c->slices = (c->flags >> 24) + 1;
1024  c->compression = c->flags & 1;
1025  c->interlaced = c->flags & 0x800;
1026  } else if (c->pro && avctx->extradata_size == 8) {
1027  av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1028  avctx->extradata[3], avctx->extradata[2],
1029  avctx->extradata[1], avctx->extradata[0]);
1030  av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1031  AV_RB32(avctx->extradata + 4));
1032  c->interlaced = 0;
1033  c->frame_info_size = 4;
1034  } else {
1035  av_log(avctx, AV_LOG_ERROR,
1036  "Insufficient extradata size %d, should be at least 16\n",
1037  avctx->extradata_size);
1038  return AVERROR_INVALIDDATA;
1039  }
1040 
1041  return 0;
1042 }
1043 
1045 {
1046  UtvideoContext * const c = avctx->priv_data;
1047 
1048  av_freep(&c->slice_bits);
1049 
1050  return 0;
1051 }
1052 
1054  .p.name = "utvideo",
1055  .p.long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
1056  .p.type = AVMEDIA_TYPE_VIDEO,
1057  .p.id = AV_CODEC_ID_UTVIDEO,
1058  .priv_data_size = sizeof(UtvideoContext),
1059  .init = decode_init,
1060  .close = decode_end,
1062  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
1063  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1064 };
utvideo.h
bswapdsp.h
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
get_bits_left
static int get_bits_left(GetBitContext *gb)
Definition: get_bits.h:839
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:966
restore_gradient_planar_il
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:505
decode_init
static av_cold int decode_init(AVCodecContext *avctx)
Definition: utvideodec.c:882
GetByteContext
Definition: bytestream.h:33
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
HuffEntry::len
uint8_t len
Definition: exr.c:95
bytestream2_skipu
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
Definition: bytestream.h:174
decode_end
static av_cold int decode_end(AVCodecContext *avctx)
Definition: utvideodec.c:1044
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
pixdesc.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
PRED_LEFT
@ PRED_LEFT
Definition: utvideo.h:39
compute_cmask
static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
Definition: utvideodec.c:193
AV_PIX_FMT_YUV420P10
#define AV_PIX_FMT_YUV420P10
Definition: pixfmt.h:406
FFCodec
Definition: codec_internal.h:112
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
restore_gradient_planar
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:464
init_get_bits
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:649
thread.h
decode_plane10
static int decode_plane10(UtvideoContext *c, int plane_no, uint16_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, const uint8_t *huff, int use_pred)
Definition: utvideodec.c:90
init
static int init
Definition: av_tx.c:47
A
#define A(x)
Definition: vp56_arith.h:28
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:531
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
AV_PIX_FMT_GBRAP
@ AV_PIX_FMT_GBRAP
planar GBRA 4:4:4:4 32bpp
Definition: pixfmt.h:205
fail
#define fail()
Definition: checkasm.h:131
AV_PIX_FMT_GBRP10
#define AV_PIX_FMT_GBRP10
Definition: pixfmt.h:422
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
av_pix_fmt_get_chroma_sub_sample
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2690
build_huff
static int build_huff(UtvideoContext *c, const uint8_t *src, VLC *vlc, int *fsym, unsigned nb_elems)
Definition: utvideodec.c:48
C
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
Definition: writing_filters.txt:58
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_cold
#define av_cold
Definition: attributes.h:90
PRED_GRADIENT
@ PRED_GRADIENT
Definition: utvideo.h:40
AV_PIX_FMT_GBRAP10
#define AV_PIX_FMT_GBRAP10
Definition: pixfmt.h:426
PRED_MEDIAN
@ PRED_MEDIAN
Definition: utvideo.h:41
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:491
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:254
intreadwrite.h
slice_end
static int slice_end(AVCodecContext *avctx, AVFrame *pict)
Handle slice ends.
Definition: mpeg12dec.c:2013
GetByteContext::buffer
const uint8_t * buffer
Definition: bytestream.h:34
HuffEntry::sym
uint16_t sym
Definition: exr.c:96
bits
uint8_t bits
Definition: vp3data.h:141
get_bits_le
static unsigned int get_bits_le(GetBitContext *s, int n)
Definition: get_bits.h:420
AV_CODEC_ID_UTVIDEO
@ AV_CODEC_ID_UTVIDEO
Definition: codec_id.h:203
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
get_bits.h
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
ff_bswapdsp_init
av_cold void ff_bswapdsp_init(BswapDSPContext *c)
Definition: bswapdsp.c:49
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
ff_init_vlc_from_lengths
int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
Definition: vlc.c:328
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:407
get_vlc2
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
Definition: get_bits.h:787
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
codec_internal.h
decode_frame
static int decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
Definition: utvideodec.c:560
AV_RB32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
Definition: bytestream.h:96
VLC_BITS
#define VLC_BITS
height
#define height
offset
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
Definition: writing_filters.txt:86
restore_median_planar_il
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:409
interlaced
uint8_t interlaced
Definition: mxfenc.c:2042
restore_median_planar
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride, int width, int height, int slices, int rmode)
Definition: utvideodec.c:357
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVCodecContext::extradata
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:490
ff_utvideo_decoder
const FFCodec ff_utvideo_decoder
Definition: utvideodec.c:1053
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
UtvideoContext
Definition: utvideo.h:64
len
int len
Definition: vorbis_enc_data.h:426
AVCodecContext::height
int height
Definition: avcodec.h:562
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
ff_free_vlc
void ff_free_vlc(VLC *vlc)
Definition: vlc.c:375
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
mid_pred
#define mid_pred
Definition: mathops.h:97
ret
ret
Definition: filter_design.txt:187
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE
Definition: defs.h:40
left
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2] ... the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so ...,+,-,+,-,+,+,-,+,-,+,... hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32 - hcoeff[1] - hcoeff[2] - ... a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2} an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||......... intra?||||:Block01 :yes no||||:Block02 :....... 
..........||||:Block03 ::y DC ::ref index:||||:Block04 ::cb DC ::motion x :||||......... :cr DC ::motion y :||||....... ..........|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------ ------------ ------------|||Y subbands||Cb subbands||Cr subbands||||--- ---||--- ---||--- ---|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||--- ---||--- ---||--- ---||||--- ---||--- ---||--- ---|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------ ------------ ------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction ------------|\ Dequantization ------------------- \||Reference frames|\ IDWT|------- -------|Motion \|||Frame 0||Frame 1||Compensation . OBMC v -------|------- -------|--------------. \------> Frame n output Frame Frame<----------------------------------/|...|------------------- Range Coder:============Binary Range Coder:------------------- The implemented range coder is an adapted version based upon "Range encoding: an algorithm for removing redundancy from a digitised message." by G. N. N. Martin. The symbols encoded by the Snow range coder are bits(0|1). The associated probabilities are not fix but change depending on the symbol mix seen so far. 
bit seen|new state ---------+----------------------------------------------- 0|256 - state_transition_table[256 - old_state];1|state_transition_table[old_state];state_transition_table={ 0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:------------------------- FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector 
Prediction:=========================1. the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:386
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
B
#define B
Definition: huffyuvdsp.h:32
ff_llviddsp_init
void ff_llviddsp_init(LLVidDSPContext *c)
Definition: lossless_videodsp.c:113
AVCodecContext
main external API structure.
Definition: avcodec.h:389
VLC
Definition: vlc.h:31
HuffEntry
Definition: exr.c:94
VLC::table
VLCElem * table
Definition: vlc.h:33
decode_plane
static int decode_plane(UtvideoContext *c, int plane_no, uint8_t *dst, ptrdiff_t stride, int width, int height, const uint8_t *src, int use_pred)
Definition: utvideodec.c:203
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AV_PIX_FMT_GBRP
@ AV_PIX_FMT_GBRP
planar GBR 4:4:4 24bpp
Definition: pixfmt.h:158
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
add
static float add(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:35
AV_PIX_FMT_YUV422P
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
init_get_bits8_le
static int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer, int byte_size)
Definition: get_bits.h:675
AVCodecContext::codec_tag
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:414
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
AVPacket
This structure stores compressed data.
Definition: packet.h:351
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:565
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
ff_utvideodsp_init
av_cold void ff_utvideodsp_init(UTVideoDSPContext *c)
Definition: utvideodsp.c:75
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
bytestream.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
MKTAG
#define MKTAG(a, b, c, d)
Definition: macros.h:55
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:527