FFmpeg
proresdec2.c
1 /*
2  * Copyright (c) 2010-2011 Maxim Poliakovski
3  * Copyright (c) 2010-2011 Elvis Presley
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444), 'ap4x' (4444 XQ)
25  */
26 
27 //#define DEBUG
28 
29 #define LONG_BITSTREAM_READER
30 
31 #include "config_components.h"
32 
33 #include "libavutil/internal.h"
34 #include "libavutil/mem_internal.h"
35 
36 #include "avcodec.h"
37 #include "codec_internal.h"
38 #include "get_bits.h"
39 #include "hwconfig.h"
40 #include "idctdsp.h"
41 #include "internal.h"
42 #include "profiles.h"
43 #include "simple_idct.h"
44 #include "proresdec.h"
45 #include "proresdata.h"
46 #include "thread.h"
47 
48 static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
49 {
50  int i;
51  for (i = 0; i < 64; i++)
52  dst[i] = permutation[src[i]];
53 }
54 
55 #define ALPHA_SHIFT_16_TO_10(alpha_val) (alpha_val >> 6)
56 #define ALPHA_SHIFT_8_TO_10(alpha_val) ((alpha_val << 2) | (alpha_val >> 6))
57 #define ALPHA_SHIFT_16_TO_12(alpha_val) (alpha_val >> 4)
58 #define ALPHA_SHIFT_8_TO_12(alpha_val) ((alpha_val << 4) | (alpha_val >> 4))
59 
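The four ALPHA_SHIFT_* macros above convert a raw 8- or 16-bit alpha sample to the 10- or 12-bit decoding precision: 16-bit input is simply truncated by a right shift, while 8-bit input is widened by bit replication so that full opacity still maps to full scale. A minimal standalone sketch of the two 10-bit cases (plain C; the helper names are invented for the illustration):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Same expressions as ALPHA_SHIFT_16_TO_10 / ALPHA_SHIFT_8_TO_10 above. */
static uint16_t alpha16_to_10(uint16_t v) { return v >> 6; }
static uint16_t alpha8_to_10(uint8_t v)   { return (uint16_t)((v << 2) | (v >> 6)); }

int main(void)
{
    /* 16-bit full scale 0xFFFF truncates to 10-bit full scale 0x3FF. */
    assert(alpha16_to_10(0xFFFF) == 0x3FF);
    /* Replicating the top bits of an 8-bit sample into the low bits makes
     * 0xFF come out as 0x3FF rather than 0x3FC (a plain left shift). */
    assert(alpha8_to_10(0xFF) == 0x3FF);
    assert(alpha8_to_10(0x00) == 0x000);
    printf("mid-grey 0x80 -> 0x%03X\n", alpha8_to_10(0x80)); /* prints 0x202 */
    return 0;
}
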
60 static void inline unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
61  const int num_bits, const int decode_precision) {
62  const int mask = (1 << num_bits) - 1;
63  int i, idx, val, alpha_val;
64 
65  idx = 0;
66  alpha_val = mask;
67  do {
68  do {
69  if (get_bits1(gb)) {
70  val = get_bits(gb, num_bits);
71  } else {
72  int sign;
73  val = get_bits(gb, num_bits == 16 ? 7 : 4);
74  sign = val & 1;
75  val = (val + 2) >> 1;
76  if (sign)
77  val = -val;
78  }
79  alpha_val = (alpha_val + val) & mask;
80  if (num_bits == 16) {
81  if (decode_precision == 10) {
82  dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
83  } else { /* 12b */
84  dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
85  }
86  } else {
87  if (decode_precision == 10) {
88  dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
89  } else { /* 12b */
90  dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
91  }
92  }
93  if (idx >= num_coeffs)
94  break;
95  } while (get_bits_left(gb)>0 && get_bits1(gb));
96  val = get_bits(gb, 4);
97  if (!val)
98  val = get_bits(gb, 11);
99  if (idx + val > num_coeffs)
100  val = num_coeffs - idx;
101  if (num_bits == 16) {
102  for (i = 0; i < val; i++) {
103  if (decode_precision == 10) {
104  dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
105  } else { /* 12b */
106  dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
107  }
108  }
109  } else {
110  for (i = 0; i < val; i++) {
111  if (decode_precision == 10) {
112  dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
113  } else { /* 12b */
114  dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
115  }
116  }
117  }
118  } while (idx < num_coeffs);
119 }
120 
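unpack_alpha() above decodes the alpha plane as a run-length stream: each entry begins with a flag bit, where 1 selects a raw num_bits-wide sample and 0 a short signed delta (7 bits for 16-bit alpha, 4 bits for 8-bit) added to the previous value modulo the sample range; once the per-entry continuation bit stops, a 4-bit repeat count (0 escapes to an 11-bit count) replicates the last sample. A standalone sketch of just the short-delta mapping, with an invented helper name:

#include <stdio.h>

/* Same mapping as the "else" branch of unpack_alpha() above:
 * code 0 -> +1, 1 -> -1, 2 -> +2, 3 -> -2, ..., 14 -> +8, 15 -> -8. */
static int alpha_delta(int code)
{
    int sign = code & 1;
    int val  = (code + 2) >> 1;
    return sign ? -val : val;
}

int main(void)
{
    for (int code = 0; code < 16; code++)   /* the 4-bit (8-bit alpha) case */
        printf("code %2d -> delta %+d\n", code, alpha_delta(code));
    return 0;
}

One flag bit plus four bits therefore covers deltas from -8 to +8 (zero excluded), which keeps slowly varying alpha cheap.
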
121 static void unpack_alpha_10(GetBitContext *gb, uint16_t *dst, int num_coeffs,
122  const int num_bits)
123 {
124  if (num_bits == 16) {
125  unpack_alpha(gb, dst, num_coeffs, 16, 10);
126  } else { /* 8 bits alpha */
127  unpack_alpha(gb, dst, num_coeffs, 8, 10);
128  }
129 }
130 
131 static void unpack_alpha_12(GetBitContext *gb, uint16_t *dst, int num_coeffs,
132  const int num_bits)
133 {
134  if (num_bits == 16) {
135  unpack_alpha(gb, dst, num_coeffs, 16, 12);
136  } else { /* 8 bits alpha */
137  unpack_alpha(gb, dst, num_coeffs, 8, 12);
138  }
139 }
140 
141 static av_cold int decode_init(AVCodecContext *avctx)
142 {
143  int ret = 0;
144  ProresContext *ctx = avctx->priv_data;
145  uint8_t idct_permutation[64];
146 
147  avctx->bits_per_raw_sample = 10;
148 
149  switch (avctx->codec_tag) {
150  case MKTAG('a','p','c','o'):
151  avctx->profile = FF_PROFILE_PRORES_PROXY;
152  break;
153  case MKTAG('a','p','c','s'):
154  avctx->profile = FF_PROFILE_PRORES_LT;
155  break;
156  case MKTAG('a','p','c','n'):
157  avctx->profile = FF_PROFILE_PRORES_STANDARD;
158  break;
159  case MKTAG('a','p','c','h'):
160  avctx->profile = FF_PROFILE_PRORES_HQ;
161  break;
162  case MKTAG('a','p','4','h'):
163  avctx->profile = FF_PROFILE_PRORES_4444;
164  avctx->bits_per_raw_sample = 12;
165  break;
166  case MKTAG('a','p','4','x'):
167  avctx->profile = FF_PROFILE_PRORES_XQ;
168  avctx->bits_per_raw_sample = 12;
169  break;
170  default:
171  avctx->profile = FF_PROFILE_UNKNOWN;
172  av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
173  }
174 
175  if (avctx->bits_per_raw_sample == 10) {
176  av_log(avctx, AV_LOG_DEBUG, "Auto bitdepth precision. Use 10b decoding based on codec tag.\n");
177  } else { /* 12b */
178  av_log(avctx, AV_LOG_DEBUG, "Auto bitdepth precision. Use 12b decoding based on codec tag.\n");
179  }
180 
181  ff_blockdsp_init(&ctx->bdsp, avctx);
182  ret = ff_proresdsp_init(&ctx->prodsp, avctx);
183  if (ret < 0) {
184  av_log(avctx, AV_LOG_ERROR, "Fail to init proresdsp for bits per raw sample %d\n", avctx->bits_per_raw_sample);
185  return ret;
186  }
187 
188  ff_init_scantable_permutation(idct_permutation,
189  ctx->prodsp.idct_permutation_type);
190 
191  permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
192  permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);
193 
194  ctx->pix_fmt = AV_PIX_FMT_NONE;
195 
196  if (avctx->bits_per_raw_sample == 10){
197  ctx->unpack_alpha = unpack_alpha_10;
198  } else if (avctx->bits_per_raw_sample == 12){
199  ctx->unpack_alpha = unpack_alpha_12;
200  } else {
201  av_log(avctx, AV_LOG_ERROR, "Fail to set unpack_alpha for bits per raw sample %d\n", avctx->bits_per_raw_sample);
202  return AVERROR_BUG;
203  }
204  return ret;
205 }
206 
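decode_init() above selects the profile and bit depth purely from the container FOURCC. As a rough standalone sketch (the TAG macro and helper are invented here, mirroring how MKTAG() packs the tag LSB-first), only the 4444 family is decoded at 12 bits:

#include <stdio.h>
#include <stdint.h>

/* Same packing as FFmpeg's MKTAG(): the first character lands in the low byte. */
#define TAG(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
                         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

/* Mirrors the switch above: 'ap4h' and 'ap4x' are 12-bit, the rest 10-bit. */
static int prores_bit_depth(uint32_t tag)
{
    return (tag == TAG('a','p','4','h') || tag == TAG('a','p','4','x')) ? 12 : 10;
}

int main(void)
{
    printf("apch -> %d bits\n", prores_bit_depth(TAG('a','p','c','h'))); /* 10 */
    printf("ap4x -> %d bits\n", prores_bit_depth(TAG('a','p','4','x'))); /* 12 */
    return 0;
}
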
207 static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
208  const int data_size, AVCodecContext *avctx)
209 {
210  int hdr_size, width, height, flags;
211  int version;
212  const uint8_t *ptr;
213  enum AVPixelFormat pix_fmt;
214 
215  hdr_size = AV_RB16(buf);
216  ff_dlog(avctx, "header size %d\n", hdr_size);
217  if (hdr_size > data_size) {
218  av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
219  return AVERROR_INVALIDDATA;
220  }
221 
222  version = AV_RB16(buf + 2);
223  ff_dlog(avctx, "%.4s version %d\n", buf+4, version);
224  if (version > 1) {
225  av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
226  return AVERROR_PATCHWELCOME;
227  }
228 
229  width = AV_RB16(buf + 8);
230  height = AV_RB16(buf + 10);
231 
232  if (width != avctx->width || height != avctx->height) {
233  int ret;
234 
235  av_log(avctx, AV_LOG_WARNING, "picture resolution change: %dx%d -> %dx%d\n",
236  avctx->width, avctx->height, width, height);
237  if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
238  return ret;
239  }
240 
241  ctx->frame_type = (buf[12] >> 2) & 3;
242  ctx->alpha_info = buf[17] & 0xf;
243 
244  if (ctx->alpha_info > 2) {
245  av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
246  return AVERROR_INVALIDDATA;
247  }
248  if (avctx->skip_alpha) ctx->alpha_info = 0;
249 
250  ff_dlog(avctx, "frame type %d\n", ctx->frame_type);
251 
252  if (ctx->frame_type == 0) {
253  ctx->scan = ctx->progressive_scan; // permuted
254  } else {
255  ctx->scan = ctx->interlaced_scan; // permuted
256  ctx->frame->interlaced_frame = 1;
257  ctx->frame->top_field_first = ctx->frame_type == 1;
258  }
259 
260  if (ctx->alpha_info) {
261  if (avctx->bits_per_raw_sample == 10) {
262  pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
263  } else { /* 12b */
264  pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P12 : AV_PIX_FMT_YUVA422P12;
265  }
266  } else {
267  if (avctx->bits_per_raw_sample == 10) {
268  pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
269  } else { /* 12b */
270  pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_YUV422P12;
271  }
272  }
273 
274  if (pix_fmt != ctx->pix_fmt) {
275 #define HWACCEL_MAX (CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL)
276  enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
277  int ret;
278 
279  ctx->pix_fmt = pix_fmt;
280 
281 #if CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL
282  *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
283 #endif
284  *fmtp++ = ctx->pix_fmt;
285  *fmtp = AV_PIX_FMT_NONE;
286 
287  if ((ret = ff_thread_get_format(avctx, pix_fmts)) < 0)
288  return ret;
289 
290  avctx->pix_fmt = ret;
291  }
292 
293  avctx->color_primaries = buf[14];
294  avctx->color_trc = buf[15];
295  avctx->colorspace = buf[16];
296  avctx->color_range = AVCOL_RANGE_MPEG;
297 
298  ptr = buf + 20;
299  flags = buf[19];
300  ff_dlog(avctx, "flags %x\n", flags);
301 
302  if (flags & 2) {
303  if(buf + data_size - ptr < 64) {
304  av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
305  return AVERROR_INVALIDDATA;
306  }
307  permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
308  ptr += 64;
309  } else {
310  memset(ctx->qmat_luma, 4, 64);
311  }
312 
313  if (flags & 1) {
314  if(buf + data_size - ptr < 64) {
315  av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
316  return AVERROR_INVALIDDATA;
317  }
318  permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
319  } else {
320  memcpy(ctx->qmat_chroma, ctx->qmat_luma, 64);
321  }
322 
323  return hdr_size;
324 }
325 
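decode_frame_header() above reads fixed big-endian fields at fixed byte offsets of the frame header. A standalone sketch that pulls the same fields out of a raw buffer; the 28-byte sample header and the rb16() helper are invented for the illustration:

#include <stdio.h>
#include <stdint.h>

/* Big-endian 16-bit read, equivalent to AV_RB16(). */
static unsigned rb16(const uint8_t *p) { return (p[0] << 8) | p[1]; }

/* Hypothetical frame header (values made up): size 28, version 0, creator
 * "fmpg", 1920x1080, progressive, 4:2:2, no alpha, both qmat flags clear. */
static const uint8_t hdr[28] = {
    0x00, 0x1C, 0x00, 0x00, 'f',  'm',  'p',  'g',
    0x07, 0x80, 0x04, 0x38, 0x80, 0x00, 0x01, 0x01,
    0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00,
};

int main(void)
{
    /* Same offsets as decode_frame_header() above. */
    printf("header size    %u\n", rb16(hdr));            /* buf + 0  */
    printf("version        %u\n", rb16(hdr + 2));        /* buf + 2  */
    printf("width x height %ux%u\n", rb16(hdr + 8), rb16(hdr + 10));
    printf("frame type     %u\n", (hdr[12] >> 2) & 3);   /* 0 = progressive */
    printf("4:4:4 chroma   %s\n", (hdr[12] & 0xC0) == 0xC0 ? "yes" : "no");
    printf("alpha info     %u\n", hdr[17] & 0xF);
    printf("flags          0x%02X\n", hdr[19]);
    return 0;
}
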
326 static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
327 {
328  ProresContext *ctx = avctx->priv_data;
329  int i, hdr_size, slice_count;
330  unsigned pic_data_size;
331  int log2_slice_mb_width, log2_slice_mb_height;
332  int slice_mb_count, mb_x, mb_y;
333  const uint8_t *data_ptr, *index_ptr;
334 
335  hdr_size = buf[0] >> 3;
336  if (hdr_size < 8 || hdr_size > buf_size) {
337  av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
338  return AVERROR_INVALIDDATA;
339  }
340 
341  pic_data_size = AV_RB32(buf + 1);
342  if (pic_data_size > buf_size) {
343  av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
344  return AVERROR_INVALIDDATA;
345  }
346 
347  log2_slice_mb_width = buf[7] >> 4;
348  log2_slice_mb_height = buf[7] & 0xF;
349  if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
350  av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
351  1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
352  return AVERROR_INVALIDDATA;
353  }
354 
355  ctx->mb_width = (avctx->width + 15) >> 4;
356  if (ctx->frame_type)
357  ctx->mb_height = (avctx->height + 31) >> 5;
358  else
359  ctx->mb_height = (avctx->height + 15) >> 4;
360 
361  // QT ignores the written value
362  // slice_count = AV_RB16(buf + 5);
363  slice_count = ctx->mb_height * ((ctx->mb_width >> log2_slice_mb_width) +
364  av_popcount(ctx->mb_width & (1 << log2_slice_mb_width) - 1));
365 
366  if (ctx->slice_count != slice_count || !ctx->slices) {
367  av_freep(&ctx->slices);
368  ctx->slice_count = 0;
369  ctx->slices = av_calloc(slice_count, sizeof(*ctx->slices));
370  if (!ctx->slices)
371  return AVERROR(ENOMEM);
372  ctx->slice_count = slice_count;
373  }
374 
375  if (!slice_count)
376  return AVERROR(EINVAL);
377 
378  if (hdr_size + slice_count*2 > buf_size) {
379  av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
380  return AVERROR_INVALIDDATA;
381  }
382 
383  // parse slice information
384  index_ptr = buf + hdr_size;
385  data_ptr = index_ptr + slice_count*2;
386 
387  slice_mb_count = 1 << log2_slice_mb_width;
388  mb_x = 0;
389  mb_y = 0;
390 
391  for (i = 0; i < slice_count; i++) {
392  SliceContext *slice = &ctx->slices[i];
393 
394  slice->data = data_ptr;
395  data_ptr += AV_RB16(index_ptr + i*2);
396 
397  while (ctx->mb_width - mb_x < slice_mb_count)
398  slice_mb_count >>= 1;
399 
400  slice->mb_x = mb_x;
401  slice->mb_y = mb_y;
402  slice->mb_count = slice_mb_count;
403  slice->data_size = data_ptr - slice->data;
404 
405  if (slice->data_size < 6) {
406  av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
407  return AVERROR_INVALIDDATA;
408  }
409 
410  mb_x += slice_mb_count;
411  if (mb_x == ctx->mb_width) {
412  slice_mb_count = 1 << log2_slice_mb_width;
413  mb_x = 0;
414  mb_y++;
415  }
416  if (data_ptr > buf + buf_size) {
417  av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
418  return AVERROR_INVALIDDATA;
419  }
420  }
421 
422  if (mb_x || mb_y != ctx->mb_height) {
423  av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
424  mb_y, ctx->mb_height);
425  return AVERROR_INVALIDDATA;
426  }
427 
428  return pic_data_size;
429 }
430 
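A slice normally spans 1 << log2_slice_mb_width macroblocks; at the right edge of a row the slice width is repeatedly halved, so a row contains one slice per full-width chunk plus one slice per set bit of the remainder, which is exactly what the av_popcount() expression above counts. A standalone sketch (helper names invented, popcount written out for portability):

#include <stdio.h>

/* Portable popcount for the sketch (av_popcount() in FFmpeg). */
static int popcount(unsigned v)
{
    int n = 0;
    for (; v; v &= v - 1)
        n++;
    return n;
}

/* Same per-row slice count as decode_picture_header() above. */
static int slices_per_row(int mb_width, int log2_slice_mb_width)
{
    return (mb_width >> log2_slice_mb_width) +
           popcount(mb_width & ((1 << log2_slice_mb_width) - 1));
}

int main(void)
{
    /* 1920 px -> 120 MBs: 15 slices of 8 MBs. */
    printf("1920: %d slices per row\n", slices_per_row((1920 + 15) >> 4, 3));
    /* 1440 px -> 90 MBs: 11 slices of 8, then the remainder 2 = 0b010
     * becomes a single 2-MB slice, 12 slices in total. */
    printf("1440: %d slices per row\n", slices_per_row((1440 + 15) >> 4, 3));
    return 0;
}

The halving loop over slice_mb_count inside the per-slice loop above produces exactly those tail slices, one per set bit of the remainder.
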
431 #define DECODE_CODEWORD(val, codebook, SKIP) \
432  do { \
433  unsigned int rice_order, exp_order, switch_bits; \
434  unsigned int q, buf, bits; \
435  \
436  UPDATE_CACHE(re, gb); \
437  buf = GET_CACHE(re, gb); \
438  \
439  /* number of bits to switch between rice and exp golomb */ \
440  switch_bits = codebook & 3; \
441  rice_order = codebook >> 5; \
442  exp_order = (codebook >> 2) & 7; \
443  \
444  q = 31 - av_log2(buf); \
445  \
446  if (q > switch_bits) { /* exp golomb */ \
447  bits = exp_order - switch_bits + (q<<1); \
448  if (bits > FFMIN(MIN_CACHE_BITS, 31)) \
449  return AVERROR_INVALIDDATA; \
450  val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) + \
451  ((switch_bits + 1) << rice_order); \
452  SKIP(re, gb, bits); \
453  } else if (rice_order) { \
454  SKIP_BITS(re, gb, q+1); \
455  val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order); \
456  SKIP(re, gb, rice_order); \
457  } else { \
458  val = q; \
459  SKIP(re, gb, q+1); \
460  } \
461  } while (0)
462 
463 #define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))
464 
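Each codebook constant used with DECODE_CODEWORD() packs its three parameters into one byte: the low two bits give the switch point between the Rice and exp-Golomb ranges, bits 2-4 the exp-Golomb order, and the top three bits the Rice order; TOSIGNED() then unfolds the resulting unsigned codeword into a signed value. A standalone sketch of both (helper names invented):

#include <stdio.h>

/* Unpack a codebook descriptor byte the way DECODE_CODEWORD() above does. */
static void describe_codebook(unsigned cb)
{
    unsigned switch_bits = cb & 3;        /* Rice/exp-Golomb switch point */
    unsigned exp_order   = (cb >> 2) & 7; /* exp-Golomb order             */
    unsigned rice_order  = cb >> 5;       /* Rice order                   */
    printf("0x%02X: rice %u, exp-golomb %u, switch %u\n",
           cb, rice_order, exp_order, switch_bits);
}

/* Same fold as TOSIGNED(): 0, 1, 2, 3, 4, ... -> 0, -1, +1, -2, +2, ... */
static int tosigned(int x) { return (x >> 1) ^ -(x & 1); }

int main(void)
{
    describe_codebook(0xB8); /* FIRST_DC_CB: rice 5, exp-golomb 6, switch 0 */
    describe_codebook(0x28); /* common DC/AC entry: rice 1, exp-golomb 2    */
    for (int x = 0; x < 5; x++)
        printf("TOSIGNED(%d) = %d\n", x, tosigned(x));
    return 0;
}
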
465 #define FIRST_DC_CB 0xB8
466 
467 static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
468 
469 static av_always_inline int decode_dc_coeffs(GetBitContext *gb, int16_t *out,
470  int blocks_per_slice)
471 {
472  int16_t prev_dc;
473  int code, i, sign;
474 
475  OPEN_READER(re, gb);
476 
477  DECODE_CODEWORD(code, FIRST_DC_CB, LAST_SKIP_BITS);
478  prev_dc = TOSIGNED(code);
479  out[0] = prev_dc;
480 
481  out += 64; // dc coeff for the next block
482 
483  code = 5;
484  sign = 0;
485  for (i = 1; i < blocks_per_slice; i++, out += 64) {
486  DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)], LAST_SKIP_BITS);
487  if(code) sign ^= -(code & 1);
488  else sign = 0;
489  prev_dc += (((code + 1) >> 1) ^ sign) - sign;
490  out[0] = prev_dc;
491  }
492  CLOSE_READER(re, gb);
493  return 0;
494 }
495 
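In decode_dc_coeffs() above, only the first DC is coded directly; every following block carries a delta whose magnitude is (code + 1) >> 1 and whose sign is a running state that flips on odd codewords and resets on zero, applied branchlessly by the xor/subtract expression. A standalone sketch of that reconstruction, fed with invented codewords:

#include <stdio.h>

int main(void)
{
    /* Hypothetical codewords for blocks 1..4 of a slice (block 0's DC is
     * coded separately); same update rule as decode_dc_coeffs() above. */
    static const int codes[] = { 3, 0, 2, 1 };
    int prev_dc = 100;                    /* pretend block 0 decoded to 100 */
    int sign = 0;

    for (int i = 0; i < 4; i++) {
        int code = codes[i];
        if (code) sign ^= -(code & 1);    /* odd codeword flips the sign */
        else      sign  = 0;              /* zero resets it              */
        prev_dc += (((code + 1) >> 1) ^ sign) - sign; /* +/- (code+1)/2  */
        printf("block %d: code %d -> dc %d\n", i + 1, code, prev_dc);
    }
    return 0;
}
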
496 // adaptive codebook switching lut according to previous run/level values
497 static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
498 static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
499 
500 static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
501  int16_t *out, int blocks_per_slice)
502 {
503  ProresContext *ctx = avctx->priv_data;
504  int block_mask, sign;
505  unsigned pos, run, level;
506  int max_coeffs, i, bits_left;
507  int log2_block_count = av_log2(blocks_per_slice);
508 
509  OPEN_READER(re, gb);
510  UPDATE_CACHE(re, gb); \
511  run = 4;
512  level = 2;
513 
514  max_coeffs = 64 << log2_block_count;
515  block_mask = blocks_per_slice - 1;
516 
517  for (pos = block_mask;;) {
518  bits_left = gb->size_in_bits - re_index;
519  if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
520  break;
521 
522  DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)], LAST_SKIP_BITS);
523  pos += run + 1;
524  if (pos >= max_coeffs) {
525  av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
526  return AVERROR_INVALIDDATA;
527  }
528 
529  DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)], SKIP_BITS);
530  level += 1;
531 
532  i = pos >> log2_block_count;
533 
534  sign = SHOW_SBITS(re, gb, 1);
535  SKIP_BITS(re, gb, 1);
536  out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
537  }
538 
539  CLOSE_READER(re, gb);
540  return 0;
541 }
542 
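decode_ac_coeffs() above interleaves the AC coefficients of all blocks of a slice into one run/level stream: pos counts positions across the whole slice, so pos & block_mask picks the block and pos >> log2_block_count the position in scan order, while the run_to_cb / lev_to_cb tables switch codebooks based on the previous run and level. A standalone sketch of the position split for an 8-block slice (sample positions invented):

#include <stdio.h>

int main(void)
{
    /* Same position split as decode_ac_coeffs() above, for an 8-block slice. */
    int blocks_per_slice = 8;
    int log2_block_count = 3;                    /* av_log2(8) */
    int block_mask       = blocks_per_slice - 1; /* 0x7        */

    /* A few hypothetical coefficient positions after adding run + 1. */
    static const int pos[] = { 8, 11, 42, 511 };
    for (int i = 0; i < 4; i++)
        printf("pos %3d -> block %d, scan index %2d\n",
               pos[i], pos[i] & block_mask, pos[i] >> log2_block_count);
    return 0;
}
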
543 static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
544  uint16_t *dst, int dst_stride,
545  const uint8_t *buf, unsigned buf_size,
546  const int16_t *qmat)
547 {
548  ProresContext *ctx = avctx->priv_data;
549  LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
550  int16_t *block;
551  GetBitContext gb;
552  int i, blocks_per_slice = slice->mb_count<<2;
553  int ret;
554 
555  for (i = 0; i < blocks_per_slice; i++)
556  ctx->bdsp.clear_block(blocks+(i<<6));
557 
558  init_get_bits(&gb, buf, buf_size << 3);
559 
560  if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
561  return ret;
562  if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
563  return ret;
564 
565  block = blocks;
566  for (i = 0; i < slice->mb_count; i++) {
567  ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
568  ctx->prodsp.idct_put(dst +8, dst_stride, block+(1<<6), qmat);
569  ctx->prodsp.idct_put(dst+4*dst_stride , dst_stride, block+(2<<6), qmat);
570  ctx->prodsp.idct_put(dst+4*dst_stride+8, dst_stride, block+(3<<6), qmat);
571  block += 4*64;
572  dst += 16;
573  }
574  return 0;
575 }
576 
577 static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
578  uint16_t *dst, int dst_stride,
579  const uint8_t *buf, unsigned buf_size,
580  const int16_t *qmat, int log2_blocks_per_mb)
581 {
582  ProresContext *ctx = avctx->priv_data;
583  LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
584  int16_t *block;
585  GetBitContext gb;
586  int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
587  int ret;
588 
589  for (i = 0; i < blocks_per_slice; i++)
590  ctx->bdsp.clear_block(blocks+(i<<6));
591 
592  init_get_bits(&gb, buf, buf_size << 3);
593 
594  if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
595  return ret;
596  if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
597  return ret;
598 
599  block = blocks;
600  for (i = 0; i < slice->mb_count; i++) {
601  for (j = 0; j < log2_blocks_per_mb; j++) {
602  ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
603  ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
604  block += 2*64;
605  dst += 8;
606  }
607  }
608  return 0;
609 }
610 
611 /**
612  * Decode alpha slice plane.
613  */
614 static void decode_slice_alpha(ProresContext *ctx,
615  uint16_t *dst, int dst_stride,
616  const uint8_t *buf, int buf_size,
617  int blocks_per_slice)
618 {
619  GetBitContext gb;
620  int i;
621  LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
622  int16_t *block;
623 
624  for (i = 0; i < blocks_per_slice<<2; i++)
625  ctx->bdsp.clear_block(blocks+(i<<6));
626 
627  init_get_bits(&gb, buf, buf_size << 3);
628 
629  if (ctx->alpha_info == 2) {
630  ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
631  } else {
632  ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
633  }
634 
635  block = blocks;
636 
637  for (i = 0; i < 16; i++) {
638  memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
639  dst += dst_stride >> 1;
640  block += 16 * blocks_per_slice;
641  }
642 }
643 
644 static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
645 {
646  ProresContext *ctx = avctx->priv_data;
647  SliceContext *slice = &ctx->slices[jobnr];
648  const uint8_t *buf = slice->data;
649  AVFrame *pic = ctx->frame;
650  int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
651  int luma_stride, chroma_stride;
652  int y_data_size, u_data_size, v_data_size, a_data_size, offset;
653  uint8_t *dest_y, *dest_u, *dest_v;
654  LOCAL_ALIGNED_16(int16_t, qmat_luma_scaled, [64]);
655  LOCAL_ALIGNED_16(int16_t, qmat_chroma_scaled,[64]);
656  int mb_x_shift;
657  int ret;
658  uint16_t val_no_chroma;
659 
660  slice->ret = -1;
661  //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
662  // jobnr, slice->mb_count, slice->mb_x, slice->mb_y);
663 
664  // slice header
665  hdr_size = buf[0] >> 3;
666  qscale = av_clip(buf[1], 1, 224);
667  qscale = qscale > 128 ? qscale - 96 << 2: qscale;
668  y_data_size = AV_RB16(buf + 2);
669  u_data_size = AV_RB16(buf + 4);
670  v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
671  if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
672  a_data_size = slice->data_size - y_data_size - u_data_size -
673  v_data_size - hdr_size;
674 
675  if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
676  || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
677  av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
678  return AVERROR_INVALIDDATA;
679  }
680 
681  buf += hdr_size;
682 
683  for (i = 0; i < 64; i++) {
684  qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale;
685  qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
686  }
687 
688  if (ctx->frame_type == 0) {
689  luma_stride = pic->linesize[0];
690  chroma_stride = pic->linesize[1];
691  } else {
692  luma_stride = pic->linesize[0] << 1;
693  chroma_stride = pic->linesize[1] << 1;
694  }
695 
696  if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10 ||
697  avctx->pix_fmt == AV_PIX_FMT_YUV444P12 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P12) {
698  mb_x_shift = 5;
699  log2_chroma_blocks_per_mb = 2;
700  } else {
701  mb_x_shift = 4;
702  log2_chroma_blocks_per_mb = 1;
703  }
704 
705  offset = (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
706  dest_y = pic->data[0] + offset;
707  dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
708  dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
709 
710  if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
711  dest_y += pic->linesize[0];
712  dest_u += pic->linesize[1];
713  dest_v += pic->linesize[2];
714  offset += pic->linesize[3];
715  }
716 
717  ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
718  buf, y_data_size, qmat_luma_scaled);
719  if (ret < 0)
720  return ret;
721 
722  if (!(avctx->flags & AV_CODEC_FLAG_GRAY) && (u_data_size + v_data_size) > 0) {
723  ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
724  buf + y_data_size, u_data_size,
725  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
726  if (ret < 0)
727  return ret;
728 
729  ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
730  buf + y_data_size + u_data_size, v_data_size,
731  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
732  if (ret < 0)
733  return ret;
734  }
735  else {
736  size_t mb_max_x = slice->mb_count << (mb_x_shift - 1);
737  size_t i, j;
738  if (avctx->bits_per_raw_sample == 10) {
739  val_no_chroma = 511;
740  } else { /* 12b */
741  val_no_chroma = 511 * 4;
742  }
743  for (i = 0; i < 16; ++i)
744  for (j = 0; j < mb_max_x; ++j) {
745  *(uint16_t*)(dest_u + (i * chroma_stride) + (j << 1)) = val_no_chroma;
746  *(uint16_t*)(dest_v + (i * chroma_stride) + (j << 1)) = val_no_chroma;
747  }
748  }
749 
750  /* decode alpha plane if available */
751  if (ctx->alpha_info && pic->data[3] && a_data_size) {
752  uint8_t *dest_a = pic->data[3] + offset;
753  decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
754  buf + y_data_size + u_data_size + v_data_size,
755  a_data_size, slice->mb_count);
756  }
757 
758  slice->ret = 0;
759  return 0;
760 }
761 
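Each slice parsed by decode_slice_thread() starts with its own header: the header size in the top five bits of byte 0, a quantiser index in byte 1, then big-endian 16-bit sizes for the luma and chroma planes, with the alpha plane taking whatever remains. Quantiser values above 128 are stretched non-linearly, which the expression qscale - 96 << 2 above implements (binary minus binds tighter than the shift). A standalone sketch of that expansion (helper names invented):

#include <stdio.h>

static int clip(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

/* Same expansion as decode_slice_thread() above: 1..128 are used directly,
 * 129..224 are stretched to 132..512 in steps of 4. */
static int slice_qscale(unsigned char raw)
{
    int q = clip(raw, 1, 224);
    return q > 128 ? (q - 96) << 2 : q;
}

int main(void)
{
    printf("raw   4 -> qscale %d\n", slice_qscale(4));   /*   4 */
    printf("raw 128 -> qscale %d\n", slice_qscale(128)); /* 128 */
    printf("raw 129 -> qscale %d\n", slice_qscale(129)); /* 132 */
    printf("raw 224 -> qscale %d\n", slice_qscale(224)); /* 512 */
    printf("raw 250 -> qscale %d\n", slice_qscale(250)); /* clipped to 224 -> 512 */
    return 0;
}
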
762 static int decode_picture(AVCodecContext *avctx)
763 {
764  ProresContext *ctx = avctx->priv_data;
765  int i;
766  int error = 0;
767 
768  avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);
769 
770  for (i = 0; i < ctx->slice_count; i++)
771  error += ctx->slices[i].ret < 0;
772 
773  if (error)
774  ctx->frame->decode_error_flags = FF_DECODE_ERROR_INVALID_BITSTREAM;
775  if (error < ctx->slice_count)
776  return 0;
777 
778  return ctx->slices[0].ret;
779 }
780 
781 static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
782  int *got_frame, AVPacket *avpkt)
783 {
784  ProresContext *ctx = avctx->priv_data;
785  const uint8_t *buf = avpkt->data;
786  int buf_size = avpkt->size;
787  int frame_hdr_size, pic_size, ret;
788 
789  if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
790  av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
791  return AVERROR_INVALIDDATA;
792  }
793 
794  ctx->frame = frame;
795  ctx->frame->pict_type = AV_PICTURE_TYPE_I;
796  ctx->frame->key_frame = 1;
797  ctx->first_field = 1;
798 
799  buf += 8;
800  buf_size -= 8;
801 
802  frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
803  if (frame_hdr_size < 0)
804  return frame_hdr_size;
805 
806  buf += frame_hdr_size;
807  buf_size -= frame_hdr_size;
808 
809  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
810  return ret;
811  ff_thread_finish_setup(avctx);
812 
813  if (avctx->hwaccel) {
814  ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
815  if (ret < 0)
816  return ret;
817  ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
818  if (ret < 0)
819  return ret;
820  ret = avctx->hwaccel->end_frame(avctx);
821  if (ret < 0)
822  return ret;
823  goto finish;
824  }
825 
826 decode_picture:
827  pic_size = decode_picture_header(avctx, buf, buf_size);
828  if (pic_size < 0) {
829  av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
830  return pic_size;
831  }
832 
833  if ((ret = decode_picture(avctx)) < 0) {
834  av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
835  return ret;
836  }
837 
838  buf += pic_size;
839  buf_size -= pic_size;
840 
841  if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
842  ctx->first_field = 0;
843  goto decode_picture;
844  }
845 
846 finish:
847  *got_frame = 1;
848 
849  return avpkt->size;
850 }
851 
852 static av_cold int decode_close(AVCodecContext *avctx)
853 {
854  ProresContext *ctx = avctx->priv_data;
855 
856  av_freep(&ctx->slices);
857 
858  return 0;
859 }
860 
861 #if HAVE_THREADS
862 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
863 {
864  ProresContext *csrc = src->priv_data;
865  ProresContext *cdst = dst->priv_data;
866 
867  cdst->pix_fmt = csrc->pix_fmt;
868 
869  return 0;
870 }
871 #endif
872 
873 const FFCodec ff_prores_decoder = {
874  .p.name = "prores",
875  .p.long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
876  .p.type = AVMEDIA_TYPE_VIDEO,
877  .p.id = AV_CODEC_ID_PRORES,
878  .priv_data_size = sizeof(ProresContext),
879  .init = decode_init,
880  .close = decode_close,
881  FF_CODEC_DECODE_CB(decode_frame),
882  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
883  .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
884  .p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
885  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
886  .hw_configs = (const AVCodecHWConfigInternal *const []) {
887 #if CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL
888  HWACCEL_VIDEOTOOLBOX(prores),
889 #endif
890  NULL
891  },
892 };