proresdec2.c
/*
 * Copyright (c) 2010-2011 Maxim Poliakovski
 * Copyright (c) 2010-2011 Elvis Presley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy),
 *                'ap4h' (4444), 'ap4x' (4444 XQ)
 */

//#define DEBUG

#define LONG_BITSTREAM_READER

#include "libavutil/internal.h"
#include "libavutil/mem_internal.h"

#include "avcodec.h"
#include "get_bits.h"
#include "hwconfig.h"
#include "idctdsp.h"
#include "internal.h"
#include "profiles.h"
#include "simple_idct.h"
#include "proresdec.h"
#include "proresdata.h"
#include "thread.h"

static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}

#define ALPHA_SHIFT_16_TO_10(alpha_val) (alpha_val >> 6)
#define ALPHA_SHIFT_8_TO_10(alpha_val)  ((alpha_val << 2) | (alpha_val >> 6))
#define ALPHA_SHIFT_16_TO_12(alpha_val) (alpha_val >> 4)
#define ALPHA_SHIFT_8_TO_12(alpha_val)  ((alpha_val << 4) | (alpha_val >> 4))

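/*
 * Unpack a run-length coded alpha plane, as read by this decoder: each
 * sample is either a full num_bits-wide delta (flag bit set) or a short
 * signed delta (7 bits for 16-bit alpha, 4 bits for 8-bit), accumulated
 * into alpha_val modulo 2^num_bits; once the per-sample continuation bit
 * is clear, a 4-bit run length (0 escapes to an 11-bit length) repeats the
 * current value.  The ALPHA_SHIFT_* macros above rescale each sample to the
 * output depth, e.g. a fully opaque 8-bit alpha of 255 maps to
 * (255 << 2) | (255 >> 6) = 1023 at 10 bits, matching 65535 >> 6 = 1023
 * for 16-bit input.
 */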
static void inline unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                                const int num_bits, const int decode_precision) {
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx       = 0;
    alpha_val = mask;
    do {
        do {
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val  = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val  = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            if (num_bits == 16) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
                } else { /* 12b */
                    dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
                }
            } else {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
                } else { /* 12b */
                    dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
                }
            }
            if (idx >= num_coeffs)
                break;
        } while (get_bits_left(gb)>0 && get_bits1(gb));
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;
        if (num_bits == 16) {
            for (i = 0; i < val; i++) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
                } else { /* 12b */
                    dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
                }
            }
        } else {
            for (i = 0; i < val; i++) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
                } else { /* 12b */
                    dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
                }
            }
        }
    } while (idx < num_coeffs);
}

static void unpack_alpha_10(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                            const int num_bits)
{
    if (num_bits == 16) {
        unpack_alpha(gb, dst, num_coeffs, 16, 10);
    } else { /* 8 bits alpha */
        unpack_alpha(gb, dst, num_coeffs, 8, 10);
    }
}

static void unpack_alpha_12(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                            const int num_bits)
{
    if (num_bits == 16) {
        unpack_alpha(gb, dst, num_coeffs, 16, 12);
    } else { /* 8 bits alpha */
        unpack_alpha(gb, dst, num_coeffs, 8, 12);
    }
}

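/*
 * Map the container FOURCC onto a ProRes profile and a decoding bit depth
 * (10 bits for the 4:2:2 profiles, 12 bits for 4444 and XQ), then set up
 * the DSP context, the permuted scan tables and the matching alpha
 * unpacker.
 */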
static av_cold int decode_init(AVCodecContext *avctx)
{
    int ret = 0;
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    switch (avctx->codec_tag) {
    case MKTAG('a','p','c','o'):
        avctx->profile = FF_PROFILE_PRORES_PROXY;
        break;
    case MKTAG('a','p','c','s'):
        avctx->profile = FF_PROFILE_PRORES_LT;
        break;
    case MKTAG('a','p','c','n'):
        avctx->profile = FF_PROFILE_PRORES_STANDARD;
        break;
    case MKTAG('a','p','c','h'):
        avctx->profile = FF_PROFILE_PRORES_HQ;
        break;
    case MKTAG('a','p','4','h'):
        avctx->profile = FF_PROFILE_PRORES_4444;
        avctx->bits_per_raw_sample = 12;
        break;
    case MKTAG('a','p','4','x'):
        avctx->profile = FF_PROFILE_PRORES_XQ;
        avctx->bits_per_raw_sample = 12;
        break;
    default:
        avctx->profile = FF_PROFILE_UNKNOWN;
        av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
    }

    if (avctx->bits_per_raw_sample == 10) {
        av_log(avctx, AV_LOG_DEBUG, "Auto bitdepth precision. Use 10b decoding based on codec tag.\n");
    } else { /* 12b */
        av_log(avctx, AV_LOG_DEBUG, "Auto bitdepth precision. Use 12b decoding based on codec tag.\n");
    }

    ff_blockdsp_init(&ctx->bdsp, avctx);
    ret = ff_proresdsp_init(&ctx->prodsp, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Fail to init proresdsp for bits per raw sample %d\n", avctx->bits_per_raw_sample);
        return ret;
    }

    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);

    permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);

    ctx->pix_fmt = AV_PIX_FMT_NONE;

    if (avctx->bits_per_raw_sample == 10){
        ctx->unpack_alpha = unpack_alpha_10;
    } else if (avctx->bits_per_raw_sample == 12){
        ctx->unpack_alpha = unpack_alpha_12;
    } else {
        av_log(avctx, AV_LOG_ERROR, "Fail to set unpack_alpha for bits per raw sample %d\n", avctx->bits_per_raw_sample);
        return AVERROR_BUG;
    }
    return ret;
}

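/*
 * Parse the frame header.  Offsets as read below: bytes 0-1 header size,
 * 2-3 bitstream version, 4-7 a four-character creator tag (only logged),
 * 8-9 width, 10-11 height, byte 12 frame flags (chroma format in bits 6-7,
 * interlacing mode in bits 2-3), bytes 14-16 colour primaries, transfer and
 * matrix, the low nibble of byte 17 the alpha channel type, and byte 19
 * flags announcing custom luma/chroma quantisation matrices that follow
 * from byte 20 on.
 */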
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;
    enum AVPixelFormat pix_fmt;

    hdr_size = AV_RB16(buf);
    ff_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    ff_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);

    if (width != avctx->width || height != avctx->height) {
        int ret;

        av_log(avctx, AV_LOG_WARNING, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
            return ret;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    ff_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    if (ctx->alpha_info) {
        if (avctx->bits_per_raw_sample == 10) {
            pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
        } else { /* 12b */
            pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P12 : AV_PIX_FMT_YUVA422P12;
        }
    } else {
        if (avctx->bits_per_raw_sample == 10) {
            pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
        } else { /* 12b */
            pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_YUV422P12;
        }
    }

    if (pix_fmt != ctx->pix_fmt) {
#define HWACCEL_MAX (CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL)
        enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
        int ret;

        ctx->pix_fmt = pix_fmt;

#if CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL
        *fmtp++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        *fmtp++ = ctx->pix_fmt;
        *fmtp = AV_PIX_FMT_NONE;

        if ((ret = ff_thread_get_format(avctx, pix_fmts)) < 0)
            return ret;

        avctx->pix_fmt = ret;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];
    avctx->color_range     = AVCOL_RANGE_MPEG;

    ptr   = buf + 20;
    flags = buf[19];
    ff_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memcpy(ctx->qmat_chroma, ctx->qmat_luma, 64);
    }

    return hdr_size;
}

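/*
 * Parse the picture header and build the slice table.  The header carries
 * its own size, the coded picture size and the log2 slice width in
 * macroblocks; it is followed by a table of 16-bit slice sizes from which
 * the per-slice data pointers are derived, with progressively narrower
 * slices at the right edge of the picture.
 */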
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return AVERROR_INVALIDDATA;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return AVERROR_INVALIDDATA;
    }

    ctx->mb_width  = (avctx->width  + 15) >> 4;
    if (ctx->frame_type)
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    // QT ignores the written value
    // slice_count = AV_RB16(buf + 5);
    slice_count = ctx->mb_height * ((ctx->mb_width >> log2_slice_mb_width) +
                                    av_popcount(ctx->mb_width & (1 << log2_slice_mb_width) - 1));

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slice_count = 0;
        ctx->slices = av_calloc(slice_count, sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return AVERROR_INVALIDDATA;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return AVERROR_INVALIDDATA;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return AVERROR_INVALIDDATA;
    }

    return pic_data_size;
}

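/*
 * Codebook descriptors are packed into a single byte: bits 0-1 hold the
 * number of leading zeroes above which decoding switches from Rice to
 * exp-Golomb, bits 2-4 the exp-Golomb order and bits 5-7 the Rice order.
 * For example FIRST_DC_CB (0xB8) gives switch_bits = 0, exp_order = 6,
 * rice_order = 5.
 */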
#define DECODE_CODEWORD(val, codebook, SKIP)                            \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q<<1);                    \
            if (bits > FFMIN(MIN_CACHE_BITS, 31))                       \
                return AVERROR_INVALIDDATA;                             \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                ((switch_bits + 1) << rice_order);                      \
            SKIP(re, gb, bits);                                         \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q+1);                                     \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP(re, gb, rice_order);                                   \
        } else {                                                        \
            val = q;                                                    \
            SKIP(re, gb, q+1);                                          \
        }                                                               \
    } while (0)

#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))
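/* TOSIGNED() undoes the even/odd mapping of signed values to unsigned
 * codes: 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, 4 -> 2, ... */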

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};

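/*
 * Decode the DC coefficient of every block in the slice: the first DC uses
 * FIRST_DC_CB, each subsequent one is coded as a signed difference from its
 * predecessor with the codebook picked from dc_codebook[] based on the
 * previous code value.
 */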
static av_always_inline int decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                             int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB, LAST_SKIP_BITS);
    prev_dc = TOSIGNED(code);
    out[0] = prev_dc;

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)], LAST_SKIP_BITS);
        if(code) sign ^= -(code & 1);
        else     sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    CLOSE_READER(re, gb);
    return 0;
}

// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };

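/*
 * Decode the AC coefficients of all blocks in the slice.  Run and level
 * codebooks are switched adaptively via run_to_cb[] and lev_to_cb[] based
 * on the previously decoded values; coefficients of the blocks in a slice
 * are interleaved, so pos >> log2_block_count gives the scan position and
 * pos & block_mask the block the coefficient belongs to.
 */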
static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                             int16_t *out, int blocks_per_slice)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - re_index;
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)], LAST_SKIP_BITS);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return AVERROR_INVALIDDATA;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)], SKIP_BITS);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
    }

    CLOSE_READER(re, gb);
    return 0;
}

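/*
 * Decode one slice of the luma plane: each 16x16 macroblock holds four 8x8
 * blocks, which are dequantised and inverse transformed directly into the
 * destination picture (two blocks side by side, two rows of blocks).
 */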
static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                             uint16_t *dst, int dst_stride,
                             const uint8_t *buf, unsigned buf_size,
                             const int16_t *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count<<2;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
        return ret;
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->prodsp.idct_put(dst,                 dst_stride, block+(0<<6), qmat);
        ctx->prodsp.idct_put(dst              +8, dst_stride, block+(1<<6), qmat);
        ctx->prodsp.idct_put(dst+4*dst_stride,    dst_stride, block+(2<<6), qmat);
        ctx->prodsp.idct_put(dst+4*dst_stride +8, dst_stride, block+(3<<6), qmat);
        block += 4*64;
        dst += 16;
    }
    return 0;
}

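/*
 * Decode one slice of a chroma plane.  log2_blocks_per_mb is 1 for 4:2:2
 * content (two 8x8 blocks per macroblock) and 2 for 4:4:4 (four blocks),
 * as chosen by the caller from the frame's pixel format.
 */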
static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, unsigned buf_size,
                               const int16_t *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
        return ret;
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->prodsp.idct_put(dst,              dst_stride, block+(0<<6), qmat);
            ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
            block += 2*64;
            dst += 8;
        }
    }
    return 0;
}

/**
 * Decode alpha slice plane.
 */
static void decode_slice_alpha(ProresContext *ctx,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, int buf_size,
                               int blocks_per_slice)
{
    GetBitContext gb;
    int i;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;

    for (i = 0; i < blocks_per_slice<<2; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if (ctx->alpha_info == 2) {
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
    } else {
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
    }

    block = blocks;

    for (i = 0; i < 16; i++) {
        memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
        dst   += dst_stride >> 1;
        block += 16 * blocks_per_slice;
    }
}

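/*
 * Decode one slice; this is the worker run through avctx->execute2().
 * Slice header layout as read below: the top five bits of byte 0 give the
 * header size, byte 1 the quantiser index (values above 128 expand as
 * (qscale - 96) << 2, e.g. 160 -> 256), bytes 2-5 the big-endian sizes of
 * the coded luma and Cb planes (and bytes 6-7 the Cr size when the header
 * is large enough); whatever remains is the alpha plane.
 */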
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size, offset;
    uint8_t *dest_y, *dest_u, *dest_v;
    LOCAL_ALIGNED_16(int16_t, qmat_luma_scaled,  [64]);
    LOCAL_ALIGNED_16(int16_t, qmat_chroma_scaled,[64]);
    int mb_x_shift;
    int ret;
    uint16_t val_no_chroma;

    slice->ret = -1;
    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    qscale = qscale > 128 ? qscale - 96 << 2: qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return AVERROR_INVALIDDATA;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled  [i] = ctx->qmat_luma  [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10 ||
        avctx->pix_fmt == AV_PIX_FMT_YUV444P12 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P12) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    offset = (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
    dest_y = pic->data[0] + offset;
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);

    if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
        offset += pic->linesize[3];
    }

    ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
                            buf, y_data_size, qmat_luma_scaled);
    if (ret < 0)
        return ret;

    if (!(avctx->flags & AV_CODEC_FLAG_GRAY) && (u_data_size + v_data_size) > 0) {
        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                                  buf + y_data_size, u_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;

        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
                                  buf + y_data_size + u_data_size, v_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
    }
    else {
        size_t mb_max_x = slice->mb_count << (mb_x_shift - 1);
        size_t i, j;
        if (avctx->bits_per_raw_sample == 10) {
            val_no_chroma = 511;
        } else { /* 12b */
            val_no_chroma = 511 * 4;
        }
        for (i = 0; i < 16; ++i)
            for (j = 0; j < mb_max_x; ++j) {
                *(uint16_t*)(dest_u + (i * chroma_stride) + (j << 1)) = val_no_chroma;
                *(uint16_t*)(dest_v + (i * chroma_stride) + (j << 1)) = val_no_chroma;
            }
    }

    /* decode alpha plane if available */
    if (ctx->alpha_info && pic->data[3] && a_data_size) {
        uint8_t *dest_a = pic->data[3] + offset;
        decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);
    }

    slice->ret = 0;
    return 0;
}

static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i;
    int error = 0;

    avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        error += ctx->slices[i].ret < 0;

    if (error)
        ctx->frame->decode_error_flags = FF_DECODE_ERROR_INVALID_BITSTREAM;
    if (error < ctx->slice_count)
        return 0;

    return ctx->slices[0].ret;
}

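/*
 * Top-level frame decoding.  A packet starts with an 8-byte atom header
 * whose type must be 'icpf', followed by the frame header and either one
 * picture (progressive) or two (one per field, interlaced), each with its
 * own picture header; the second field is handled by jumping back to the
 * decode_picture label below.
 */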
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    ThreadFrame tframe = { .f = data };
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size, ret;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame = frame;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;
    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return frame_hdr_size;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;
    ff_thread_finish_setup(avctx);

    if (avctx->hwaccel) {
        ret = avctx->hwaccel->start_frame(avctx, NULL, 0);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
        if (ret < 0)
            return ret;
        ret = avctx->hwaccel->end_frame(avctx);
        if (ret < 0)
            return ret;
        goto finish;
    }

decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return pic_size;
    }

    if ((ret = decode_picture(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return ret;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

finish:
    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slices);

    return 0;
}

#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    ProresContext *csrc = src->priv_data;
    ProresContext *cdst = dst->priv_data;

    cdst->pix_fmt = csrc->pix_fmt;

    return 0;
}
#endif

const AVCodec ff_prores_decoder = {
    .name           = "prores",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
    .profiles       = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
    .hw_configs     = (const AVCodecHWConfigInternal *const []) {
#if CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL
        HWACCEL_VIDEOTOOLBOX(prores),
#endif
        NULL
    },
};