#define LONG_BITSTREAM_READER

static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}
#define ALPHA_SHIFT_16_TO_10(alpha_val) (alpha_val >> 6)
#define ALPHA_SHIFT_8_TO_10(alpha_val)  ((alpha_val << 2) | (alpha_val >> 6))
#define ALPHA_SHIFT_16_TO_12(alpha_val) (alpha_val >> 4)
#define ALPHA_SHIFT_8_TO_12(alpha_val)  ((alpha_val << 4) | (alpha_val >> 4))

static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                         const int num_bits, const int decode_precision)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx       = 0;
    alpha_val = mask;
    do {
        do {
            /* ... read a signed alpha delta into `val` (elided) ... */
            val = get_bits(gb, num_bits == 16 ? 7 : 4);
            /* ... */
            alpha_val = (alpha_val + val) & mask;
            /* store the running value at the requested output precision */
            if (num_bits == 16) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
                } else { /* 12-bit */
                    dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
                }
            } else {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
                } else { /* 12-bit */
                    dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
                }
            }
            if (idx >= num_coeffs)
                break;
        } while (get_bits_left(gb) > 0 && get_bits1(gb));

        /* ... read a repeat count into `val` (elided) ... */
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;
        /* replicate the current alpha value `val` more times */
        if (num_bits == 16) {
            for (i = 0; i < val; i++) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_16_TO_10(alpha_val);
                } else { /* 12-bit */
                    dst[idx++] = ALPHA_SHIFT_16_TO_12(alpha_val);
                }
            }
        } else {
            for (i = 0; i < val; i++) {
                if (decode_precision == 10) {
                    dst[idx++] = ALPHA_SHIFT_8_TO_10(alpha_val);
                } else { /* 12-bit */
                    dst[idx++] = ALPHA_SHIFT_8_TO_12(alpha_val);
                }
            }
        }
    } while (idx < num_coeffs);
}
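/*
 * Illustrative aside (added, not part of the original file): the
 * ALPHA_SHIFT_8_TO_* macros scale up by shifting and replicating the top
 * bits into the new low bits, so extreme values stay extreme after the
 * depth change, while 16-bit source alpha is simply truncated:
 */
#if 0
    int a10 = ALPHA_SHIFT_8_TO_10(0xFF);    /* (0xFF << 2) | (0xFF >> 6) = 0x3FF */
    int a12 = ALPHA_SHIFT_8_TO_12(0xFF);    /* (0xFF << 4) | (0xFF >> 4) = 0xFFF */
    int b10 = ALPHA_SHIFT_16_TO_10(0xFFFF); /* 0xFFFF >> 6 = 0x3FF               */
#endif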
/* precision-specific wrappers installed as ctx->unpack_alpha */
static void unpack_alpha_10(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                            const int num_bits)
{
    if (num_bits == 16) {
        unpack_alpha(gb, dst, num_coeffs, 16, 10);
    } else { /* 8-bit alpha */
        unpack_alpha(gb, dst, num_coeffs, 8, 10);
    }
}

static void unpack_alpha_12(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                            const int num_bits)
{
    if (num_bits == 16) {
        unpack_alpha(gb, dst, num_coeffs, 16, 12);
    } else { /* 8-bit alpha */
        unpack_alpha(gb, dst, num_coeffs, 8, 12);
    }
}
/* in decode_init(): map the codec tag to a ProRes profile */
    switch (avctx->codec_tag) {
    case MKTAG('a','p','c','o'):
        avctx->profile = FF_PROFILE_PRORES_PROXY;
        break;
    case MKTAG('a','p','c','s'):
        avctx->profile = FF_PROFILE_PRORES_LT;
        break;
    case MKTAG('a','p','c','n'):
        avctx->profile = FF_PROFILE_PRORES_STANDARD;
        break;
    case MKTAG('a','p','c','h'):
        avctx->profile = FF_PROFILE_PRORES_HQ;
        break;
    case MKTAG('a','p','4','h'):
        avctx->profile = FF_PROFILE_PRORES_4444;
        /* ... 4:4:4 pixel format ... */
        break;
    case MKTAG('a','p','4','x'):
        avctx->profile = FF_PROFILE_PRORES_XQ;
        /* ... 4:4:4 pixel format ... */
        break;
    default:
        avctx->profile = FF_PROFILE_UNKNOWN;
        /* ... warn about the unknown profile ... */
    }

    /* ... */
    if (avctx->bits_per_raw_sample == 10) {
        av_log(avctx, AV_LOG_DEBUG,
               "Auto bitdepth precision. Use 10b decoding based on codec tag.\n");
    } else { /* 12-bit */
        av_log(avctx, AV_LOG_DEBUG,
               "Auto bitdepth precision. Use 12b decoding based on codec tag.\n");
    }
/* in decode_frame_header(): */
    ff_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        /* ... truncated header: error out ... */
        return AVERROR_INVALIDDATA;
    }
    /* ... */
    ff_dlog(avctx, "%.4s version %d\n", buf + 4, version);
    /* ... */
    if (width != avctx->width || height != avctx->height) {
        /* ... resolution change: ff_set_dimensions(avctx, width, height) ... */
    }
    /* ... interlacing, colour description and alpha info flags (elided) ... */
    ff_dlog(avctx, "flags %x\n", flags);

    /* custom luma quantisation matrix, when signalled in `flags` */
    if (buf + data_size - ptr < 64) {
        /* ... truncated header: error out ... */
        return AVERROR_INVALIDDATA;
    }
    /* ... */
    /* custom chroma quantisation matrix, when signalled in `flags` */
    if (buf + data_size - ptr < 64) {
        /* ... truncated header: error out ... */
        return AVERROR_INVALIDDATA;
    }
    /* ... */
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        /* ... bad picture header size: error out ... */
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        /* ... bad picture data size: error out ... */
        return AVERROR_INVALIDDATA;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        avpriv_request_sample(avctx, "unsupported slice resolution: %dx%d",
                              1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return AVERROR_PATCHWELCOME;
    }

    /* ... derive ctx->mb_width/mb_height and read slice_count (elided) ... */

    if (hdr_size + slice_count * 2 > buf_size) {
        /* ... slice index table does not fit: error out ... */
        return AVERROR_INVALIDDATA;
    }

    /* the slice index table (one big-endian 16-bit size per slice) follows
     * the picture header, and the slice payloads follow the table */
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count * 2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr   += AV_RB16(index_ptr + i * 2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        /* ... store slice->mb_x, mb_y, mb_count and data_size (elided) ... */

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            /* ... slice data runs past the buffer: error out ... */
            return AVERROR_INVALIDDATA;
        }
    }

    /* ... */
    return pic_data_size;
}
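/*
 * Illustrative aside (added, not part of the original file): with
 * log2_slice_mb_width == 3 a full slice spans 8 macroblocks, and the halving
 * loop above shrinks the trailing slices of a row so they fit. For a
 * hypothetical 21-MB-wide picture one row is covered by slices of 8, 8, 4
 * and 1 macroblocks:
 */
#if 0
    int remaining = 21;          /* hypothetical mb_width              */
    int count     = 1 << 3;      /* slice_mb_count for log2 width of 3 */
    while (remaining > 0) {
        while (remaining < count)
            count >>= 1;         /* same halving rule as above         */
        remaining -= count;      /* consumes 8, 8, 4, then 1           */
    }
#endif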
#define DECODE_CODEWORD(val, codebook, SKIP)                            \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q << 1);                  \
            if (bits > FFMIN(MIN_CACHE_BITS, 31))                       \
                return AVERROR_INVALIDDATA;                             \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                  ((switch_bits + 1) << rice_order);                    \
            SKIP(re, gb, bits);                                         \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q + 1);                                   \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP(re, gb, rice_order);                                   \
        } else {                                                        \
            val = q;                                                    \
            SKIP(re, gb, q + 1);                                        \
        }                                                               \
    } while (0)

#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8
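/*
 * Illustrative aside (added, not part of the original file): TOSIGNED undoes
 * the even/odd mapping of signed values onto unsigned codewords, so codes
 * 0, 1, 2, 3, 4 map back to 0, -1, 1, -2, 2:
 */
#if 0
    int v0 = TOSIGNED(0);   /*  0 */
    int v1 = TOSIGNED(1);   /* -1 */
    int v2 = TOSIGNED(2);   /*  1 */
    int v3 = TOSIGNED(3);   /* -2 */
#endif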
static av_always_inline int decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                             int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    /* ... the first DC is decoded with FIRST_DC_CB and stored (elided) ... */

    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        /* ... decode the next DC delta codeword into `code` (elided) ... */
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    /* ... */
    return 0;
}
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
                                       0x29, 0x29, 0x29, 0x28, 0x28, 0x28,
                                       0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
                                       0x28, 0x28, 0x28, 0x4C };
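/*
 * Note (added, not part of the original file): each byte in the tables above
 * is a packed codebook descriptor read by DECODE_CODEWORD as
 *     switch_bits = cb & 3,  exp_order = (cb >> 2) & 7,  rice_order = cb >> 5.
 * For example 0x06 -> switch 2, exp 1, rice 0; 0x28 -> switch 0, exp 2,
 * rice 1; 0x4C -> switch 0, exp 3, rice 2; and FIRST_DC_CB (0xB8) ->
 * switch 0, exp 6, rice 5.
 */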
static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                             int16_t *out, int blocks_per_slice)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    /* ... OPEN_READER(); run and level start from their initial adaptation
     *     state (elided) ... */

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - re_index;
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)], LAST_SKIP_BITS);
        pos += run + 1;
        if (pos >= max_coeffs) {
            /* ... damaged AC data: error out ... */
            return AVERROR_INVALIDDATA;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)], SKIP_BITS);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
    }

    CLOSE_READER(re, gb);
    return 0;
}
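/*
 * Illustrative aside (added, not part of the original file): AC coefficients
 * are stored interleaved across all blocks of the slice, so `pos` splits
 * into a scan position (high bits) and a block index (low bits). Starting
 * pos at block_mask means the first decoded AC lands at scan position 1,
 * after the separately coded DCs:
 */
#if 0
    /* blocks_per_slice = 4  ->  log2_block_count = 2, block_mask = 3 */
    unsigned pos  = 7;
    int scan_idx  = pos >> 2;    /* 1: second position in scan order */
    int block_idx = pos & 3;     /* 3: last of the four blocks       */
    /* the value is written to out[(block_idx << 6) + ctx->scan[scan_idx]] */
#endif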
static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                             uint16_t *dst, int dst_stride,
                             const uint8_t *buf, unsigned buf_size,
                             const int16_t *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count << 2;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    /* ... decode_dc_coeffs() and decode_ac_coeffs() fill `blocks`,
     *     returning early on error (elided) ... */

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        /* ... idct_put for the first two 8x8 blocks of the macroblock ... */
        ctx->prodsp.idct_put(dst + 4*dst_stride    , dst_stride, block + (2 << 6), qmat);
        ctx->prodsp.idct_put(dst + 4*dst_stride + 8, dst_stride, block + (3 << 6), qmat);
        block += 4 * 64;
        dst   += 16;
    }
    return 0;
}
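/*
 * Layout note (added, not part of the original file): each 16x16 luma
 * macroblock is coded as four 8x8 blocks; the two calls shown above place
 * blocks 2 and 3 in the lower half of the macroblock (dst + 4*dst_stride and
 * dst + 4*dst_stride + 8), with the first two blocks covering the upper half.
 */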
static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, unsigned buf_size,
                               const int16_t *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    /* ... decode_dc_coeffs() and decode_ac_coeffs() fill `blocks`,
     *     returning early on error (elided) ... */

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            /* ... idct_put for the upper 8x8 chroma block ... */
            ctx->prodsp.idct_put(dst + 4*dst_stride, dst_stride, block + (1 << 6), qmat);
            block += 2 * 64;
            dst   += 8;
        }
    }
    return 0;
}
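/*
 * Layout note (added, not part of the original file): log2_blocks_per_mb is
 * 1 for 4:2:2 content (two 8x8 chroma blocks per macroblock) and 2 for 4:4:4
 * (four blocks), matching log2_chroma_blocks_per_mb in the slice decoder
 * below.
 */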
static void decode_slice_alpha(ProresContext *ctx,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, int buf_size,
                               int blocks_per_slice)
{
    GetBitContext gb;
    int i;
    /* ... LOCAL_ALIGNED_32 coefficient buffer `blocks` and cursor `block`
     *     (elided) ... */

    for (i = 0; i < blocks_per_slice << 2; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    if (ctx->alpha_info == 2) { /* 16-bit alpha */
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
    } else {                    /* 8-bit alpha */
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
    }

    block = blocks;
    for (i = 0; i < 16; i++) {
        memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
        dst   += dst_stride >> 1;
        block += 16 * blocks_per_slice;
    }
}
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
    LOCAL_ALIGNED_16(int16_t, qmat_luma_scaled,   [64]);
    LOCAL_ALIGNED_16(int16_t, qmat_chroma_scaled, [64]);
    int mb_x_shift;
    int ret;
    uint16_t val_no_chroma;

    /* slice header: quantiser and per-plane chunk sizes */
    hdr_size = buf[0] >> 3;
    qscale   = av_clip(buf[1], 1, 224);
    qscale   = qscale > 128 ? (qscale - 96) << 2 : qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size + y_data_size + u_data_size + v_data_size > slice->data_size) {
        /* ... inconsistent plane sizes: error out ... */
        return AVERROR_INVALIDDATA;
    }
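    /*
     * Layout note (added, not part of the original file): a slice payload is
     * laid out as consecutive plane chunks,
     *
     *   [ hdr ][ Y: y_data_size ][ U: u_data_size ][ V: v_data_size ][ A: a_data_size ]
     *
     * where v_data_size is stored explicitly only when hdr_size > 7 and the
     * remaining sizes are derived from slice->data_size as above.
     */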
    buf += hdr_size;

    /* scale the frame-level quantisation matrices by the slice quantiser */
    for (i = 0; i < 64; i++) {
        qmat_luma_scaled  [i] = ctx->qmat_luma  [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else { /* interlaced: each field is decoded with a doubled stride */
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10 ||
        avctx->pix_fmt == AV_PIX_FMT_YUV444P12 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P12) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);

    /* ... shift to the second field for interlaced frames (elided) ... */

    /* decode luma plane */
    ret = decode_slice_luma(avctx, slice, (uint16_t *)dest_y, luma_stride,
                            buf, y_data_size, qmat_luma_scaled);
    if (ret < 0)
        return ret;

    /* decode both chroma planes unless only grayscale was requested */
    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
        ret = decode_slice_chroma(avctx, slice, (uint16_t *)dest_u, chroma_stride,
                                  buf + y_data_size, u_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;

        ret = decode_slice_chroma(avctx, slice, (uint16_t *)dest_v, chroma_stride,
                                  buf + y_data_size + u_data_size, v_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
    }
    else {
        /* AV_CODEC_FLAG_GRAY: fill both chroma planes with a constant
         * "no chroma" value instead of decoding them */
        size_t mb_max_x = slice->mb_count << (mb_x_shift - 1);
        size_t i, j;

        if (avctx->bits_per_raw_sample == 10) {
            val_no_chroma = 511;
        } else { /* 12-bit */
            val_no_chroma = 511 * 4;
        }
        for (i = 0; i < 16; ++i)
            for (j = 0; j < mb_max_x; ++j) {
                *(uint16_t *)(dest_u + (i * chroma_stride) + (j << 1)) = val_no_chroma;
                *(uint16_t *)(dest_v + (i * chroma_stride) + (j << 1)) = val_no_chroma;
            }
    }

    /* ... when an alpha plane is present and not skipped, hand its chunk to
     *     decode_slice_alpha() (condition elided) ... */
        decode_slice_alpha(ctx, (uint16_t *)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);

    /* ... record per-slice success for decode_picture() (elided) ... */
    return 0;
}
/* in decode_picture(), after the slice threads have run: succeed unless
 * every slice failed */
    if (error < ctx->slice_count)
        return 0;
    /* ... */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size, ret;

    /* ... frame container checks and output frame setup (elided) ... */

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return frame_hdr_size;

    buf      += frame_hdr_size;
    buf_size -= frame_hdr_size;

    /* ... first field/frame: decode_picture_header() + decode_picture() ... */

    buf      += pic_size;
    buf_size -= pic_size;

    /* ... second field for interlaced content, then return the frame ... */
}
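/*
 * Flow note (added, not part of the original file): decode_frame() parses
 * the frame header, then decodes one picture -- or, for interlaced content
 * (frame_type != 0), two field pictures back to back -- via
 * decode_picture_header() and decode_picture(), advancing buf/buf_size by
 * pic_size between the fields as sketched above.
 */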