#define DNX10BIT_QMAT_SHIFT 18
#define RC_VARIANCE 1 // use variance or ssd for fast rc
#define LAMBDA_FRAC_BITS 10

#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "nitris_compat", "encode with Avid Nitris compatibility",
        offsetof(DNXHDEncContext, nitris_compat), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },
    { "ibias", "intra quant bias",
        offsetof(DNXHDEncContext, intra_quant_bias), AV_OPT_TYPE_INT,
        { .i64 = 0 }, INT_MIN, INT_MAX, VE },
    { "profile", NULL, offsetof(DNXHDEncContext, profile), AV_OPT_TYPE_INT,
        { .i64 = FF_PROFILE_DNXHD },
        FF_PROFILE_DNXHD, FF_PROFILE_DNXHR_444, VE, "profile" },
    { "dnxhd",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHD },
        0, 0, VE, "profile" },
    { "dnxhr_444", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHR_444 },
        0, 0, VE, "profile" },
    { "dnxhr_hqx", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHR_HQX },
        0, 0, VE, "profile" },
    { "dnxhr_hq",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHR_HQ },
        0, 0, VE, "profile" },
    { "dnxhr_sq",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHR_SQ },
        0, 0, VE, "profile" },
    { "dnxhr_lb",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_PROFILE_DNXHR_LB },
        0, 0, VE, "profile" },
    { NULL }
};
static void dnxhd_8bit_get_pixels_8x4_sym(int16_t *av_restrict block,
                                          const uint8_t *pixels,
                                          ptrdiff_t line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        /* ... copy one 8-pixel source row into the block ... */
    }
    memcpy(block,      block -  8, sizeof(*block) * 8);
    memcpy(block +  8, block - 16, sizeof(*block) * 8);
    memcpy(block + 16, block - 24, sizeof(*block) * 8);
    memcpy(block + 24, block - 32, sizeof(*block) * 8);
}
static av_always_inline
void dnxhd_10bit_get_pixels_8x4_sym(int16_t *av_restrict block,
                                    const uint8_t *pixels,
                                    ptrdiff_t line_size)
{
    memcpy(block + 0 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 7 * 8, pixels + 0 * line_size, 8 * sizeof(*block));
    memcpy(block + 1 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 6 * 8, pixels + 1 * line_size, 8 * sizeof(*block));
    memcpy(block + 2 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 5 * 8, pixels + 2 * line_size, 8 * sizeof(*block));
    memcpy(block + 3 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
    memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
}
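/*
 * Fragments of the custom 10-bit quantizers (dnxhd_10bit_dct_quantize_444()
 * and dnxhd_10bit_dct_quantize()).  The DC coefficient is quantized with a
 * plain shift, block[0] = (block[0] + 2) >> 2, while AC coefficients go
 * through a dead-zone quantizer in 16-bit fixed point: a coefficient
 * survives only if block[j] * qmat[j] falls outside the threshold window,
 * and the kept level becomes (bias +/- level) >> 16 depending on its sign.
 *
 * Rough numbers (bias value assumed for illustration): with bias = 0x8000,
 * threshold1 = (1 << 16) - bias - 1 = 32767 and threshold2 = 65534, so any
 * product with |level| <= 32767 stays inside the dead zone and is zeroed.
 */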
    int i, j, level, last_non_zero, start_i;

    unsigned int threshold1, threshold2;

    block[0] = (block[0] + 2) >> 2;

    threshold1 = (1 << 16) - bias - 1;
    threshold2 = (threshold1 << 1);

    for (i = 63; i >= start_i; i--) {

        level = block[j] * qmat[j];

        if (((unsigned)(level + threshold1)) > threshold2) {

    for (i = start_i; i <= last_non_zero; i++) {

        level = block[j] * qmat[j];

        if (((unsigned)(level + threshold1)) > threshold2) {

                level = (bias + level) >> 16;

                level = (bias - level) >> 16;

                      scantable, last_non_zero);

    return last_non_zero;
    int last_non_zero = 0;

    block[0] = (block[0] + 2) >> 2;

    for (i = 1; i < 64; ++i) {
        int j = scantable[i];

        int level = (block[j] ^ sign) - sign;

        block[j] = (level ^ sign) - sign;

                      scantable, last_non_zero);

    return last_non_zero;
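/*
 * VLC table construction (dnxhd_init_vlc()).  The AC code tables are
 * indexed by packing level and run flag as index = level * 2 | run, with
 * max_level = 1 << (bit_depth + 2).  Levels above 64 appear to be split
 * into offset = (alevel - 1) >> 6 plus a remainder, presumably to pick an
 * escape/offset code.
 */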
    int max_level = 1 << (ctx->bit_depth + 2);

    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = level * (1 << 1) | run;

            offset  = (alevel - 1) >> 6;
            alevel -= offset << 6;

    for (j = 0; j < 257; j++) {

    for (i = 0; i < 62; i++) {
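/*
 * Quantization matrix setup (dnxhd_init_qmat()).  Starting from the CID's
 * luma and chroma weight tables, one combined matrix is precomputed for
 * every qscale from 1 to avctx->qmax (the qscale * luma_weight_table[i]
 * and qscale * chroma_weight_table[i] products below), so the later
 * per-macroblock qscale search is a pure table lookup.
 */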
    uint16_t weight_matrix[64] = { 1, };

    for (i = 1; i < 64; i++) {

    for (i = 1; i < 64; i++) {

    for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
        for (i = 0; i < 64; i++) {

    for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
        for (i = 1; i < 64; i++) {

                              (qscale * luma_weight_table[i]);

                              (qscale * chroma_weight_table[i]);
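/*
 * Init-time validation in dnxhd_encode_init(): the pixel format must match
 * the selected DNxHD/DNxHR profile, DNxHR refuses interlaced encoding, and
 * the input must be at least 256x120.  The CID range 1270-1274 corresponds
 * to the DNxHR profiles and is special-cased here and again further down.
 */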
367 "pixel format is incompatible with DNxHD\n");
376 "pixel format is incompatible with DNxHD profile\n");
382 "pixel format is incompatible with DNxHR HQX profile\n");
390 "pixel format is incompatible with DNxHR LB/SQ/HQ profile\n");
399 "video parameters incompatible with DNxHD. Valid DNxHD profiles:\n");
405 if (ctx->
cid >= 1270 && ctx->
cid <= 1274)
410 "Input dimensions too small, input must be at least 256x120\n");
462 "Interlaced encoding is not supported for DNxHR profiles.\n");
#if FF_API_CODED_FRAME

    if (avctx->qmax <= 1) {

    if (ctx->cid >= 1270 && ctx->cid <= 1274)

    buf[0x21] = ctx->bit_depth == 10 ? 0x58 : 0x38;

    ctx->msip = buf + 0x170;
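/*
 * Per-block helpers.  dnxhd_encode_block() walks the scanned coefficients
 * and emits (level, run) pairs, again packing the VLC index as
 * slevel * 2 | !!run_level.  dnxhd_unquantize_c() reconstructs coefficients
 * as (2 * level + 1) * qscale * weight_matrix[i] (or the (1 - 2 * level)
 * form for negative levels) so that dnxhd_ssd_block() can sum the squared
 * reconstruction error, and dnxhd_calc_ac_bits() adds up vlc_bits plus
 * run_bits for a block.  These feed the rate-distortion decisions below.
 */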
                                                int last_index, int n)

    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {

        int run_level = i - last_non_zero - 1;
        int rlevel    = slevel * (1 << 1) | !!run_level;
                                             int qscale, int last_index)

    for (i = 1; i <= last_index; i++) {

            level = (1 - 2 * level) * qscale * weight_matrix[i];

            if (weight_matrix[i] != 8)

            if (weight_matrix[i] != 32)

            level = (2 * level + 1) * qscale * weight_matrix[i];

            if (weight_matrix[i] != 8)

            if (weight_matrix[i] != 32)

    for (i = 0; i < 64; i++)
        score += (block[i] - qblock[i]) * (block[i] - qblock[i]);
    int last_non_zero = 0;

    for (i = 1; i <= last_index; i++) {

            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[level * (1 << 1) | !!run_level] +
                    ctx->run_bits[run_level];
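/*
 * dnxhd_get_blocks() gathers one macroblock's samples into DCT blocks.
 * bw = 1 << bs is the block width, uv_w is half the luma width for 4:2:2
 * and equal to it for 4:4:4 (ctx->is_444), and dct_y_offset/dct_uv_offset
 * step down to the lower half of the macroblock.  The second offset
 * computation, with a small local line size of 16 (32 for 4:4:4), is
 * presumably the path where edge macroblocks are first copied into a
 * temporary buffer via emulated_edge_mc().
 */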
    const int bw = 1 << bs;

                     ((mb_y << 4) * ctx->m.linesize) + (mb_x << bs + 1);

        int uv_w = (y_w + 1) / 2;

        dct_y_offset  = bw * linesize;
        dct_uv_offset = bw * uvlinesize;

        int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2;

        uvlinesize = 16 + 16 * ctx->is_444;

        dct_y_offset  = bw * linesize / 2;
        dct_uv_offset = bw * uvlinesize / 2;

                                  ptr_y + dct_y_offset,
                                  ptr_y + dct_y_offset + bw,
                                  ptr_u + dct_uv_offset,
                                  ptr_v + dct_uv_offset,

                                  ptr_y + dct_y_offset, linesize);
                                  ptr_y + dct_y_offset + bw, linesize);
                                  ptr_u + dct_uv_offset, uvlinesize);
                                  ptr_v + dct_uv_offset, uvlinesize);
    static const uint8_t component[8] = { 0, 0, 1, 2, 0, 0, 1, 2 };
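/*
 * The encoder uses slice threading: dnxhd_calc_bits_thread(),
 * dnxhd_encode_thread() and dnxhd_mb_var_thread() appear to be run through
 * avctx->execute2() with jobnr equal to the macroblock row; each job first
 * switches to its per-thread context (ctx = ctx->thread[threadnr]) and then
 * loops over mb_x.  The component[] table above maps each block of the
 * 4:2:2 macroblock layout to its plane (0 = Y, 1 = Cb, 2 = Cr).
 */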
                                  int jobnr, int threadnr)

    int mb_y = jobnr, mb_x;

    ctx = ctx->thread[threadnr];

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {
            int16_t *src_block = ctx->blocks[i];

            memcpy(block, src_block, 64 * sizeof(*block));

                                 ctx->is_444 ? 4 * (n > 0) : 4 & (2 * i),
                               int jobnr, int threadnr)

    int mb_y = jobnr, mb_x;
    ctx = ctx->thread[threadnr];

    for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {

        for (i = 0; i < 8 + 4 * ctx->is_444; i++) {

                                 ctx->is_444 ? (((i >> 1) % 3) < 1 ? 0 : 4) : 4 & (2 * i),
    for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {

        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {

        offset += thread_size;
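/*
 * dnxhd_mb_var_thread() computes the per-macroblock activity used by the
 * fast rate control (see RC_VARIANCE above), apparently via the
 * pix_sum/pix_norm1 DSP hooks for aligned 8-bit macroblocks and a scalar
 * fallback otherwise.  In the 10-bit path each sample is scaled down by 6
 * bits before summing, and the variance of the 256 samples is
 *     varc = (varc - ((unsigned)sum * sum >> 8) + 128) >> 8
 * i.e. (sum of squares - sum^2 / 256) / 256 with rounding.
 */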
                                int jobnr, int threadnr)

    int mb_y = jobnr, mb_x, x, y;
    int partial_last_row = (mb_y == ctx->m.mb_height - 1) &&

    ctx = ctx->thread[threadnr];

    for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) {

        if (!partial_last_row && mb_x * 16 <= avctx->width - 16 &&
            (avctx->width % 16) == 0) {

            for (y = 0; y < bh; y++) {
                for (x = 0; x < bw; x++) {

            varc = (varc - (((unsigned) sum * sum) >> 8) + 128) >> 8;

        const int linesize = ctx->m.linesize >> 1;
        for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) {
            uint16_t *pix = (uint16_t *)ctx->thread[0]->src[0] +
                            ((mb_y << 4) * linesize) + (mb_x << 4);

            for (i = 0; i < bh; ++i) {
                for (j = 0; j < bw; ++j) {

                    const int sample = (unsigned) pix[j] >> 6;

            sqmean = sqsum >> 8;
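/*
 * dnxhd_encode_rdo() searches for a Lagrange multiplier that makes the
 * frame just fit ctx->frame_bits.  For a candidate lambda each macroblock
 * picks the qscale minimising roughly bits * lambda + ssd (lambda kept in
 * LAMBDA_FRAC_BITS fixed point), the total is rounded up to a 32-bit
 * boundary, and lambda is then bisected between last_lower and last_higher,
 * with the step size multiplied by 5 while no bracket exists yet.
 */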
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;

    for (q = 1; q < avctx->qmax; q++) {

        if (lambda == last_higher) {

        unsigned min = UINT_MAX;

        for (q = 1; q < avctx->qmax; q++) {

            unsigned score = ctx->mb_rc[i].bits * lambda +

        bits = (bits + 31) & ~31;

        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda + last_higher) >> 1;

                lambda -= down_step;
            down_step = FFMIN((int64_t)down_step * 5, INT_MAX);

            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)

            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda + last_lower) >> 1;
            else if ((int64_t)lambda + up_step > INT_MAX)

            up_step = FFMIN((int64_t)up_step * 5, INT_MAX);
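/*
 * dnxhd_find_qscale() does the same bracketing on a single global
 * quantizer: if the frame fits, qscale moves down (or bisects towards
 * last_higher); otherwise it moves up (or bisects towards last_lower),
 * with down_step/up_step growing by one on every unbracketed iteration.
 */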
    int last_higher = 0;
    int last_lower  = INT_MAX;

        bits = (bits + 31) & ~31;

        if (bits < ctx->frame_bits) {

            if (last_higher == qscale - 1) {
                qscale = last_higher;

            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale + last_higher) >> 1;

                qscale -= down_step++;

            if (last_lower == qscale + 1)

            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale + last_lower) >> 1;

                qscale += up_step++;
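/*
 * The per-macroblock rate-control entries are sorted with a byte-wise
 * radix sort: RADIX_PASSES passes over BUCKET_BITS-wide digits, a counting
 * pass (radix_count) that converts bucket sizes into start offsets, and a
 * stable scatter pass (radix_sort_pass) that places each entry at
 * buckets[v]++.
 */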
#define BUCKET_BITS 8
#define RADIX_PASSES 4
#define NBUCKETS (1 << BUCKET_BITS)

    memset(buckets, 0, sizeof(buckets[0][0]) * RADIX_PASSES * NBUCKETS);
    for (i = 0; i < size; i++) {

    for (i = NBUCKETS - 1; i >= 0; i--)
        buckets[j][i] = offset -= buckets[j][i];

    for (i = 0; i < size; i++) {

        int pos = buckets[v]++;
            delta_bits ? ((ctx->mb_rc[rc].ssd -

#if FF_API_CODED_FRAME

    for (i = 0; i < 3; i++) {

           "picture could not fit ratecontrol constraints, increase qmax\n");

    goto encode_coding_unit;
#if FF_API_CODED_FRAME