#define ALPHA_COMPAND_DC_OFFSET 256
#define ALPHA_COMPAND_GAIN      9400

for (int i = 0; i < 64; i++) {
for (int i = 0; i < 256; i++)
    s->lut[1][i] = i + ((768LL * i * i * i) / (256 * 256 * 256));
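/* For illustration only: the table above implements a cubic companding
 * curve. A standalone equivalent (hypothetical helper, not part of cfhd.c)
 * makes the mapping from an 8-bit index to a roughly 10-bit value explicit: */
#include <stdint.h>

static uint16_t compand_cubic(uint8_t i)
{
    /* 256 * 256 * 256 == 1 << 24, so the division is a plain shift */
    return (uint16_t)(i + ((768LL * i * i * i) >> 24));
}

/* compand_cubic(0) == 0, compand_cubic(128) == 224, compand_cubic(255) == 1014 */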
if (codebook == 0 || codebook == 1) {
    /* ... */
    return level * quantisation;
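/* Hedged sketch of the full dequantisation path for codebooks 0 and 1,
 * assuming small levels get the same cubic expansion as the LUT above and
 * larger ones fall through to the linear `level * quantisation` shown here.
 * This is a reconstruction for illustration, not the verbatim FFmpeg body. */
static int dequant_sketch(int level, int quantisation)
{
    long long a = level < 0 ? -level : level;   /* |level| */
    int sign    = level < 0 ? -1 : 1;

    if (a < 264)                                /* assumed companding threshold */
        return (int)((a + (768 * a * a * a) / (255 * 255 * 255)) * sign * quantisation);
    return level * quantisation;                /* linear fallback */
}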
for (i = 0; i < height; i++) {
    for (j = 1; j < width; j++) {
        band[j] += band[j-1];
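/* Standalone sketch of the horizontal difference (DPCM) undo above: each
 * row stores deltas relative to the previous sample, so a running prefix
 * sum restores absolute coefficients. `stride` (row advance) is an
 * assumption added to make the example self-contained. */
static void undo_difference_coding(int16_t *band, int width, int height, int stride)
{
    for (int i = 0; i < height; i++) {
        for (int j = 1; j < width; j++)
            band[j] += band[j - 1];   /* prefix sum across the row */
        band += stride;
    }
}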
for (i = 0; i < length; i++)
    /* ... */
        band[i] = bytestream2_get_le16(&peak->base);
for (i = 0; i < width; i++) {
    /* ... */
    channel = av_clip_uintp2(channel, 12);
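/* Hedged sketch of the alpha companding around the clip above, assuming the
 * elided lines subtract ALPHA_COMPAND_DC_OFFSET and apply ALPHA_COMPAND_GAIN
 * as a Q16 gain; the pre-gain shift is an assumption, not verbatim cfhd.c. */
static void process_alpha_sketch(int16_t *alpha, int width)
{
    for (int i = 0; i < width; i++) {
        int channel = alpha[i];
        channel  -= ALPHA_COMPAND_DC_OFFSET;      /* remove the 256 DC offset */
        channel <<= 3;                            /* assumed pre-gain scale */
        channel  *= ALPHA_COMPAND_GAIN;           /* 9400 / 65536 ~= 0.1434 */
        channel >>= 16;
        alpha[i]  = av_clip_uintp2(channel, 12);  /* clamp to 12 bits */
    }
}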
const int linesize = frame->linesize[0];
uint16_t *r  = (uint16_t *)frame->data[0];
uint16_t *g1 = (uint16_t *)(frame->data[0] + 2);
uint16_t *g2 = (uint16_t *)(frame->data[0] + frame->linesize[0]);
uint16_t *b  = (uint16_t *)(frame->data[0] + frame->linesize[0] + 2);
const int mid    = 1 << (bpc - 1);
const int factor = 1 << (16 - bpc);

for (int y = 0; y < frame->height >> 1; y++) {
    for (int x = 0; x < frame->width; x += 2) {
        /* ... */
        R = (rg - mid) * 2 + g;
        /* ... */
        B = (bg - mid) * 2 + g;
        /* ... */
        R  = av_clip_uintp2(R  * factor, 16);
        G1 = av_clip_uintp2(G1 * factor, 16);
        G2 = av_clip_uintp2(G2 * factor, 16);
        B  = av_clip_uintp2(B  * factor, 16);
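/* The pattern above undoes the colour-difference coding of the RGGB Bayer
 * data: the stream carries g plus biased differences rg and bg, which are
 * recentred around mid = 2^(bpc-1) and scaled up to 16 bits. Worked example,
 * assuming bpc == 12 (mid = 2048, factor = 16):
 *   g = 2000, rg = 2100  ->  R = (2100 - 2048) * 2 + 2000 = 2104
 *   R * factor = 2104 * 16 = 33664, within [0, 65535], so the final
 *   av_clip_uintp2(..., 16) leaves it untouched. */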
static void interlaced_vertical_filter(int16_t *output, int16_t *low, int16_t *high,
                                       int width, int linesize, int plane)

for (i = 0; i < width; i++) {
    even = (low[i] - high[i]) / 2;
    odd  = (low[i] + high[i]) / 2;
    output[i]            = av_clip_uintp2(even, 10);
    output[i + linesize] = av_clip_uintp2(odd, 10);
int even = (low[i] - high[i]) / 2;
int odd  = (low[i] + high[i]) / 2;
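/* Both filters above invert the same 2-tap average/difference (Haar-style)
 * pair: if the encoder stored low = even + odd and high = odd - even (up to
 * sign conventions), then even = (low - high) / 2 and odd = (low + high) / 2
 * recover the original samples. Example: even = 100, odd = 120 give
 * low = 220, high = 20, and decoding yields (220 - 20) / 2 = 100 and
 * (220 + 20) / 2 = 120. */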
for (j = 0; j < 10; j++)
int chroma_x_shift, chroma_y_shift;

if ((ret = av_pix_fmt_get_chroma_sub_sample(s->coded_format, &chroma_x_shift,
                                            &chroma_y_shift)) < 0)
for (i = 0; i < planes; i++) {
    int w8, h8, w4, h4, w2, h2;
    /* ... */
    if (chroma_y_shift && !bayer)

int t = j < 1 ? 0 : (j < 3 ? 1 : 2);

s->plane[i].l_h[9] = frame2 + 2 * w2 * h2;
int ret = 0, i, j, plane, got_buffer = 0;
uint16_t tagu     = bytestream2_get_be16(&gb);
int16_t  tag      = (int16_t)tagu;
int8_t   tag8     = (int8_t)(tagu >> 8);
uint16_t abstag   = abs(tag);
int8_t   abs_tag8 = abs(tag8);
uint16_t data     = bytestream2_get_be16(&gb);

if (abs_tag8 >= 0x60 && abs_tag8 <= 0x6f) {
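/* The bitstream is a sequence of big-endian 16-bit (tag, value) pairs, and a
 * negative tag marks an optional field, hence the abs() dispatch above. A
 * minimal reader loop in the same style (sketch; case labels hypothetical): */
static void parse_tags_sketch(GetByteContext *gb)
{
    while (bytestream2_get_bytes_left(gb) >= 4) {
        int16_t  tag  = (int16_t)bytestream2_get_be16(gb);
        uint16_t data = bytestream2_get_be16(gb);

        switch (abs(tag)) {
        /* case Version: ... required vs. optional decided by tag < 0 ... */
        default:
            break;  /* unknown tags carry their value inline and are skipped */
        }
        (void)data;
    }
}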
} else if (abstag == Version) {

for (i = 0; i < 8; i++)

if (!data || data > 5) {

} else if (data == 1) {
} else if (abstag >= 0x4000 && abstag <= 0x40ff) {
    if (abstag == 0x4001)
    /* ... */
    av_log(avctx, AV_LOG_DEBUG, "Small chunk length %d %s\n",
           data * 4, tag < 0 ? "optional" : "required");

uint32_t offset = bytestream2_get_be32(&gb);
if (data >= 100 && data <= 105) {
} else if (data >= 122 && data <= 128) {
} else if (data == 30) {

if (!(data == 10 || data == 12)) {

} else if (data == 2) {
} else if (data == 3) {
} else if (data == 4) {

if (avctx->height < height)
if (lowpass_width < 3 ||
    lowpass_width > lowpass_a_width) {

if (lowpass_height < 3 ||
    lowpass_height > lowpass_a_height) {

if (lowpass_height > lowpass_a_height || lowpass_width > lowpass_a_width ||
for (i = 0; i < lowpass_height; i++) {
    for (j = 0; j < lowpass_width; j++)
        coeff_data[j] = bytestream2_get_be16u(&gb);

    coeff_data += lowpass_width;

/* duplicate the last row when the band height is odd, so later vertical
 * filtering always sees an even number of rows */
if (lowpass_height & 1) {
    memcpy(&coeff_data[lowpass_height * lowpass_width],
           &coeff_data[(lowpass_height - 1) * lowpass_width],
           lowpass_width * sizeof(*coeff_data));

av_log(avctx, AV_LOG_DEBUG, "Lowpass coefficients %d\n",
       lowpass_width * lowpass_height);
int a_expected = highpass_a_height * highpass_a_width;
int count = 0, bytes;

if (highpass_height > highpass_a_height || highpass_width > highpass_a_width ||
    a_expected < highpass_height * (uint64_t)highpass_stride) {

expected = highpass_height * highpass_stride;

if (count > expected)
for (i = 0; i < run; i++) {
    *coeff_data |= coeff << 8;
    /* ... */
    *coeff_data++ = coeff;

if (level == 255 && run == 2)

if (count > expected)
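/* Highpass bands are run-length coded: each VLC symbol decodes to a
 * (level, run) pair and the decoder emits `run` copies of the level, with
 * level == 255, run == 2 serving as the band-end escape. A minimal sketch
 * of the guarded fill (hypothetical helper; `expected` is height * stride): */
static int rle_fill(int16_t *dst, int16_t level, int run, int count, int expected)
{
    if (count + run > expected)     /* never write past the subband */
        return -1;
    for (int i = 0; i < run; i++)
        dst[count + i] = level;
    return count + run;             /* updated coefficient count */
}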
for (i = 0; i < run; i++) {
    *coeff_data |= coeff << 8;
    /* ... */
    *coeff_data++ = coeff;

if (count > expected) {
av_log(avctx, AV_LOG_DEBUG, "End subband coeffs %i extra %i\n",
       count, count - expected);

for (plane = 0; plane < s->planes && !ret; plane++) {
int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
ptrdiff_t dst_linesize;
int16_t *low, *high, *output, *dst;

dst_linesize = pic->linesize[act_plane];

dst_linesize = pic->linesize[act_plane] / 2;
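/* act_plane swaps planes 1 and 2, mapping the bitstream's plane order onto
 * the AVFrame layout (the two chroma/colour planes are stored in the
 * opposite order). pic->linesize[] is in bytes, hence the / 2 when the
 * destination is addressed as 16-bit samples. */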
    lowpass_width < 3 || lowpass_height < 3) {

av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);
dsp->vert_filter(output, output_stride, low, lowpass_width,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, output_stride, lowpass_width, lowpass_height * 2);
for (i = 0; i < lowpass_height * 2; i++) {
    for (j = 0; j < lowpass_width * 2; j++)
    /* ... */
    output += output_stride * 2;
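/* One inverse DWT level: vert_filter recombines vertically-split bands,
 * doubling the height, then horiz_filter recombines the result row-wise,
 * doubling the width, so a lowpass_width x lowpass_height band grows to
 * 2W x 2H. Repeating this for levels 1..3 takes the W/8 x H/8 lowpass band
 * back to full resolution. */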
    lowpass_width < 3 || lowpass_height < 3) {

av_log(avctx, AV_LOG_DEBUG, "Level 2 plane %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);

dsp->vert_filter(output, output_stride, low, output_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, output_stride, lowpass_width, lowpass_height * 2);

for (i = 0; i < lowpass_height * 2; i++) {
    for (j = 0; j < lowpass_width * 2; j++)
    /* ... */
    output += output_stride * 2;

    lowpass_height < 3 || lowpass_width < 3 ||
    lowpass_width * 2 > s->plane[plane].width) {

av_log(avctx, AV_LOG_DEBUG, "Level 3 plane %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);

dsp->vert_filter(output, output_stride, low, output_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dst = (int16_t *)pic->data[act_plane];

dst += pic->linesize[act_plane] >> 1;

low  += output_stride;
high += output_stride;
dst  += dst_linesize;

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, highpass_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dst = (int16_t *)pic->data[act_plane];

low  += output_stride * 2;
high += output_stride * 2;
for (plane = 0; plane < s->planes && !ret; plane++) {
    int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;
    int16_t *low, *high, *output, *dst;
    ptrdiff_t dst_linesize;

    dst_linesize = pic->linesize[act_plane];

    dst_linesize = pic->linesize[act_plane] / 2;
    lowpass_width < 3 || lowpass_height < 3) {

av_log(avctx, AV_LOG_DEBUG, "Decoding level 1 plane %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);

dsp->vert_filter(output, output_stride, low, lowpass_width,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, output_stride, lowpass_width, lowpass_height * 2);

for (i = 0; i < lowpass_height * 2; i++) {
    for (j = 0; j < lowpass_width * 2; j++)
    /* ... */
    output += output_stride * 2;
    lowpass_width < 3 || lowpass_height < 3) {

av_log(avctx, AV_LOG_DEBUG, "Level 2 lowpass plane %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);

dsp->vert_filter(output, output_stride, low, output_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, output_stride, lowpass_width, lowpass_height * 2);

for (i = 0; i < lowpass_height * 2; i++) {
    for (j = 0; j < lowpass_width * 2; j++)
    /* ... */
    output += output_stride * 2;
dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, output_stride, lowpass_width, lowpass_height * 2);
av_log(avctx, AV_LOG_DEBUG, "temporal level %i %i %i %i\n",
       plane, lowpass_height, lowpass_width, highpass_stride);

    lowpass_width < 3 || lowpass_height < 3) {

for (i = 0; i < lowpass_height; i++) {
    /* ... */
    low  += output_stride;
    high += output_stride;
dsp->vert_filter(output, output_stride, low, output_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, output_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dsp->vert_filter(output, output_stride, low, highpass_stride,
                 high, highpass_stride, lowpass_width, lowpass_height);

dst = (int16_t *)pic->data[act_plane];

dst += pic->linesize[act_plane] >> 1;

low  += output_stride;
high += output_stride;
dst  += dst_linesize;
dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, highpass_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, output_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dsp->horiz_filter(output, output_stride, low, highpass_stride,
                  high, highpass_stride, lowpass_width, lowpass_height);

dst = (int16_t *)pic->data[act_plane];

low  += output_stride * 2;
high += output_stride * 2;
int16_t *low, *high, *dst;
int output_stride, lowpass_height, lowpass_width;
ptrdiff_t dst_linesize;

for (plane = 0; plane < s->planes; plane++) {
    int act_plane = plane == 1 ? 2 : plane == 2 ? 1 : plane;

    dst_linesize = pic->linesize[act_plane];

    dst_linesize = pic->linesize[act_plane] / 2;
    lowpass_width < 3 || lowpass_height < 3) {

dst = (int16_t *)pic->data[act_plane];

dst += pic->linesize[act_plane] >> 1;

low  += output_stride;
high += output_stride;
dst  += dst_linesize;

dst = (int16_t *)pic->data[act_plane];

low  += output_stride * 2;
high += output_stride * 2;
for (int plane = 0; plane < pdst->planes; plane++) {