#define PIXLET_MAGIC 0xDEADBEEF

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0;
        ...
        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);
            ctx->band[plane][i + 1].width = w >> scale;
            ctx->band[plane][i + 1].size  = (w >> scale) * (h >> scale);
            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
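
/*
 * Standalone sketch, not part of pixlet.c: the loop above (apparently from
 * init_decoder()) places three detail bands per wavelet level; x is zero for
 * the second band of each level and y is zero for the first, so the bands
 * land right of, below, and diagonal to the low-frequency corner.
 * SKETCH_NB_LEVELS and the plane size are illustrative assumptions.
 */
#include <stdio.h>

#define SKETCH_NB_LEVELS 4   /* stands in for NB_LEVELS */

int main(void)
{
    unsigned w = 256, h = 128;          /* toy plane size */

    for (int i = 0; i < SKETCH_NB_LEVELS * 3; i++) {
        unsigned scale = SKETCH_NB_LEVELS - (i / 3);
        unsigned bw = w >> scale, bh = h >> scale;
        unsigned x  = bw * (((i + 1) % 3) != 2);
        unsigned y  = bh * (((i + 1) % 3) != 1);
        printf("band %2d: %3ux%-3u at (%3u,%3u)\n", i + 1, bw, bh, x, y);
    }
    return 0;
}
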
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int rlen, escape, flag = 0;
...
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);
...
            escape = ((1 << nbits) - 1) * cnt1;
...
            escape = value + ((1 << nbits) - 1) * cnt1 - 1;
...
        value    = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
...
        state = 120 * (escape + flag) + state - (120 * state >> 8);
...
        if (state * 4ULL > 0xFF || i >= size)
...
        nbits  = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
...
            rlen = value + escape * cnt1 - 1;
...
            rlen = escape * cnt1;
...
        for (k = 0; k < rlen; k++) {
...
        flag = rlen < 0xFFFF ? 1 : 0;
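
/*
 * Standalone sketch, not part of pixlet.c: in the coefficient reader above
 * (apparently read_low_coeffs()), the update
 *     state = 120 * v + state - (120 * state >> 8);
 * acts like an exponential moving average: fed a constant v it settles at
 * 256 * v, so (state >> 8) tracks the recent magnitude, and
 * ff_clz(x) ^ 0x1F is floor(log2(x)), capped at 14, for the next code length.
 * __builtin_clz below stands in for ff_clz and assumes a GCC/Clang build.
 */
#include <stdio.h>

static unsigned sketch_nbits(unsigned state)
{
    unsigned x = (state >> 8) + 3;                  /* >= 3, so clz is defined     */
    unsigned n = (unsigned)__builtin_clz(x) ^ 0x1F; /* floor(log2(x)) for 32-bit x */
    return n < 14 ? n : 14;
}

int main(void)
{
    unsigned state = 0;
    const unsigned v[] = { 1, 1, 2, 40, 40, 40, 40, 3, 3, 3 };

    for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++) {
        state = 120 * v[i] + state - (120 * state >> 8);
        printf("v=%2u  state=%5u  state>>8=%3u  nbits=%u\n",
               v[i], state, state >> 8, sketch_nbits(state));
    }
    return 0;
}
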
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst,
                            int size, int c, int a, int d,
                            int width, ptrdiff_t stride)
...
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
...
    nbits = 33 - ff_clz(a ^ (a >> 31));
...
        if (((state >> 8) + 3) & 0xFFFFFFF)
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
...
        if (cnt1 >= length) {
...
            pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
            if (pfx < 1 || pfx > 25)
...
            cnt1 *= (1 << pfx) - 1;
...
        if (flag + cnt1 == 0) {
...
            tmp   = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
            value = xflag + (tmp ^ -xflag);
...
        state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);
...
        if ((uint64_t)state > 0xFF / 4 || i >= size)
...
        pfx    = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
...
        if (pfx < 1 || pfx > 25)
...
            rlen = value + escape * cnt1 - 1;
...
            rlen = escape * cnt1;
...
            rlen = value + 8 * escape;
...
        if (rlen > 0xFFFF || i + rlen > size)
...
        for (k = 0; k < rlen; k++) {
...
        flag = rlen < 0xFFFF ? 1 : 0;
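
/*
 * Standalone sketch, not part of pixlet.c: two branchless sign tricks used by
 * the coefficient readers above.  "-(n & 1) | 1" yields +1 for even n and -1
 * for odd n, so together with ((n + 1) >> 1) it unfolds an unsigned code into
 * 0, -1, +1, -2, +2, ...; and "xflag + (tmp ^ -xflag)" negates tmp exactly
 * when xflag is 1.  Both rely on two's-complement int, as FFmpeg does.
 */
#include <assert.h>
#include <stdio.h>

static int parity_sign(int n)              { return -(n & 1) | 1; }
static int unfold(int n)                   { return parity_sign(n) * ((n + 1) >> 1); }
static int cond_negate(int tmp, int xflag) { return xflag + (tmp ^ -xflag); } /* xflag is 0 or 1 */

int main(void)
{
    for (int n = 0; n < 5; n++)
        printf("%d -> %+d\n", n, unfold(n));   /* prints +0, -1, +1, -2, +2 */

    assert(cond_negate(42, 0) ==  42);
    assert(cond_negate(42, 1) == -42);
    return 0;
}
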
    for (i = 0; i < ctx->levels * 3; i++) {
...
        int32_t d = bytestream2_get_be32(&ctx->gb);
        int16_t *dest = (int16_t *)frame->data[plane] +
                        ctx->band[plane][i + 1].x +
                        ctx->band[plane][i + 1].y * stride;
        uint32_t magic = bytestream2_get_be32(&ctx->gb);
...
                   "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
...
                                c, (b >= FFABS(a)) ? b : a, d,
...
                   "error in highpass coefficients for plane %d, band %d\n",
    memset(pred, 0, width * sizeof(*pred));
    for (i = 0; i < height; i++) {
        val    = pred[0] + dst[0];
        dst[0] = pred[0] = val;
        for (j = 1; j < width; j++) {
            val    = pred[j] + dst[j];
            dst[j] = pred[j] = val;
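
/*
 * Standalone sketch, not part of pixlet.c: the visible part of the loop above
 * (apparently lowpass_prediction()) keeps a one-row buffer pred[] and adds it
 * into each new row, turning every column of the low band into a running sum.
 * The full function may do more than the excerpted lines; this only shows the
 * column accumulation on a 3x3 block.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    short dst[3][3] = { { 1, 2, 3 },
                        { 1, 0, 1 },
                        { 0, 4, 0 } };
    short pred[3];

    memset(pred, 0, sizeof(pred));
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            dst[i][j] = pred[j] = (short)(pred[j] + dst[i][j]);

    for (int i = 0; i < 3; i++)
        printf("%d %d %d\n", dst[i][0], dst[i][1], dst[i][2]);
    /* prints the column-wise running sums: 1 2 3 / 2 2 4 / 2 6 4 */
    return 0;
}
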
    int16_t *low, *high, *ll, *lh, *hl, *hh;
...
    high = &low[hsize + 8];
    memcpy(low,  dest,         size);
    memcpy(high, dest + hsize, size);
...
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5]  = low[j - 1];
        high[i - 5] = high[j - 2];
...
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C( 325392907) +
                (int64_t) low [i + 0] *  INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C( 325392907) +
                (int64_t) high[i + 0] *  INT64_C(1518500249) +
                (int64_t) high[i - 1] *  INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
...
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(  65078576) +
                (int64_t) low [i + 1] *  INT64_C(1583578880) +
                (int64_t) low [i + 0] *  INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(  65078576) +
                (int64_t) high[i + 1] *  INT64_C( 303700064) +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] *  INT64_C( 303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
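
/*
 * Standalone sketch, not part of pixlet.c: the constants in the filter above
 * (apparently filterfn()) look like fixed-point taps with a 2^31 scale; for
 * example 1518500249 / 2^31 is about 0.7071068, i.e. sqrt(1/2).  Printing
 * them as doubles makes the synthesis filter easier to read.  The final
 * "((value >> 32) * scale) >> 32" step then applies the per-band scale
 * factor, which the decode_plane() excerpt below appears to store with a
 * 2^32 factor.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int64_t taps[] = {
         -325392907, 3687786320, -325392907,            /* even samples, low  */
         1518500249, 1518500249,                        /* even samples, high */
          -65078576, 1583578880, 1583578880, -65078576, /* odd samples, low   */
          303700064, -3644400640, 303700064             /* odd samples, high  */
    };

    for (size_t i = 0; i < sizeof(taps) / sizeof(taps[0]); i++)
        printf("% .9f\n", (double)taps[i] / 2147483648.0);   /* tap / 2^31 */
    return 0;
}
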
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           unsigned width, unsigned height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
...
    unsigned scaled_width, scaled_height;
...
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];
...
        for (j = 0; j < scaled_height; j++) {
...
        for (j = 0; j < scaled_width; j++) {
            for (k = 0; k < scaled_height; k++) {
...
            for (k = 0; k < scaled_height; k++) {
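
/*
 * Standalone sketch, not part of pixlet.c: the loops above (apparently
 * reconstruction()) run a 1-D synthesis filter over an expanding region,
 * first row by row, then column by column; columns are gathered into a
 * contiguous temporary, filtered, and scattered back (the gather/scatter is
 * an assumption, only the loop bounds are visible above).  sketch_filter_1d()
 * is a trivial stand-in for the real wavelet filter.
 */
#include <stdio.h>

static void sketch_filter_1d(short *buf, unsigned n)
{
    for (unsigned i = 0; i < n; i++)        /* placeholder for the real filter */
        buf[i] = (short)(buf[i] * 2);
}

int main(void)
{
    enum { W = 4, H = 4 };
    short img[H][W], col[H];
    unsigned sw = W, sh = H;  /* the decoder starts smaller and doubles these per level */

    for (int j = 0; j < H; j++)
        for (int i = 0; i < W; i++)
            img[j][i] = (short)(j * W + i);

    for (unsigned j = 0; j < sh; j++)       /* horizontal pass, row by row     */
        sketch_filter_1d(img[j], sw);

    for (unsigned i = 0; i < sw; i++) {     /* vertical pass, column by column */
        for (unsigned k = 0; k < sh; k++)
            col[k] = img[k][i];             /* gather the column               */
        sketch_filter_1d(col, sh);
        for (unsigned k = 0; k < sh; k++)
            img[k][i] = col[k];             /* scatter it back                 */
    }

    for (int j = 0; j < H; j++)
        printf("%3d %3d %3d %3d\n", img[j][0], img[j][1], img[j][2], img[j][3]);
    return 0;
}
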
    int max = (1 << depth) - 1;
    if (ctx->depth == depth)
...
        ctx->lut[i] = ((int64_t)i * i * 65535LL) / max / max;
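
/*
 * Standalone sketch, not part of pixlet.c: the LUT above (apparently
 * build_luma_lut()) maps a value in [0, (1 << depth) - 1] to 16 bits through
 * a square-law curve, lut[i] = i*i*65535 / (max*max).  A few sample points
 * for an illustrative depth of 10:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int depth   = 10;
    const int max     = (1 << depth) - 1;
    const int probe[] = { 0, 256, 512, 768, max };

    for (int k = 0; k < 5; k++) {
        int i = probe[k];
        uint16_t v = (uint16_t)(((int64_t)i * i * 65535LL) / max / max);
        printf("lut[%4d] = %5u\n", i, v);
    }
    return 0;
}
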
static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
                             int w, int h, int depth)
...
    uint16_t *dsty = (uint16_t *)frame->data[0];
    int16_t  *srcy = (int16_t *) frame->data[0];
    ptrdiff_t stridey = frame->linesize[0] / 2;
    uint16_t *lut = ctx->lut;
...
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
...
            else if (srcy[i] > ((1 << depth) - 1))
...
                dsty[i] = lut[srcy[i]];
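
/*
 * Standalone sketch, not part of pixlet.c: in the loop above (apparently
 * postprocess_luma()) dsty and srcy alias the same plane, so the signed
 * wavelet output is rewritten in place as unsigned 16-bit through the LUT
 * after range checks.  The small-depth demo below builds the same square-law
 * LUT and shows the clamping; the exact out-of-range values chosen by the
 * decoder are only partly visible above, so treat them as assumptions.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    enum { DEPTH = 4, MAX = (1 << DEPTH) - 1 };
    uint16_t lut[MAX + 1];
    const int16_t src[] = { -3, 0, 4, 9, MAX, MAX + 7 };

    for (int i = 0; i <= MAX; i++)
        lut[i] = (uint16_t)(((int64_t)i * i * 65535LL) / MAX / MAX);

    for (int k = 0; k < 6; k++) {
        int16_t s  = src[k];
        uint16_t d = s <= 0 ? lut[0] : s > MAX ? lut[MAX] : lut[s];
        printf("%4d -> %5u\n", s, d);
    }
    return 0;
}
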
    uint16_t *dstu = (uint16_t *)frame->data[1];
    uint16_t *dstv = (uint16_t *)frame->data[2];
    int16_t  *srcu = (int16_t *) frame->data[1];
    int16_t  *srcv = (int16_t *) frame->data[2];
    ptrdiff_t strideu = frame->linesize[1] / 2;
    ptrdiff_t stridev = frame->linesize[2] / 2;
    const unsigned add   = 1 << (depth - 1);
    const unsigned shift = 16 - depth;
...
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
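
/*
 * Standalone sketch, not part of pixlet.c: with add = 1 << (depth - 1) and
 * shift = 16 - depth as above (apparently postprocess_chroma()), a
 * zero-centred chroma sample is moved to the unsigned midpoint and widened to
 * 16 bits.  The clip to the coded range below is an assumption; only the two
 * constants are visible in the excerpt.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_map_chroma(int16_t s, int depth)
{
    const int add   = 1 << (depth - 1);
    const int shift = 16 - depth;
    int v = s + add;                           /* re-centre around the midpoint  */

    if (v < 0)
        v = 0;                                 /* assumed clip to [0, 2^depth-1] */
    if (v > (1 << depth) - 1)
        v = (1 << depth) - 1;
    return (uint16_t)(v << shift);             /* widen to the full 16-bit range */
}

int main(void)
{
    const int16_t src[] = { -512, -1, 0, 1, 511 };

    for (int k = 0; k < 5; k++)
        printf("%5d -> %5u\n", src[k], sketch_map_chroma(src[k], 10));
    return 0;
}
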
    unsigned shift = plane > 0;
...
    for (i = ctx->levels - 1; i >= 0; i--) {
...
        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
...
    dst = (int16_t *)frame->data[plane];
...
               "error in lowpass coefficients for plane %d, top row\n", plane);
...
               "error in lowpass coefficients for plane %d, left column\n",
...
               "error in lowpass coefficients for plane %d, rest\n", plane);
...
                   ctx->h >> shift, stride, ctx->scaling[plane][H],
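
/*
 * Standalone sketch, not part of pixlet.c: (1000000ULL << 32) / h above
 * (apparently decode_plane()) stores 1000000 / h as a 32.32 fixed-point
 * number, one per level and direction.  Multiplying by it and shifting right
 * by 32 recovers the scaled value, which appears to be how the
 * "((value >> 32) * scale) >> 32" step in the filter uses it.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned h      = 720;                       /* illustrative band height */
    int64_t  scale  = (int64_t)((1000000ULL << 32) / h);
    int64_t  sample = 12345;
    int64_t  scaled = (sample * scale) >> 32;    /* roughly sample * 1000000 / h */

    printf("scale = %" PRId64 " (Q32 encoding of %.3f)\n", scale, 1000000.0 / h);
    printf("12345 scaled: %" PRId64 "\n", scaled);
    return 0;
}
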
    uint32_t pktsize, depth;
...
    pktsize = bytestream2_get_be32(&ctx->gb);
...
    version = bytestream2_get_le32(&ctx->gb);
...
    if (bytestream2_get_be32(&ctx->gb) != 1)
...
    width  = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);
    if (   width  > INT_MAX - (1U << (NB_LEVELS + 1))
        || height > INT_MAX - (1U << (NB_LEVELS + 1)))
...
    ctx->levels = bytestream2_get_be32(&ctx->gb);
...
    depth = bytestream2_get_be32(&ctx->gb);
    if (depth < 8 || depth > 15) {
...
    if (ctx->w != w || ctx->h != h) {
...
    for (i = 0; i < 3; i++) {
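
/*
 * Standalone sketch, not part of pixlet.c: the frame-header fields visible in
 * the excerpt above (apparently pixlet_decode_frame()), parsed in the same
 * order with the same sanity checks.  The original reads further fields
 * between these (elided above), so the offsets here are illustrative only;
 * rd_be32()/rd_le32() stand in for the bytestream2 readers.
 */
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_NB_LEVELS 4   /* stands in for NB_LEVELS */

static uint32_t rd_be32(const uint8_t **p)
{
    uint32_t v = ((uint32_t)(*p)[0] << 24) | ((uint32_t)(*p)[1] << 16) |
                 ((uint32_t)(*p)[2] <<  8) |  (uint32_t)(*p)[3];
    *p += 4;
    return v;
}

static uint32_t rd_le32(const uint8_t **p)
{
    uint32_t v =  (uint32_t)(*p)[0]        | ((uint32_t)(*p)[1] <<  8) |
                 ((uint32_t)(*p)[2] << 16) | ((uint32_t)(*p)[3] << 24);
    *p += 4;
    return v;
}

static int parse_header(const uint8_t *buf)
{
    const uint8_t *p = buf;
    uint32_t pktsize = rd_be32(&p);
    uint32_t version = rd_le32(&p);
    uint32_t one     = rd_be32(&p);
    uint32_t width, height, levels, depth;

    if (one != 1)
        return -1;
    width  = rd_be32(&p);
    height = rd_be32(&p);
    if (width  > INT_MAX - (1U << (SKETCH_NB_LEVELS + 1)) ||
        height > INT_MAX - (1U << (SKETCH_NB_LEVELS + 1)))
        return -1;                     /* dimensions too large to pad safely */
    levels = rd_be32(&p);
    depth  = rd_be32(&p);
    if (depth < 8 || depth > 15)
        return -1;                     /* only 8..15 bit depths are accepted */

    printf("pktsize=%" PRIu32 " version=%" PRIu32 " %" PRIu32 "x%" PRIu32
           " levels=%" PRIu32 " depth=%" PRIu32 "\n",
           pktsize, version, width, height, levels, depth);
    return 0;
}

int main(void)
{
    const uint8_t hdr[28] = {
        0, 0, 1, 0,          /* pktsize (BE)      */
        2, 0, 0, 0,          /* version (LE)      */
        0, 0, 0, 1,          /* must be 1 (BE)    */
        0, 0, 2, 128,        /* width  = 640 (BE) */
        0, 0, 1, 224,        /* height = 480 (BE) */
        0, 0, 0, 4,          /* levels (BE)       */
        0, 0, 0, 10,         /* depth  (BE)       */
    };
    return parse_header(hdr) < 0;
}
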