Go to the documentation of this file.
39 #define CLV_VLC_BITS 9
93 int idx = 1, last = 0,
val, skip;
95 memset(
blk, 0,
sizeof(*
blk) * 64);
101 while (idx < 64 && !last) {
107 skip = (
val >> 4) & 0xFF;
118 val = ac_quant * (2 * aval + 1);
130 return (idx <= 64 && last) ? 0 : -1;
/*
 * One pass of the 8-point ClearVideo inverse DCT, applied in place to either
 * a row (step == 1) or a column (step == 8) of an 8x8 coefficient block.
 *
 * blk    - array/pointer holding the eight elements to transform
 * step   - distance between consecutive elements (1 for rows, 8 for columns)
 * bias   - rounding constant folded into the two DC butterflies
 * shift  - final right shift applied to every output sample
 * dshift - pre-shift applied to the DC pair before biasing
 * OP     - post-operation applied to each multiply result
 *          (identity for the first pass, COP for the second)
 *
 * The multipliers are the usual fixed-point cosine factors; the layout is the
 * classic even/odd butterfly decomposition of the 8-point IDCT.
 */
#define DCT_TEMPLATE(blk, step, bias, shift, dshift, OP)                     \
    /* odd part: inputs 1, 3, 5, 7 */                                        \
    const int o0 = OP(2841 * blk[1 * step] +  565 * blk[7 * step]);          \
    const int o1 = OP( 565 * blk[1 * step] - 2841 * blk[7 * step]);          \
    const int o2 = OP(1609 * blk[5 * step] + 2408 * blk[3 * step]);          \
    const int o3 = OP(2408 * blk[5 * step] - 1609 * blk[3 * step]);          \
    /* even part: inputs 2 and 6 ... */                                      \
    const int e0 = OP(1108 * blk[2 * step] - 2676 * blk[6 * step]);          \
    const int e1 = OP(2676 * blk[2 * step] + 1108 * blk[6 * step]);          \
    /* ... plus the DC pair, pre-scaled and biased for rounding */           \
    const int d0 = ((blk[0 * step] + blk[4 * step]) * (1 << dshift)) + bias; \
    const int d1 = ((blk[0 * step] - blk[4 * step]) * (1 << dshift)) + bias; \
    const int s0 = o0 + o2;                                                  \
    const int s1 = o0 - o2;                                                  \
    /* 181/256 ~= 1/sqrt(2); the unsigned multiply sidesteps signed overflow */ \
    const int r0 = (int)(181U * (s1 + (o1 - o3)) + 0x80) >> 8;               \
    const int r1 = (int)(181U * (s1 - (o1 - o3)) + 0x80) >> 8;               \
    const int s2 = o1 + o3;                                                  \
                                                                             \
    blk[0 * step] = (d0 + e1 + s0) >> shift;                                 \
    blk[1 * step] = (d1 + e0 + r0) >> shift;                                 \
    blk[2 * step] = (d1 - e0 + r1) >> shift;                                 \
    blk[3 * step] = (d0 - e1 + s2) >> shift;                                 \
    blk[4 * step] = (d0 - e1 - s2) >> shift;                                 \
    blk[5 * step] = (d1 - e0 - r1) >> shift;                                 \
    blk[6 * step] = (d1 + e0 - r0) >> shift;                                 \
    blk[7 * step] = (d0 + e1 - s0) >> shift;

/* Second-pass post-op: rescale an intermediate value with rounding. */
#define COP(x) (((x) + 4) >> 3)
166 for (
i = 0;
i < 8;
i++) {
172 for (
i = 0;
i < 8;
i++) {
180 int i, has_ac[6], off;
182 for (
i = 0;
i < 6;
i++)
185 off = x * 16 + y * 16 *
c->pic->linesize[0];
186 for (
i = 0;
i < 4;
i++) {
189 if (!x && !(
i & 1)) {
190 c->block[0] +=
c->top_dc[0];
191 c->top_dc[0] =
c->block[0];
193 c->block[0] +=
c->left_dc[(
i & 2) >> 1];
195 c->left_dc[(
i & 2) >> 1] =
c->block[0];
196 c->block[0] *=
c->luma_dc_quant;
199 off +=
c->pic->linesize[0] * 8;
200 c->idsp.put_pixels_clamped(
c->block,
201 c->pic->data[0] + off + (
i & 1) * 8,
202 c->pic->linesize[0]);
205 off = x * 8 + y * 8 *
c->pic->linesize[1];
206 for (
i = 1;
i < 3;
i++) {
210 c->block[0] +=
c->top_dc[
i];
211 c->top_dc[
i] =
c->block[0];
213 c->block[0] +=
c->left_dc[
i + 1];
215 c->left_dc[
i + 1] =
c->block[0];
216 c->block[0] *=
c->chroma_dc_quant;
218 c->idsp.put_pixels_clamped(
c->block,
c->pic->data[
i] + off,
219 c->pic->linesize[
i]);
226 int plane,
int x,
int y,
int dx,
int dy,
int size)
228 int shift = plane > 0;
231 int sstride, dstride, soff, doff;
232 uint8_t *sbuf, *dbuf;
235 if (x < 0 || sx < 0 || y < 0 || sy < 0 ||
242 sstride =
src->linesize[plane];
244 soff = sx + sy * sstride;
245 sbuf =
src->data[plane];
246 doff = x + y * dstride;
247 dbuf = dst->
data[plane];
250 uint8_t *dptr = &dbuf[doff];
251 uint8_t *sptr = &sbuf[soff];
253 memcpy(dptr, sptr,
size);
262 int plane,
int x,
int y,
int dx,
int dy,
int size,
int bias)
264 int shift = plane > 0;
267 int sstride =
src->linesize[plane];
269 int soff = sx + sy * sstride;
270 uint8_t *sbuf =
src->data[plane];
271 int doff = x + y * dstride;
272 uint8_t *dbuf = dst->
data[plane];
275 if (x < 0 || sx < 0 || y < 0 || sy < 0 ||
282 for (j = 0; j <
size; j++) {
283 uint8_t *dptr = &dbuf[doff];
284 uint8_t *sptr = &sbuf[soff];
287 int val = sptr[
i] + bias;
302 int left_mv, right_mv, top_mv, bot_mv;
310 }
else if ((mb_x == 0) || (mb_x == mvi->
mb_w - 1)) {
314 MV B = mvi->
mv[ mb_x ];
315 MV C = mvi->
mv[ mb_x + 1];
322 left_mv = -((mb_x * mvi->
mb_size));
323 right_mv = ((mvi->
mb_w - mb_x - 1) * mvi->
mb_size);
324 if (res.
x < left_mv) {
327 if (res.
x > right_mv) {
330 top_mv = -((mb_y * mvi->
mb_size));
332 if (res.
y < top_mv) {
335 if (res.
y > bot_mv) {
372 if (lc[
level].flags_cb.table) {
376 if (lc[
level].mv_cb.table) {
380 mv.x = (int8_t)(mv_code & 0xff);
381 mv.y = (int8_t)(mv_code >> 8);
388 if (lc[
level].bias_cb.table) {
392 bias = (int16_t)(bias_val);
407 for (
i = 0;
i < 4;
i++) {
408 if (ti->
flags & (1 <<
i)) {
419 int plane,
int x,
int y,
int dx,
int dy,
int size,
int bias)
433 int plane,
int x,
int y,
int size,
439 mv.x = root_mv.
x + tile->
mv.
x;
440 mv.y = root_mv.
y + tile->
mv.
y;
445 int i, hsize =
size >> 1;
447 for (
i = 0;
i < 4;
i++) {
448 int xoff = (
i & 2) == 0 ? 0 : hsize;
449 int yoff = (
i & 1) == 0 ? 0 : hsize;
471 int size =
comp == 0 ? tile_size : tile_size >> 1;
473 uint8_t *framebuf = buf->
data[
comp];
478 if ((right ==
size) && (bottom ==
size)) {
483 for (j = 0; j <
h; j++) {
484 for (
i = 0;
i < right;
i++) {
485 framebuf[off +
i] = 0x80;
490 if (bottom !=
size) {
492 for (j = 0; j < bottom; j++) {
494 framebuf[off +
i] = 0x80;
505 const uint8_t *buf = avpkt->
data;
506 int buf_size = avpkt->
size;
515 int skip = bytestream2_get_byte(&gb);
525 if (buf_size < c->mb_width *
c->mb_height) {
533 c->pic->key_frame = 1;
536 bytestream2_get_be32(&gb);
537 c->ac_quant = bytestream2_get_byte(&gb);
538 c->luma_dc_quant = 32;
539 c->chroma_dc_quant = 32;
545 for (
i = 0;
i < 3;
i++)
550 for (j = 0; j <
c->mb_height; j++) {
551 for (
i = 0;
i <
c->mb_width;
i++) {
575 mvi_reset(&
c->mvi,
c->pmb_width,
c->pmb_height, 1 <<
c->tile_shift);
577 for (j = 0; j <
c->pmb_height; j++) {
578 for (
i = 0;
i <
c->pmb_width;
i++) {
584 for (plane = 0; plane < 3; plane++) {
585 int16_t x = plane == 0 ?
i <<
c->tile_shift :
i << (
c->tile_shift - 1);
586 int16_t y = plane == 0 ? j <<
c->tile_shift : j << (
c->tile_shift - 1);
587 int16_t
size = plane == 0 ? 1 <<
c->tile_shift : 1 << (
c->tile_shift - 1);
588 int16_t mx = plane == 0 ?
mv.x :
mv.x / 2;
589 int16_t my = plane == 0 ?
mv.y :
mv.y / 2;
596 int x =
i <<
c->tile_shift;
597 int y = j <<
c->tile_shift;
598 int size = 1 <<
c->tile_shift;
609 x =
i << (
c->tile_shift - 1);
610 y = j << (
c->tile_shift - 1);
611 size = 1 << (
c->tile_shift - 1);
612 cmv.
x =
mv.x + tile->
mv.
x;
613 cmv.
y =
mv.y + tile->
mv.
y;
637 c->pic->key_frame = 0;
651 return mb_ret < 0 ? mb_ret : buf_size;
655 const uint16_t **syms,
unsigned *
offset)
660 for (
int i = 0;
i < 16;
i++) {
661 unsigned count = counts[
i];
664 for (count += num; num < count; num++)
685 for (
unsigned i = 0, j = 0, k = 0,
offset = 0;;
i++) {
686 if (0x36F & (1 <<
i)) {
692 if (0x1B7 & (1 <<
i)) {
726 if (1
U <<
c->tile_shift !=
c->tile_size ||
c->tile_shift < 1 ||
c->tile_shift > 30) {
727 av_log(avctx,
AV_LOG_ERROR,
"Tile size: %d, is not power of 2 > 1 and < 2^31\n",
c->tile_size);
743 c->pmb_width = (
w +
c->tile_size - 1) >>
c->tile_shift;
744 c->pmb_height = (
h +
c->tile_size - 1) >>
c->tile_shift;
747 c->mvi.mv =
av_calloc(
c->pmb_width * 2,
sizeof(*
c->mvi.mv));
748 if (!
c->pic || !
c->prev || !
c->mvi.mv)
771 .
p.
name =
"clearvideo",
#define AV_LOG_WARNING
Something somehow does not look correct.
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a failure.
static int get_bits_left(GetBitContext *gb)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
static void comp(unsigned char *dst, ptrdiff_t dst_stride, unsigned char *src, ptrdiff_t src_stride, int add)
static const uint8_t clv_dc_syms[NUM_DC_CODES]
static const int8_t mv[256][2]
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
static int copyadd_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size, int bias)
This structure describes decoded (raw) audio or video data.
static const uint16_t clv_mv_syms[]
static int clv_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame, AVPacket *avpkt)
static const uint8_t clv_flags_syms[][16]
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
static av_cold int clv_decode_init(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
static void extend_edges(AVFrame *buf, int tile_size)
AVCodec p
The public AVCodec.
static int decode_mb(CLVContext *c, int x, int y)
static const uint8_t clv_dc_lens[NUM_DC_CODES]
static MV mvi_predict(MVInfo *mvi, int mb_x, int mb_y, MV diff)
static double val(void *priv, double ch)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold void clv_init_static(void)
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going which in turn will define variables for the build system and the C
static int ff_thread_once(char *control, void(*routine)(void))
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static const uint16_t clv_bias_syms[]
#define FF_ARRAY_ELEMS(a)
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
struct TileInfo * child[4]
#define FF_CODEC_DECODE_CB(func)
static void clv_dct(int16_t *block)
static VLCElem vlc_buf[16716]
static int get_sbits(GetBitContext *s, int n)
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
#define INIT_VLC_STATIC_FROM_LENGTHS(vlc, bits, nb_codes, lens, len_wrap, symbols, symbols_wrap, symbols_size, offset, flags, static_size)
@ AV_PICTURE_TYPE_I
Intra.
static unsigned int get_bits1(GetBitContext *s)
int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes, const int8_t *lens, int lens_wrap, const void *symbols, int symbols_wrap, int symbols_size, int offset, int flags, void *logctx)
Build VLC decoding tables suitable for use with get_vlc2()
static av_cold int clv_decode_end(AVCodecContext *avctx)
static av_always_inline int get_vlc2(GetBitContext *s, const VLCElem *table, int bits, int max_depth)
Parse a vlc code.
static void mvi_update_row(MVInfo *mvi)
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
static av_always_inline int bytestream2_tell(GetByteContext *g)
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
static int restore_tree(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int size, TileInfo *tile, MV root_mv)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_WL24 unsigned int_TMPL AV_WL16 uint64_t_TMPL AV_WB64 unsigned int_TMPL AV_RB32
const FFCodec ff_clearvideo_decoder
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
static int copy_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size)
#define DECLARE_ALIGNED(n, t, v)
#define i(width, name, range_min, range_max)
static void pred_mv(DiracBlock *block, int stride, int x, int y, int ref)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
static const uint8_t clv_ac_bits[NUM_AC_CODES]
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init function without locking any global mutexes.
const char * name
Name of the codec implementation.
static LevelCodes lev[4+3+3]
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
void * av_calloc(size_t nmemb, size_t size)
static const uint8_t clv_flags_bits[][16]
const uint8_t ff_zigzag_direct[64]
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Identical in function to ff_get_buffer(), except it reuses the existing buffer if available.
#define INIT_VLC_STATIC_OVERLONG
#define FFSWAP(type, a, b)
static TileInfo * decode_tile_info(GetBitContext *gb, const LevelCodes *lc, int level)
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
main external API structure.
static int decode_block(CLVContext *ctx, int16_t *blk, int has_ac, int ac_quant)
#define DCT_TEMPLATE(blk, step, bias, shift, dshift, OP)
static int shift(int a, int b)
int coded_width
Bitstream width / height, may be different from width/height e.g.
@ AV_PICTURE_TYPE_P
Predicted.
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
static void mvi_reset(MVInfo *mvi, int mb_w, int mb_h, int mb_size)
static av_always_inline int diff(const uint32_t a, const uint32_t b)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
This structure stores compressed data.
static const uint8_t clv_mv_len_counts[][16]
int width
picture width / height.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
#define flags(name, subs,...)
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
The exact code depends on how similar the blocks are and how related they are to the block
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
#define MKTAG(a, b, c, d)
static int tile_do_block(AVCodecContext *avctx, AVFrame *dst, AVFrame *src, int plane, int x, int y, int dx, int dy, int size, int bias)
static const uint16_t clv_ac_syms[NUM_AC_CODES]
static av_cold void build_vlc(VLC *vlc, const uint8_t counts[16], const uint16_t **syms, unsigned *offset)
static const uint8_t clv_bias_len_counts[][16]
static const uint8_t mv_syms[2][16][10]