FFmpeg
pixlet.c
Go to the documentation of this file.
1 /*
2  * Apple Pixlet decoder
3  * Copyright (c) 2016 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdint.h>
23 
24 #include "libavutil/imgutils.h"
25 #include "libavutil/intmath.h"
26 #include "libavutil/opt.h"
27 
28 #include "avcodec.h"
29 #include "bytestream.h"
30 #include "codec_internal.h"
31 #include "get_bits.h"
32 #include "internal.h"
33 #include "thread.h"
34 #include "unary.h"
35 
36 #define NB_LEVELS 4
37 
38 #define PIXLET_MAGIC 0xDEADBEEF
39 
40 #define H 0
41 #define V 1
42 
/**
 * Geometry of one wavelet subband inside a plane's coefficient buffer.
 */
typedef struct SubBand {
    unsigned width, height; ///< subband dimensions in coefficients
    unsigned size;          ///< width * height (total coefficient count)
    unsigned x, y;          ///< top-left offset of the subband within the plane
} SubBand;
48 
49 typedef struct PixletContext {
50  AVClass *class;
51 
54 
55  int levels;
56  int depth;
57  int w, h;
58 
59  int16_t *filter[2];
60  int16_t *prediction;
61  int64_t scaling[4][2][NB_LEVELS];
62  uint16_t lut[65536];
63  SubBand band[4][NB_LEVELS * 3 + 1];
65 
67 {
70  return 0;
71 }
72 
73 static void free_buffers(AVCodecContext *avctx)
74 {
75  PixletContext *ctx = avctx->priv_data;
76 
77  av_freep(&ctx->filter[0]);
78  av_freep(&ctx->filter[1]);
79  av_freep(&ctx->prediction);
80 }
81 
83 {
84  PixletContext *ctx = avctx->priv_data;
85  free_buffers(avctx);
86  ctx->w = 0;
87  ctx->h = 0;
88  return 0;
89 }
90 
/**
 * Allocate scratch buffers sized for the current coded dimensions and
 * precompute the subband layout for every plane.
 *
 * Called whenever ctx->w/ctx->h change. On ENOMEM the caller is expected
 * to run free_buffers() (av_freep() makes partial allocation safe).
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    /* filter[0]: one column of samples (vertical pass transpose buffer);
     * filter[1]: wavelet temp line, largest dimension plus 16 samples of
     * mirrored edge padding (8 on each side, see filterfn());
     * prediction: one row of the lowpass (LL) band. */
    ctx->filter[0] = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1] = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0; /* chroma planes are subsampled by 2 */
        unsigned w = ctx->w >> shift;
        unsigned h = ctx->h >> shift;

        /* band 0 is the final LL band after NB_LEVELS decompositions */
        ctx->band[plane][0].width = w >> NB_LEVELS;
        ctx->band[plane][0].height = h >> NB_LEVELS;
        ctx->band[plane][0].size = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        /* three highpass bands per level; ctx->levels == NB_LEVELS is
         * enforced by the frame header parser before this is called */
        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);

            ctx->band[plane][i + 1].width = w >> scale;
            ctx->band[plane][i + 1].height = h >> scale;
            ctx->band[plane][i + 1].size = (w >> scale) * (h >> scale);

            /* position within the plane: bands cycle through top-right,
             * bottom-left and bottom-right quadrants of each level */
            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }

    return 0;
}
125 
/**
 * Decode lowpass (LL band) residual coefficients.
 *
 * Adaptive Rice-like entropy decoding: each coefficient is read as an
 * unsigned magnitude ("escape") whose code length is driven by a running
 * context ("state"), then mapped to a signed value. When the context
 * decays low enough, a zero-run length is decoded instead of individual
 * coefficients.
 *
 * @param dst    output coefficients, written row by row
 * @param size   number of coefficients to decode
 * @param width  coefficients per output row (row wraps advance dst by stride)
 * @param stride output row stride in int16_t units
 * @return number of bytes consumed from the bit reader, or negative AVERROR
 */
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size,
                           int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;
    int rlen, escape, flag = 0;

    while (i < size) {
        /* code length derived from the adaptive state, capped at 14 bits */
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            /* unary prefix + nbits suffix; values <= 1 use one bit less */
            value = show_bits(bc, nbits);
            if (value <= 1) {
                skip_bits(bc, nbits - 1);
                escape = ((1 << nbits) - 1) * cnt1;
            } else {
                skip_bits(bc, nbits);
                escape = value + ((1 << nbits) - 1) * cnt1 - 1;
            }
        } else {
            /* escape hit the unary limit: raw 16-bit magnitude */
            escape = get_bits(bc, 16);
        }

        /* map unsigned magnitude to signed: odd -> negative */
        value = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j = 0;
            dst += stride;
        }
        /* leaky-integrator update of the adaptation state */
        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag = 0;

        /* only switch to run-length mode while the state is small */
        if (state * 4ULL > 0xFF || i >= size)
            continue;

        nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 > 7) {
            rlen = get_bits(bc, 16);
        } else {
            value = show_bits(bc, nbits);
            if (value > 1) {
                skip_bits(bc, nbits);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, nbits - 1);
                rlen = escape * cnt1;
            }
        }

        /* run must fit in the remaining coefficient count */
        if (rlen > size - i)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a non-maximal run biases the next coefficient's sign mapping */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}
200 
201 static int read_high_coeffs(AVCodecContext *avctx, const uint8_t *src, int16_t *dst,
202  int size, int c, int a, int d,
203  int width, ptrdiff_t stride)
204 {
205  PixletContext *ctx = avctx->priv_data;
206  GetBitContext *bc = &ctx->bc;
207  unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
208  int ret, escape, pfx, value, yflag, xflag, flag = 0;
209  int64_t state = 3, tmp;
210 
212  if (ret < 0)
213  return ret;
214 
215  if (a ^ (a >> 31)) {
216  nbits = 33 - ff_clz(a ^ (a >> 31));
217  if (nbits > 16)
218  return AVERROR_INVALIDDATA;
219  } else {
220  nbits = 1;
221  }
222 
223  length = 25 - nbits;
224 
225  while (i < size) {
226  if (((state >> 8) + 3) & 0xFFFFFFF)
227  value = ff_clz((state >> 8) + 3) ^ 0x1F;
228  else
229  value = -1;
230 
231  cnt1 = get_unary(bc, 0, length);
232  if (cnt1 >= length) {
233  cnt1 = get_bits(bc, nbits);
234  } else {
235  pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
236  if (pfx < 1 || pfx > 25)
237  return AVERROR_INVALIDDATA;
238  cnt1 *= (1 << pfx) - 1;
239  shbits = show_bits(bc, pfx);
240  if (shbits <= 1) {
241  skip_bits(bc, pfx - 1);
242  } else {
243  skip_bits(bc, pfx);
244  cnt1 += shbits - 1;
245  }
246  }
247 
248  xflag = flag + cnt1;
249  yflag = xflag;
250 
251  if (flag + cnt1 == 0) {
252  value = 0;
253  } else {
254  xflag &= 1u;
255  tmp = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
256  value = xflag + (tmp ^ -xflag);
257  }
258 
259  i++;
260  dst[j++] = value;
261  if (j == width) {
262  j = 0;
263  dst += stride;
264  }
265  state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);
266 
267  flag = 0;
268 
269  if ((uint64_t)state > 0xFF / 4 || i >= size)
270  continue;
271 
272  pfx = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
273  escape = av_mod_uintp2(16383, pfx);
274  cnt1 = get_unary(bc, 0, 8);
275  if (cnt1 < 8) {
276  if (pfx < 1 || pfx > 25)
277  return AVERROR_INVALIDDATA;
278 
279  value = show_bits(bc, pfx);
280  if (value > 1) {
281  skip_bits(bc, pfx);
282  rlen = value + escape * cnt1 - 1;
283  } else {
284  skip_bits(bc, pfx - 1);
285  rlen = escape * cnt1;
286  }
287  } else {
288  if (get_bits1(bc))
289  value = get_bits(bc, 16);
290  else
291  value = get_bits(bc, 8);
292 
293  rlen = value + 8 * escape;
294  }
295 
296  if (rlen > 0xFFFF || i + rlen > size)
297  return AVERROR_INVALIDDATA;
298  i += rlen;
299 
300  for (k = 0; k < rlen; k++) {
301  dst[j++] = 0;
302  if (j == width) {
303  j = 0;
304  dst += stride;
305  }
306  }
307 
308  state = 0;
309  flag = rlen < 0xFFFF ? 1 : 0;
310  }
311 
312  align_get_bits(bc);
313  return get_bits_count(bc) >> 3;
314 }
315 
/**
 * Decode all highpass subbands of one plane.
 *
 * Each band is preceded by a header of four big-endian 32-bit parameters
 * (a, b, c, d) feeding the entropy decoder, followed by a PIXLET_MAGIC
 * marker. Coefficients are written at the band's (x, y) offset inside the
 * plane buffer.
 *
 * @param ptr   start of the packet data (base for bit-reader offsets)
 * @return 0 on success, negative AVERROR on error
 */
static int read_highpass(AVCodecContext *avctx, const uint8_t *ptr,
                         int plane, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;
    int i, ret;

    for (i = 0; i < ctx->levels * 3; i++) {
        int32_t a = bytestream2_get_be32(&ctx->gb);
        int32_t b = bytestream2_get_be32(&ctx->gb);
        int32_t c = bytestream2_get_be32(&ctx->gb);
        int32_t d = bytestream2_get_be32(&ctx->gb);
        int16_t *dest = (int16_t *)frame->data[plane] +
                        ctx->band[plane][i + 1].x +
                        ctx->band[plane][i + 1].y * stride;
        unsigned size = ctx->band[plane][i + 1].size;
        uint32_t magic = bytestream2_get_be32(&ctx->gb);

        if (magic != PIXLET_MAGIC) {
            av_log(avctx, AV_LOG_ERROR,
                   "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
                   magic, plane, i);
            return AVERROR_INVALIDDATA;
        }

        /* FFABS(INT32_MIN) is undefined behavior; reject it up front */
        if (a == INT32_MIN)
            return AVERROR_INVALIDDATA;

        ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
                               c, (b >= FFABS(a)) ? b : a, d,
                               ctx->band[plane][i + 1].width, stride);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "error in highpass coefficients for plane %d, band %d\n",
                   plane, i);
            return ret;
        }
        /* ret is the byte count consumed by the band's bitstream */
        bytestream2_skip(&ctx->gb, ret);
    }

    return 0;
}
358 
/**
 * Undo the 2-D DPCM prediction of the LL band in place.
 *
 * Each coefficient is first integrated vertically through the per-column
 * accumulator `pred` (cleared here), and every coefficient after the first
 * in a row additionally adds its left neighbour. All arithmetic wraps in
 * int16_t exactly like the encoder's.
 *
 * @param dst    LL band, width x height coefficients, updated in place
 * @param pred   caller-provided scratch of at least `width` entries
 * @param stride row stride of dst in int16_t units
 */
static void lowpass_prediction(int16_t *dst, int16_t *pred,
                               int width, int height, ptrdiff_t stride)
{
    int x, y;

    memset(pred, 0, width * sizeof(*pred));

    for (y = 0; y < height; y++) {
        /* first column: vertical accumulation only */
        pred[0] += dst[0];
        dst[0]   = pred[0];
        for (x = 1; x < width; x++) {
            /* vertical accumulation, then horizontal (left-neighbour) add */
            pred[x] += dst[x];
            dst[x]   = pred[x] + dst[x - 1];
        }
        dst += stride;
    }
}
378 
/**
 * Inverse wavelet filter for one line of `size` samples.
 *
 * The first half of dest holds the lowpass samples, the second half the
 * highpass samples. They are copied into tmp with 4 samples of mirrored
 * edge extension on each side, then recombined into dest with fixed-point
 * synthesis taps (32.32 format); `scale` is a per-band 32.32 gain applied
 * after filtering, with the result clipped to int16 range.
 *
 * @param tmp scratch of at least size + 16 int16_t entries
 */
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low = tmp + 4;          /* 4 padding samples before the lowpass half */
    high = &low[hsize + 8]; /* 4 after lowpass + 4 before highpass */

    /* note: memcpy length is in bytes, so `size` bytes == hsize samples */
    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    /* mirror 4 samples of edge extension on both ends of each half */
    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5] = low[j - 1];
        lh[0] = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0] = hl[-2];
    }

    /* even output samples */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907) +
                (int64_t) low [i + 0] * INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907) +
                (int64_t) high[i + 0] * INT64_C(1518500249) +
                (int64_t) high[i - 1] * INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
    }

    /* odd output samples */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576) +
                (int64_t) low [i + 1] * INT64_C(1583578880) +
                (int64_t) low [i + 0] * INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576) +
                (int64_t) high[i + 1] * INT64_C(303700064) +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] * INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32);
    }
}
423 
/**
 * Full inverse wavelet reconstruction of one plane.
 *
 * Starting from the smallest (LL) resolution, each of the NB_LEVELS
 * iterations doubles the active area: rows are filtered in place with
 * filterfn(), then columns are filtered by copying each column into
 * ctx->filter[0] (transpose buffer), filtering, and copying back.
 *
 * @param scaling_h per-level horizontal gains (32.32 fixed point)
 * @param scaling_v per-level vertical gains (32.32 fixed point)
 */
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           unsigned width, unsigned height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_width = width >> NB_LEVELS;
    scaled_height = height >> NB_LEVELS;
    tmp = ctx->filter[0];

    for (i = 0; i < NB_LEVELS; i++) {
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];
        scaled_width <<= 1;
        scaled_height <<= 1;

        /* horizontal pass: filter each row in place */
        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
            ptr += stride;
        }

        /* vertical pass: gather column -> filter -> scatter back */
        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_h);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}
466 
467 static void build_luma_lut(AVCodecContext *avctx, int depth)
468 {
469  PixletContext *ctx = avctx->priv_data;
470  int max = (1 << depth) - 1;
471 
472  if (ctx->depth == depth)
473  return;
474  ctx->depth = depth;
475 
476  for (int i = 0; i < FF_ARRAY_ELEMS(ctx->lut); i++)
477  ctx->lut[i] = ((int64_t)i * i * 65535LL) / max / max;
478 }
479 
481  int w, int h, int depth)
482 {
483  PixletContext *ctx = avctx->priv_data;
484  uint16_t *dsty = (uint16_t *)frame->data[0];
485  int16_t *srcy = (int16_t *)frame->data[0];
486  ptrdiff_t stridey = frame->linesize[0] / 2;
487  uint16_t *lut = ctx->lut;
488  int i, j;
489 
490  for (j = 0; j < h; j++) {
491  for (i = 0; i < w; i++) {
492  if (srcy[i] <= 0)
493  dsty[i] = 0;
494  else if (srcy[i] > ((1 << depth) - 1))
495  dsty[i] = 65535;
496  else
497  dsty[i] = lut[srcy[i]];
498  }
499  dsty += stridey;
500  srcy += stridey;
501  }
502 }
503 
504 static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
505 {
506  uint16_t *dstu = (uint16_t *)frame->data[1];
507  uint16_t *dstv = (uint16_t *)frame->data[2];
508  int16_t *srcu = (int16_t *)frame->data[1];
509  int16_t *srcv = (int16_t *)frame->data[2];
510  ptrdiff_t strideu = frame->linesize[1] / 2;
511  ptrdiff_t stridev = frame->linesize[2] / 2;
512  const unsigned add = 1 << (depth - 1);
513  const unsigned shift = 16 - depth;
514  int i, j;
515 
516  for (j = 0; j < h; j++) {
517  for (i = 0; i < w; i++) {
518  dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
519  dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
520  }
521  dstu += strideu;
522  dstv += stridev;
523  srcu += strideu;
524  srcv += stridev;
525  }
526 }
527 
528 static int decode_plane(AVCodecContext *avctx, int plane,
529  const AVPacket *avpkt, AVFrame *frame)
530 {
531  PixletContext *ctx = avctx->priv_data;
532  ptrdiff_t stride = frame->linesize[plane] / 2;
533  unsigned shift = plane > 0;
534  int16_t *dst;
535  int i, ret;
536 
537  for (i = ctx->levels - 1; i >= 0; i--) {
538  int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
539  int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
540 
541  if (!h || !v)
542  return AVERROR_INVALIDDATA;
543 
544  ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
545  ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
546  }
547 
548  bytestream2_skip(&ctx->gb, 4);
549 
550  dst = (int16_t *)frame->data[plane];
551  dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);
552 
553  ret = init_get_bits8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
555  if (ret < 0)
556  return ret;
557 
558  ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
559  ctx->band[plane][0].width - 1, 0);
560  if (ret < 0) {
561  av_log(avctx, AV_LOG_ERROR,
562  "error in lowpass coefficients for plane %d, top row\n", plane);
563  return ret;
564  }
565 
566  ret = read_low_coeffs(avctx, dst + stride,
567  ctx->band[plane][0].height - 1, 1, stride);
568  if (ret < 0) {
569  av_log(avctx, AV_LOG_ERROR,
570  "error in lowpass coefficients for plane %d, left column\n",
571  plane);
572  return ret;
573  }
574 
575  ret = read_low_coeffs(avctx, dst + stride + 1,
576  (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
577  ctx->band[plane][0].width - 1, stride);
578  if (ret < 0) {
579  av_log(avctx, AV_LOG_ERROR,
580  "error in lowpass coefficients for plane %d, rest\n", plane);
581  return ret;
582  }
583 
584  bytestream2_skip(&ctx->gb, ret);
585  if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
586  av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
587  return AVERROR_INVALIDDATA;
588  }
589 
590  ret = read_highpass(avctx, avpkt->data, plane, frame);
591  if (ret < 0)
592  return ret;
593 
594  lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
595  ctx->band[plane][0].height, stride);
596 
597  reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
598  ctx->h >> shift, stride, ctx->scaling[plane][H],
599  ctx->scaling[plane][V]);
600 
601  return 0;
602 }
603 
605  int *got_frame, AVPacket *avpkt)
606 {
607  PixletContext *ctx = avctx->priv_data;
608  int i, w, h, width, height, ret, version;
609  uint32_t pktsize, depth;
610 
611  bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
612 
613  pktsize = bytestream2_get_be32(&ctx->gb);
614  if (pktsize <= 44 + (NB_LEVELS * 8 + 6) * 3 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
615  av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
616  return AVERROR_INVALIDDATA;
617  }
618 
619  version = bytestream2_get_le32(&ctx->gb);
620  if (version != 1)
621  avpriv_request_sample(avctx, "Version %d", version);
622 
623  bytestream2_skip(&ctx->gb, 4);
624  if (bytestream2_get_be32(&ctx->gb) != 1)
625  return AVERROR_INVALIDDATA;
626  bytestream2_skip(&ctx->gb, 4);
627 
628  width = bytestream2_get_be32(&ctx->gb);
629  height = bytestream2_get_be32(&ctx->gb);
630 
631  if ( width > INT_MAX - (1U << (NB_LEVELS + 1))
632  || height > INT_MAX - (1U << (NB_LEVELS + 1)))
633  return AVERROR_INVALIDDATA;
634 
635  w = FFALIGN(width, 1 << (NB_LEVELS + 1));
636  h = FFALIGN(height, 1 << (NB_LEVELS + 1));
637 
638  ctx->levels = bytestream2_get_be32(&ctx->gb);
639  if (ctx->levels != NB_LEVELS)
640  return AVERROR_INVALIDDATA;
641  depth = bytestream2_get_be32(&ctx->gb);
642  if (depth < 8 || depth > 15) {
643  avpriv_request_sample(avctx, "Depth %d", depth);
644  return AVERROR_INVALIDDATA;
645  }
646 
647  build_luma_lut(avctx, depth);
648 
649  ret = ff_set_dimensions(avctx, w, h);
650  if (ret < 0)
651  return ret;
652  avctx->width = width;
653  avctx->height = height;
654 
655  if (ctx->w != w || ctx->h != h) {
656  free_buffers(avctx);
657  ctx->w = w;
658  ctx->h = h;
659 
660  ret = init_decoder(avctx);
661  if (ret < 0) {
662  free_buffers(avctx);
663  ctx->w = 0;
664  ctx->h = 0;
665  return ret;
666  }
667  }
668 
669  bytestream2_skip(&ctx->gb, 8);
670 
672  p->key_frame = 1;
674 
675  ret = ff_thread_get_buffer(avctx, p, 0);
676  if (ret < 0)
677  return ret;
678 
679  for (i = 0; i < 3; i++) {
680  ret = decode_plane(avctx, i, avpkt, p);
681  if (ret < 0)
682  return ret;
683  if (avctx->flags & AV_CODEC_FLAG_GRAY)
684  break;
685  }
686 
687  postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
688  postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);
689 
690  *got_frame = 1;
691 
692  return pktsize;
693 }
694 
696  .p.name = "pixlet",
697  .p.long_name = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
698  .p.type = AVMEDIA_TYPE_VIDEO,
699  .p.id = AV_CODEC_ID_PIXLET,
700  .init = pixlet_init,
701  .close = pixlet_close,
703  .priv_data_size = sizeof(PixletContext),
704  .p.capabilities = AV_CODEC_CAP_DR1 |
706  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
708 };
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:578
V
#define V
Definition: pixlet.c:41
FF_CODEC_CAP_INIT_CLEANUP
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: codec_internal.h:39
free_buffers
static void free_buffers(AVCodecContext *avctx)
Definition: pixlet.c:73
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
GetByteContext
Definition: bytestream.h:33
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
PixletContext
Definition: pixlet.c:49
pixlet_decode_frame
static int pixlet_decode_frame(AVCodecContext *avctx, AVFrame *p, int *got_frame, AVPacket *avpkt)
Definition: pixlet.c:604
get_bits_count
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:219
ff_clz
#define ff_clz
Definition: intmath.h:142
av_mod_uintp2
#define av_mod_uintp2
Definition: common.h:122
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
av_clip_uintp2_c
static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
Clip a signed integer to an unsigned power of two range.
Definition: common.h:275
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:599
read_low_coeffs
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size, int width, ptrdiff_t stride)
Definition: pixlet.c:126
internal.h
AVPacket::data
uint8_t * data
Definition: packet.h:374
PixletContext::w
int w
Definition: pixlet.c:57
b
#define b
Definition: input.c:34
SubBand::x
unsigned x
Definition: pixlet.c:46
PixletContext::bc
GetBitContext bc
Definition: pixlet.c:53
FFCodec
Definition: codec_internal.h:112
PixletContext::depth
int depth
Definition: pixlet.c:56
max
#define max(a, b)
Definition: cuda_runtime.h:33
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
SubBand::y
unsigned y
Definition: pixlet.c:46
thread.h
skip_bits
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:467
bytestream2_skip
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:168
get_bits
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:379
PixletContext::filter
int16_t * filter[2]
Definition: pixlet.c:59
FFCodec::p
AVCodec p
The public AVCodec.
Definition: codec_internal.h:116
AV_CODEC_ID_PIXLET
@ AV_CODEC_ID_PIXLET
Definition: codec_id.h:273
U
#define U(x)
Definition: vp56_arith.h:37
GetBitContext
Definition: get_bits.h:61
ff_thread_get_buffer
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have so the codec calls ff_thread_report set FF_CODEC_CAP_ALLOCATE_PROGRESS in AVCodec caps_internal and use ff_thread_get_buffer() to allocate frames. The frames must then be freed with ff_thread_release_buffer(). Otherwise decode directly into the user-supplied frames. Call ff_thread_report_progress() after some part of the current picture has decoded. A good place to put this is where draw_horiz_band() is called - add this if it isn 't called anywhere
PixletContext::lut
uint16_t lut[65536]
Definition: pixlet.c:62
read_high_coeffs
static int read_high_coeffs(AVCodecContext *avctx, const uint8_t *src, int16_t *dst, int size, int c, int a, int d, int width, ptrdiff_t stride)
Definition: pixlet.c:201
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:469
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
val
static double val(void *priv, double ch)
Definition: aeval.c:77
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
filterfn
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
Definition: pixlet.c:379
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_cold
#define av_cold
Definition: attributes.h:90
init_get_bits8
static int init_get_bits8(GetBitContext *s, const uint8_t *buffer, int byte_size)
Initialize GetBitContext.
Definition: get_bits.h:667
width
#define width
FF_CODEC_DECODE_CB
#define FF_CODEC_DECODE_CB(func)
Definition: codec_internal.h:254
decode_plane
static int decode_plane(AVCodecContext *avctx, int plane, const AVPacket *avpkt, AVFrame *frame)
Definition: pixlet.c:528
AV_PIX_FMT_YUV420P16
#define AV_PIX_FMT_YUV420P16
Definition: pixfmt.h:417
ctx
AVFormatContext * ctx
Definition: movenc.c:48
get_bits.h
SubBand::size
unsigned size
Definition: pixlet.c:45
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
AV_CODEC_CAP_FRAME_THREADS
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: codec.h:113
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
av_clip_int16
#define av_clip_int16
Definition: common.h:110
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:973
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
get_bits1
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:498
postprocess_chroma
static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
Definition: pixlet.c:504
PixletContext::levels
int levels
Definition: pixlet.c:55
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
get_unary
static int get_unary(GetBitContext *gb, int stop, int len)
Get unary code of limited length.
Definition: unary.h:46
bytestream2_get_bytes_left
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:158
bytestream2_tell
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:192
build_luma_lut
static void build_luma_lut(AVCodecContext *avctx, int depth)
Definition: pixlet.c:467
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:422
AV_CODEC_CAP_DR1
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() or get_encode_buffer() for allocating buffers and supports custom allocators.
Definition: codec.h:52
AV_CODEC_FLAG_GRAY
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:249
AVPacket::size
int size
Definition: packet.h:375
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
codec_internal.h
size
int size
Definition: twinvq_data.h:10344
state
static struct @327 state
SubBand
Definition: cfhd.h:109
PixletContext::band
SubBand band[4][NB_LEVELS *3+1]
Definition: pixlet.c:63
height
#define height
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
H
#define H
Definition: pixlet.c:40
version
version
Definition: libkvazaar.c:313
unary.h
PixletContext::gb
GetByteContext gb
Definition: pixlet.c:52
PixletContext::h
int h
Definition: pixlet.c:57
pixlet_init
static av_cold int pixlet_init(AVCodecContext *avctx)
Definition: pixlet.c:66
flag
#define flag(name)
Definition: cbs_av1.c:553
postprocess_luma
static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame, int w, int h, int depth)
Definition: pixlet.c:480
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
lowpass_prediction
static void lowpass_prediction(int16_t *dst, int16_t *pred, int width, int height, ptrdiff_t stride)
Definition: pixlet.c:359
show_bits
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:446
PIXLET_MAGIC
#define PIXLET_MAGIC
Definition: pixlet.c:38
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
value
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default value
Definition: writing_filters.txt:86
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
FF_CODEC_CAP_INIT_THREADSAFE
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: codec_internal.h:31
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
AVCodecContext::height
int height
Definition: avcodec.h:562
PixletContext::prediction
int16_t * prediction
Definition: pixlet.c:60
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
pixlet_close
static av_cold int pixlet_close(AVCodecContext *avctx)
Definition: pixlet.c:82
avcodec.h
stride
#define stride
Definition: h264pred_template.c:537
ret
ret
Definition: filter_design.txt:187
SubBand::width
unsigned width
Definition: pixlet.c:44
pred
static const float pred[4]
Definition: siprdata.h:259
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
align_get_bits
static const uint8_t * align_get_bits(GetBitContext *s)
Definition: get_bits.h:683
reconstruction
static void reconstruction(AVCodecContext *avctx, int16_t *dest, unsigned width, unsigned height, ptrdiff_t stride, int64_t *scaling_h, int64_t *scaling_v)
Definition: pixlet.c:424
ff_pixlet_decoder
const FFCodec ff_pixlet_decoder
Definition: pixlet.c:695
AVCodecContext
main external API structure.
Definition: avcodec.h:389
sign_extend
static av_const int sign_extend(int val, unsigned bits)
Definition: mathops.h:132
shift
static int shift(int a, int b)
Definition: sonic.c:88
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
add
static float add(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:35
ff_set_dimensions
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:90
avpriv_request_sample
#define avpriv_request_sample(...)
Definition: tableprint_vlc.h:36
FFALIGN
#define FFALIGN(x, a)
Definition: macros.h:78
AVPacket
This structure stores compressed data.
Definition: packet.h:351
AVCodecContext::priv_data
void * priv_data
Definition: avcodec.h:416
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
PixletContext::scaling
int64_t scaling[4][2][NB_LEVELS]
Definition: pixlet.c:61
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
int32_t
int32_t
Definition: audioconvert.c:56
bytestream.h
imgutils.h
bytestream2_init
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:137
read_highpass
static int read_highpass(AVCodecContext *avctx, const uint8_t *ptr, int plane, AVFrame *frame)
Definition: pixlet.c:316
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
init_decoder
static int init_decoder(AVCodecContext *avctx)
Definition: pixlet.c:91
h
h
Definition: vp9dsp_template.c:2038
NB_LEVELS
#define NB_LEVELS
Definition: pixlet.c:36
SubBand::height
int height
Definition: cfhd.h:114
intmath.h