/* FFmpeg — pngdec.c (extracted from generated documentation; navigation text removed) */
/*
 * PNG image format
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
21 
22 //#define DEBUG
23 
24 #include "libavutil/avassert.h"
25 #include "libavutil/bprint.h"
26 #include "libavutil/imgutils.h"
27 #include "libavutil/intreadwrite.h"
28 #include "libavutil/stereo3d.h"
30 
31 #include "avcodec.h"
32 #include "bytestream.h"
33 #include "internal.h"
34 #include "apng.h"
35 #include "png.h"
36 #include "pngdsp.h"
37 #include "thread.h"
38 
39 #include <zlib.h>
40 
42  PNG_IHDR = 1 << 0,
43  PNG_PLTE = 1 << 1,
44 };
45 
47  PNG_IDAT = 1 << 0,
48  PNG_ALLIMAGE = 1 << 1,
49 };
50 
51 typedef struct PNGDecContext {
54 
59 
62  int width, height;
63  int cur_w, cur_h;
64  int last_w, last_h;
69  int bit_depth;
74  int channels;
76  int bpp;
77  int has_trns;
79 
82  uint32_t palette[256];
85  unsigned int last_row_size;
87  unsigned int tmp_row_size;
90  int pass;
91  int crow_size; /* compressed row size (include filter type) */
92  int row_size; /* decompressed row size */
93  int pass_row_size; /* decompress row size of the current pass */
94  int y;
95  z_stream zstream;
97 
/* Mask to determine which pixels are valid in a pass.
 * One entry per interlace pass; tested as (mask << (x & 7)) & 0x80 in
 * png_put_interlaced_row, i.e. bit (7 - x % 8) of the entry decides
 * whether column x within each 8-pixel group carries a coded pixel. */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};
102 
103 /* Mask to determine which y pixels can be written in a pass */
105  0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
106 };
107 
108 /* Mask to determine which pixels to overwrite while displaying */
110  0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
111 };
112 
/* NOTE: we try to construct a good looking image at each pass. width
 * is the original image width. We also do pixel format conversion at
 * this stage.
 *
 * Scatter one decoded row of an interlaced pass into the destination
 * row 'dst'.  'src' holds the pass's packed pixels; 'pass' selects the
 * column masks.  'color_type' is currently unused in this body. */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;
    uint8_t *d;
    const uint8_t *s;

    /* Both masks are tested as (m << (x & 7)) & 0x80, i.e. bit (7 - x%8):
     * 'mask' marks the columns actually coded in this pass (advances the
     * source position), 'dsp_mask' marks the columns that may be
     * overwritten for display in this intermediate picture. */
    mask = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        /* 1 bit/pixel: 8 pixels per byte, most significant bit first */
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                /* fetch the source bit, then clear and set bit (7 - j)
                 * of the destination byte */
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F>>j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++; /* one source pixel consumed */
        }
        break;
    case 2:
        /* 2 bits/pixel: 4 pixels per byte */
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3); /* bit offset of pixel x in its byte */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F>>j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        /* 4 bits/pixel: 2 pixels per byte */
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4*(x&1); /* 0 for the high nibble, 4 for the low */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F>>j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        /* >= 8 bits/pixel: whole bytes per pixel, copied with memcpy */
        bpp = bits_per_pixel >> 3;
        d = dst;
        s = src;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                memcpy(d, s, bpp);
            }
            d += bpp;
            if ((mask << j) & 0x80)
                s += bpp;
        }
        break;
    }
}
185 
187  int w, int bpp)
188 {
189  int i;
190  for (i = 0; i < w; i++) {
191  int a, b, c, p, pa, pb, pc;
192 
193  a = dst[i - bpp];
194  b = top[i];
195  c = top[i - bpp];
196 
197  p = b - c;
198  pc = a - c;
199 
200  pa = abs(p);
201  pb = abs(pc);
202  pc = abs(p + pc);
203 
204  if (pa <= pb && pa <= pc)
205  p = a;
206  else if (pb <= pc)
207  p = b;
208  else
209  p = c;
210  dst[i] = p + src[i];
211  }
212 }
213 
/* Unrolled per-channel filter loop for a compile-time constant 'bpp'
 * (1..4).  The previous pixel's channel values are cached in the locals
 * r, g, b, a (declared by the caller) so each channel feeds the next
 * pixel without re-reading dst.  Because 'bpp' is a literal at every
 * expansion site, the "if (bpp ...)" branches fold away.  Expects i,
 * size, dst, src and last in scope; on exit i is the first index the
 * unrolled loop did not process.
 * NOTE(review): dst[0..bpp-1] are presumed already decoded by the
 * caller before this runs — confirm against png_filter_row. */
#define UNROLL1(bpp, op) \
    { \
        r = dst[0]; \
        if (bpp >= 2) \
            g = dst[1]; \
        if (bpp >= 3) \
            b = dst[2]; \
        if (bpp >= 4) \
            a = dst[3]; \
        for (; i <= size - bpp; i += bpp) { \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
            if (bpp == 1) \
                continue; \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
            if (bpp == 2) \
                continue; \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
            if (bpp == 3) \
                continue; \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
        } \
    }
236 
/* Apply 'op' across the whole row: dispatch to the constant-bpp
 * unrolled loop for bpp 1..4, then let the generic byte-wise tail loop
 * finish any remainder (it also handles every other bpp entirely). */
#define UNROLL_FILTER(op) \
    if (bpp == 1) { \
        UNROLL1(1, op) \
    } else if (bpp == 2) { \
        UNROLL1(2, op) \
    } else if (bpp == 3) { \
        UNROLL1(3, op) \
    } else if (bpp == 4) { \
        UNROLL1(4, op) \
    } \
    for (; i < size; i++) { \
        dst[i] = op(dst[i - bpp], src[i], last[i]); \
    }
250 
251 /* NOTE: 'dst' can be equal to 'last' */
253  uint8_t *src, uint8_t *last, int size, int bpp)
254 {
255  int i, p, r, g, b, a;
256 
257  switch (filter_type) {
259  memcpy(dst, src, size);
260  break;
262  for (i = 0; i < bpp; i++)
263  dst[i] = src[i];
264  if (bpp == 4) {
265  p = *(int *)dst;
266  for (; i < size; i += bpp) {
267  unsigned s = *(int *)(src + i);
268  p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
269  *(int *)(dst + i) = p;
270  }
271  } else {
272 #define OP_SUB(x, s, l) ((x) + (s))
274  }
275  break;
276  case PNG_FILTER_VALUE_UP:
277  dsp->add_bytes_l2(dst, src, last, size);
278  break;
280  for (i = 0; i < bpp; i++) {
281  p = (last[i] >> 1);
282  dst[i] = p + src[i];
283  }
284 #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
286  break;
288  for (i = 0; i < bpp; i++) {
289  p = last[i];
290  dst[i] = p + src[i];
291  }
292  if (bpp > 2 && size > 4) {
293  /* would write off the end of the array if we let it process
294  * the last pixel with bpp=3 */
295  int w = (bpp & 3) ? size - 3 : size;
296 
297  if (w > i) {
298  dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
299  i = w;
300  }
301  }
302  ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
303  break;
304  }
305 }
306 
/* This used to be called "deloco" in FFmpeg and is actually an inverse
 * reversible colorspace transformation: the green/luma sample is added
 * back onto the first and third channel of every pixel.  'alpha' (0 or
 * 1) widens the per-pixel stride so an alpha channel is skipped;
 * additions wrap in the sample type. */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    const int step = 3 + alpha; \
    int pos; \
    for (pos = 0; pos < size; pos += step) { \
        const int luma = dst[pos + 1]; \
        dst[pos + 0] += luma; \
        dst[pos + 2] += luma; \
    } \
}

YUV2RGB(rgb8,  uint8_t)
YUV2RGB(rgb16, uint16_t)
322 
324 {
325  if (s->interlace_type) {
326  return 100 - 100 * s->pass / (NB_PASSES - 1);
327  } else {
328  return 100 - 100 * s->y / s->cur_h;
329  }
330 }
331 
332 /* process exactly one decompressed row */
334 {
335  uint8_t *ptr, *last_row;
336  int got_line;
337 
338  if (!s->interlace_type) {
339  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
340  if (s->y == 0)
341  last_row = s->last_row;
342  else
343  last_row = ptr - s->image_linesize;
344 
345  png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
346  last_row, s->row_size, s->bpp);
347  /* loco lags by 1 row so that it doesn't interfere with top prediction */
348  if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
349  if (s->bit_depth == 16) {
350  deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
352  } else {
353  deloco_rgb8(ptr - s->image_linesize, s->row_size,
355  }
356  }
357  s->y++;
358  if (s->y == s->cur_h) {
359  s->pic_state |= PNG_ALLIMAGE;
360  if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
361  if (s->bit_depth == 16) {
362  deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
364  } else {
365  deloco_rgb8(ptr, s->row_size,
367  }
368  }
369  }
370  } else {
371  got_line = 0;
372  for (;;) {
373  ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
374  if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
375  /* if we already read one row, it is time to stop to
376  * wait for the next one */
377  if (got_line)
378  break;
379  png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
380  s->last_row, s->pass_row_size, s->bpp);
381  FFSWAP(uint8_t *, s->last_row, s->tmp_row);
382  FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
383  got_line = 1;
384  }
385  if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
387  s->color_type, s->last_row);
388  }
389  s->y++;
390  if (s->y == s->cur_h) {
391  memset(s->last_row, 0, s->row_size);
392  for (;;) {
393  if (s->pass == NB_PASSES - 1) {
394  s->pic_state |= PNG_ALLIMAGE;
395  goto the_end;
396  } else {
397  s->pass++;
398  s->y = 0;
400  s->bits_per_pixel,
401  s->cur_w);
402  s->crow_size = s->pass_row_size + 1;
403  if (s->pass_row_size != 0)
404  break;
405  /* skip pass if empty row */
406  }
407  }
408  }
409  }
410 the_end:;
411  }
412 }
413 
415 {
416  int ret;
417  s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
418  s->zstream.next_in = (unsigned char *)s->gb.buffer;
419  bytestream2_skip(&s->gb, length);
420 
421  /* decode one line if possible */
422  while (s->zstream.avail_in > 0) {
423  ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
424  if (ret != Z_OK && ret != Z_STREAM_END) {
425  av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
426  return AVERROR_EXTERNAL;
427  }
428  if (s->zstream.avail_out == 0) {
429  if (!(s->pic_state & PNG_ALLIMAGE)) {
430  png_handle_row(s);
431  }
432  s->zstream.avail_out = s->crow_size;
433  s->zstream.next_out = s->crow_buf;
434  }
435  if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
437  "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
438  return 0;
439  }
440  }
441  return 0;
442 }
443 
/* Inflate the zlib-compressed buffer [data, data_end) into the AVBPrint
 * 'bp', growing it chunk by chunk, and NUL-terminate the result.
 * Returns 0 on success or a negative AVERROR code on alloc/inflate
 * failure.
 * NOTE(review): this body appears to rely on 'bp' being initialized
 * before the loop and finalized on the failure path — this extract may
 * be missing those statements; confirm against the full source. */
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end)
{
    z_stream zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret;

    zstream.zalloc = ff_png_zalloc;
    zstream.zfree = ff_png_zfree;
    zstream.opaque = NULL;
    if (inflateInit(&zstream) != Z_OK)
        return AVERROR_EXTERNAL;
    zstream.next_in = (unsigned char *)data;
    zstream.avail_in = data_end - data;

    while (zstream.avail_in > 0) {
        /* reserve at least 2 bytes of bprint space for the next chunk */
        av_bprint_get_buffer(bp, 2, &buf, &buf_size);
        if (buf_size < 2) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        zstream.next_out = buf;
        zstream.avail_out = buf_size - 1; /* leave one byte for the NUL */
        ret = inflate(&zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
        /* account for the bytes inflate produced this iteration */
        bp->len += zstream.next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    inflateEnd(&zstream);
    bp->str[bp->len] = 0; /* NUL-terminate the accumulated text */
    return 0;

fail:
    inflateEnd(&zstream);
    return ret;
}
487 
/* Convert an ISO 8859-1 (Latin-1) byte string of 'size_in' bytes to a
 * freshly allocated, NUL-terminated UTF-8 string.  Bytes >= 0x80 expand
 * to two UTF-8 bytes, so the output needs one extra byte per such
 * character.  Returns NULL on allocation failure or if the output size
 * would overflow size_t; the caller owns (and frees) the result. */
static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
{
    size_t n_twobyte = 0, i;
    uint8_t *out, *p;

    /* count the characters that need a two-byte UTF-8 encoding */
    for (i = 0; i < size_in; i++)
        if (in[i] >= 0x80)
            n_twobyte++;
    /* guard the size_in + n_twobyte + 1 computation against overflow */
    if (size_in == SIZE_MAX || n_twobyte > SIZE_MAX - size_in - 1)
        return NULL;
    out = av_malloc(size_in + n_twobyte + 1);
    if (!out)
        return NULL;
    p = out;
    for (i = 0; i < size_in; i++) {
        const uint8_t c = in[i];
        if (c < 0x80) {
            *p++ = c;
        } else {
            *p++ = 0xC0 | (c >> 6);
            *p++ = 0x80 | (c & 0x3F);
        }
    }
    *p = 0;
    return out;
}
511 
512 static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
513  AVDictionary **dict)
514 {
515  int ret, method;
516  const uint8_t *data = s->gb.buffer;
517  const uint8_t *data_end = data + length;
518  const uint8_t *keyword = data;
519  const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
520  uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
521  unsigned text_len;
522  AVBPrint bp;
523 
524  if (!keyword_end)
525  return AVERROR_INVALIDDATA;
526  data = keyword_end + 1;
527 
528  if (compressed) {
529  if (data == data_end)
530  return AVERROR_INVALIDDATA;
531  method = *(data++);
532  if (method)
533  return AVERROR_INVALIDDATA;
534  if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
535  return ret;
536  text_len = bp.len;
537  ret = av_bprint_finalize(&bp, (char **)&text);
538  if (ret < 0)
539  return ret;
540  } else {
541  text = (uint8_t *)data;
542  text_len = data_end - text;
543  }
544 
545  kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
546  txt_utf8 = iso88591_to_utf8(text, text_len);
547  if (text != data)
548  av_free(text);
549  if (!(kw_utf8 && txt_utf8)) {
550  av_free(kw_utf8);
551  av_free(txt_utf8);
552  return AVERROR(ENOMEM);
553  }
554 
555  av_dict_set(dict, kw_utf8, txt_utf8,
557  return 0;
558 }
559 
561  uint32_t length)
562 {
563  if (length != 13)
564  return AVERROR_INVALIDDATA;
565 
566  if (s->pic_state & PNG_IDAT) {
567  av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
568  return AVERROR_INVALIDDATA;
569  }
570 
571  if (s->hdr_state & PNG_IHDR) {
572  av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
573  return AVERROR_INVALIDDATA;
574  }
575 
576  s->width = s->cur_w = bytestream2_get_be32(&s->gb);
577  s->height = s->cur_h = bytestream2_get_be32(&s->gb);
578  if (av_image_check_size(s->width, s->height, 0, avctx)) {
579  s->cur_w = s->cur_h = s->width = s->height = 0;
580  av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
581  return AVERROR_INVALIDDATA;
582  }
583  s->bit_depth = bytestream2_get_byte(&s->gb);
584  if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
585  s->bit_depth != 8 && s->bit_depth != 16) {
586  av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
587  goto error;
588  }
589  s->color_type = bytestream2_get_byte(&s->gb);
590  s->compression_type = bytestream2_get_byte(&s->gb);
591  if (s->compression_type) {
592  av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n", s->compression_type);
593  goto error;
594  }
595  s->filter_type = bytestream2_get_byte(&s->gb);
596  s->interlace_type = bytestream2_get_byte(&s->gb);
597  bytestream2_skip(&s->gb, 4); /* crc */
598  s->hdr_state |= PNG_IHDR;
599  if (avctx->debug & FF_DEBUG_PICT_INFO)
600  av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
601  "compression_type=%d filter_type=%d interlace_type=%d\n",
602  s->width, s->height, s->bit_depth, s->color_type,
604 
605  return 0;
606 error:
607  s->cur_w = s->cur_h = s->width = s->height = 0;
608  s->bit_depth = 8;
609  return AVERROR_INVALIDDATA;
610 }
611 
613 {
614  if (s->pic_state & PNG_IDAT) {
615  av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
616  return AVERROR_INVALIDDATA;
617  }
618  avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
619  avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
620  if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
621  avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
622  bytestream2_skip(&s->gb, 1); /* unit specifier */
623  bytestream2_skip(&s->gb, 4); /* crc */
624 
625  return 0;
626 }
627 
629  uint32_t length, AVFrame *p)
630 {
631  int ret;
632  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
633 
634  if (!(s->hdr_state & PNG_IHDR)) {
635  av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
636  return AVERROR_INVALIDDATA;
637  }
638  if (!(s->pic_state & PNG_IDAT)) {
639  /* init image info */
640  ret = ff_set_dimensions(avctx, s->width, s->height);
641  if (ret < 0)
642  return ret;
643 
645  s->bits_per_pixel = s->bit_depth * s->channels;
646  s->bpp = (s->bits_per_pixel + 7) >> 3;
647  s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;
648 
649  if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
651  avctx->pix_fmt = AV_PIX_FMT_RGB24;
652  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
654  avctx->pix_fmt = AV_PIX_FMT_RGBA;
655  } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
657  avctx->pix_fmt = AV_PIX_FMT_GRAY8;
658  } else if (s->bit_depth == 16 &&
660  avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
661  } else if (s->bit_depth == 16 &&
663  avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
664  } else if (s->bit_depth == 16 &&
666  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
667  } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
669  avctx->pix_fmt = AV_PIX_FMT_PAL8;
670  } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
671  avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
672  } else if (s->bit_depth == 8 &&
674  avctx->pix_fmt = AV_PIX_FMT_YA8;
675  } else if (s->bit_depth == 16 &&
677  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
678  } else {
680  "Bit depth %d color type %d",
681  s->bit_depth, s->color_type);
682  return AVERROR_PATCHWELCOME;
683  }
684 
685  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
686  switch (avctx->pix_fmt) {
687  case AV_PIX_FMT_RGB24:
688  avctx->pix_fmt = AV_PIX_FMT_RGBA;
689  break;
690 
691  case AV_PIX_FMT_RGB48BE:
692  avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
693  break;
694 
695  case AV_PIX_FMT_GRAY8:
696  avctx->pix_fmt = AV_PIX_FMT_YA8;
697  break;
698 
699  case AV_PIX_FMT_GRAY16BE:
700  avctx->pix_fmt = AV_PIX_FMT_YA16BE;
701  break;
702 
703  default:
704  avpriv_request_sample(avctx, "bit depth %d "
705  "and color type %d with TRNS",
706  s->bit_depth, s->color_type);
707  return AVERROR_INVALIDDATA;
708  }
709 
710  s->bpp += byte_depth;
711  }
712 
713  if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
714  return ret;
717  if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
718  return ret;
719  }
721  p->key_frame = 1;
723 
724  ff_thread_finish_setup(avctx);
725 
726  /* compute the compressed row size */
727  if (!s->interlace_type) {
728  s->crow_size = s->row_size + 1;
729  } else {
730  s->pass = 0;
732  s->bits_per_pixel,
733  s->cur_w);
734  s->crow_size = s->pass_row_size + 1;
735  }
736  ff_dlog(avctx, "row_size=%d crow_size =%d\n",
737  s->row_size, s->crow_size);
738  s->image_buf = p->data[0];
739  s->image_linesize = p->linesize[0];
740  /* copy the palette if needed */
741  if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
742  memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
743  /* empty row is used if differencing to the first row */
745  if (!s->last_row)
746  return AVERROR_INVALIDDATA;
747  if (s->interlace_type ||
750  if (!s->tmp_row)
751  return AVERROR_INVALIDDATA;
752  }
753  /* compressed row */
755  if (!s->buffer)
756  return AVERROR(ENOMEM);
757 
758  /* we want crow_buf+1 to be 16-byte aligned */
759  s->crow_buf = s->buffer + 15;
760  s->zstream.avail_out = s->crow_size;
761  s->zstream.next_out = s->crow_buf;
762  }
763 
764  s->pic_state |= PNG_IDAT;
765 
766  /* set image to non-transparent bpp while decompressing */
768  s->bpp -= byte_depth;
769 
770  ret = png_decode_idat(s, length);
771 
773  s->bpp += byte_depth;
774 
775  if (ret < 0)
776  return ret;
777 
778  bytestream2_skip(&s->gb, 4); /* crc */
779 
780  return 0;
781 }
782 
784  uint32_t length)
785 {
786  int n, i, r, g, b;
787 
788  if ((length % 3) != 0 || length > 256 * 3)
789  return AVERROR_INVALIDDATA;
790  /* read the palette */
791  n = length / 3;
792  for (i = 0; i < n; i++) {
793  r = bytestream2_get_byte(&s->gb);
794  g = bytestream2_get_byte(&s->gb);
795  b = bytestream2_get_byte(&s->gb);
796  s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
797  }
798  for (; i < 256; i++)
799  s->palette[i] = (0xFFU << 24);
800  s->hdr_state |= PNG_PLTE;
801  bytestream2_skip(&s->gb, 4); /* crc */
802 
803  return 0;
804 }
805 
807  uint32_t length)
808 {
809  int v, i;
810 
811  if (!(s->hdr_state & PNG_IHDR)) {
812  av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
813  return AVERROR_INVALIDDATA;
814  }
815 
816  if (s->pic_state & PNG_IDAT) {
817  av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
818  return AVERROR_INVALIDDATA;
819  }
820 
822  if (length > 256 || !(s->hdr_state & PNG_PLTE))
823  return AVERROR_INVALIDDATA;
824 
825  for (i = 0; i < length; i++) {
826  unsigned v = bytestream2_get_byte(&s->gb);
827  s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
828  }
829  } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
830  if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
831  (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
832  s->bit_depth == 1)
833  return AVERROR_INVALIDDATA;
834 
835  for (i = 0; i < length / 2; i++) {
836  /* only use the least significant bits */
837  v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
838 
839  if (s->bit_depth > 8)
840  AV_WB16(&s->transparent_color_be[2 * i], v);
841  else
842  s->transparent_color_be[i] = v;
843  }
844  } else {
845  return AVERROR_INVALIDDATA;
846  }
847 
848  bytestream2_skip(&s->gb, 4); /* crc */
849  s->has_trns = 1;
850 
851  return 0;
852 }
853 
855 {
856  int ret, cnt = 0;
857  uint8_t *data, profile_name[82];
858  AVBPrint bp;
859  AVFrameSideData *sd;
860 
861  while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
862  if (cnt > 80) {
863  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
864  return AVERROR_INVALIDDATA;
865  }
866 
867  length = FFMAX(length - cnt, 0);
868 
869  if (bytestream2_get_byte(&s->gb) != 0) {
870  av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
871  return AVERROR_INVALIDDATA;
872  }
873 
874  length = FFMAX(length - 1, 0);
875 
876  if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
877  return ret;
878 
879  ret = av_bprint_finalize(&bp, (char **)&data);
880  if (ret < 0)
881  return ret;
882 
884  if (!sd) {
885  av_free(data);
886  return AVERROR(ENOMEM);
887  }
888 
889  av_dict_set(&sd->metadata, "name", profile_name, 0);
890  memcpy(sd->data, data, bp.len);
891  av_free(data);
892 
893  /* ICC compressed data and CRC */
894  bytestream2_skip(&s->gb, length + 4);
895 
896  return 0;
897 }
898 
900 {
901  if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
902  int i, j, k;
903  uint8_t *pd = p->data[0];
904  for (j = 0; j < s->height; j++) {
905  i = s->width / 8;
906  for (k = 7; k >= 1; k--)
907  if ((s->width&7) >= k)
908  pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
909  for (i--; i >= 0; i--) {
910  pd[8*i + 7]= pd[i] & 1;
911  pd[8*i + 6]= (pd[i]>>1) & 1;
912  pd[8*i + 5]= (pd[i]>>2) & 1;
913  pd[8*i + 4]= (pd[i]>>3) & 1;
914  pd[8*i + 3]= (pd[i]>>4) & 1;
915  pd[8*i + 2]= (pd[i]>>5) & 1;
916  pd[8*i + 1]= (pd[i]>>6) & 1;
917  pd[8*i + 0]= pd[i]>>7;
918  }
919  pd += s->image_linesize;
920  }
921  } else if (s->bits_per_pixel == 2) {
922  int i, j;
923  uint8_t *pd = p->data[0];
924  for (j = 0; j < s->height; j++) {
925  i = s->width / 4;
927  if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
928  if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
929  if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
930  for (i--; i >= 0; i--) {
931  pd[4*i + 3]= pd[i] & 3;
932  pd[4*i + 2]= (pd[i]>>2) & 3;
933  pd[4*i + 1]= (pd[i]>>4) & 3;
934  pd[4*i + 0]= pd[i]>>6;
935  }
936  } else {
937  if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
938  if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
939  if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
940  for (i--; i >= 0; i--) {
941  pd[4*i + 3]= ( pd[i] & 3)*0x55;
942  pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
943  pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
944  pd[4*i + 0]= ( pd[i]>>6 )*0x55;
945  }
946  }
947  pd += s->image_linesize;
948  }
949  } else if (s->bits_per_pixel == 4) {
950  int i, j;
951  uint8_t *pd = p->data[0];
952  for (j = 0; j < s->height; j++) {
953  i = s->width/2;
955  if (s->width&1) pd[2*i+0]= pd[i]>>4;
956  for (i--; i >= 0; i--) {
957  pd[2*i + 1] = pd[i] & 15;
958  pd[2*i + 0] = pd[i] >> 4;
959  }
960  } else {
961  if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
962  for (i--; i >= 0; i--) {
963  pd[2*i + 1] = (pd[i] & 15) * 0x11;
964  pd[2*i + 0] = (pd[i] >> 4) * 0x11;
965  }
966  }
967  pd += s->image_linesize;
968  }
969  }
970 }
971 
973  uint32_t length)
974 {
975  uint32_t sequence_number;
977 
978  if (length != 26)
979  return AVERROR_INVALIDDATA;
980 
981  if (!(s->hdr_state & PNG_IHDR)) {
982  av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
983  return AVERROR_INVALIDDATA;
984  }
985 
986  s->last_w = s->cur_w;
987  s->last_h = s->cur_h;
988  s->last_x_offset = s->x_offset;
989  s->last_y_offset = s->y_offset;
990  s->last_dispose_op = s->dispose_op;
991 
992  sequence_number = bytestream2_get_be32(&s->gb);
993  cur_w = bytestream2_get_be32(&s->gb);
994  cur_h = bytestream2_get_be32(&s->gb);
995  x_offset = bytestream2_get_be32(&s->gb);
996  y_offset = bytestream2_get_be32(&s->gb);
997  bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
998  dispose_op = bytestream2_get_byte(&s->gb);
999  blend_op = bytestream2_get_byte(&s->gb);
1000  bytestream2_skip(&s->gb, 4); /* crc */
1001 
1002  if (sequence_number == 0 &&
1003  (cur_w != s->width ||
1004  cur_h != s->height ||
1005  x_offset != 0 ||
1006  y_offset != 0) ||
1007  cur_w <= 0 || cur_h <= 0 ||
1008  x_offset < 0 || y_offset < 0 ||
1009  cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
1010  return AVERROR_INVALIDDATA;
1011 
1012  if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
1013  av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
1014  return AVERROR_INVALIDDATA;
1015  }
1016 
1017  if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
1018  dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
1019  // No previous frame to revert to for the first frame
1020  // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND
1021  dispose_op = APNG_DISPOSE_OP_BACKGROUND;
1022  }
1023 
1024  if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
1025  avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
1026  avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
1027  avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
1028  avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
1029  avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
1030  avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
1031  )) {
1032  // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
1033  blend_op = APNG_BLEND_OP_SOURCE;
1034  }
1035 
1036  s->cur_w = cur_w;
1037  s->cur_h = cur_h;
1038  s->x_offset = x_offset;
1039  s->y_offset = y_offset;
1040  s->dispose_op = dispose_op;
1041  s->blend_op = blend_op;
1042 
1043  return 0;
1044 }
1045 
1047 {
1048  int i, j;
1049  uint8_t *pd = p->data[0];
1050  uint8_t *pd_last = s->last_picture.f->data[0];
1051  int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
1052 
1053  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1054  for (j = 0; j < s->height; j++) {
1055  for (i = 0; i < ls; i++)
1056  pd[i] += pd_last[i];
1057  pd += s->image_linesize;
1058  pd_last += s->image_linesize;
1059  }
1060 }
1061 
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
// (callers below pass sums/products of 8-bit values, so the argument
// stays well within 32-bit int range before the *257)
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
1065 
1067  AVFrame *p)
1068 {
1069  size_t x, y;
1070  uint8_t *buffer;
1071 
1072  if (s->blend_op == APNG_BLEND_OP_OVER &&
1073  avctx->pix_fmt != AV_PIX_FMT_RGBA &&
1074  avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
1075  avctx->pix_fmt != AV_PIX_FMT_PAL8) {
1076  avpriv_request_sample(avctx, "Blending with pixel format %s",
1077  av_get_pix_fmt_name(avctx->pix_fmt));
1078  return AVERROR_PATCHWELCOME;
1079  }
1080 
1081  buffer = av_malloc_array(s->image_linesize, s->height);
1082  if (!buffer)
1083  return AVERROR(ENOMEM);
1084 
1085 
1086  // Do the disposal operation specified by the last frame on the frame
1088  ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
1089  memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);
1090 
1092  for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
1093  memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
1094 
1095  memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
1097  } else {
1098  ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
1099  memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
1100  }
1101 
1102  // Perform blending
1103  if (s->blend_op == APNG_BLEND_OP_SOURCE) {
1104  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1105  size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
1106  memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
1107  }
1108  } else { // APNG_BLEND_OP_OVER
1109  for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
1110  uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
1111  uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
1112  for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
1113  size_t b;
1114  uint8_t foreground_alpha, background_alpha, output_alpha;
1115  uint8_t output[10];
1116 
1117  // Since we might be blending alpha onto alpha, we use the following equations:
1118  // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
1119  // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
1120 
1121  switch (avctx->pix_fmt) {
1122  case AV_PIX_FMT_RGBA:
1123  foreground_alpha = foreground[3];
1124  background_alpha = background[3];
1125  break;
1126 
1127  case AV_PIX_FMT_GRAY8A:
1128  foreground_alpha = foreground[1];
1129  background_alpha = background[1];
1130  break;
1131 
1132  case AV_PIX_FMT_PAL8:
1133  foreground_alpha = s->palette[foreground[0]] >> 24;
1134  background_alpha = s->palette[background[0]] >> 24;
1135  break;
1136  }
1137 
1138  if (foreground_alpha == 0)
1139  continue;
1140 
1141  if (foreground_alpha == 255) {
1142  memcpy(background, foreground, s->bpp);
1143  continue;
1144  }
1145 
1146  if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
1147  // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
1148  avpriv_request_sample(avctx, "Alpha blending palette samples");
1149  background[0] = foreground[0];
1150  continue;
1151  }
1152 
1153  output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);
1154 
1155  av_assert0(s->bpp <= 10);
1156 
1157  for (b = 0; b < s->bpp - 1; ++b) {
1158  if (output_alpha == 0) {
1159  output[b] = 0;
1160  } else if (background_alpha == 255) {
1161  output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
1162  } else {
1163  output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
1164  }
1165  }
1166  output[b] = output_alpha;
1167  memcpy(background, output, s->bpp);
1168  }
1169  }
1170  }
1171 
1172  // Copy blended buffer into the frame and free
1173  memcpy(p->data[0], buffer, s->image_linesize * s->height);
1174  av_free(buffer);
1175 
1176  return 0;
1177 }
1178 
/*
 * Shared chunk-stream parser for the PNG and APNG decoders.
 * Walks the chunk sequence (length / fourcc tag / payload / CRC) in s->gb,
 * dispatching each recognized chunk to its handler, then applies tRNS
 * transparency and P-frame (inter) handling before reporting progress.
 * Returns 0 on success or a negative AVERROR code.
 *
 * NOTE(review): this listing was recovered from a generated cross-reference;
 * the opening signature line (orig. 1179) and several interior lines (orig.
 * 1204, 1288, 1308, 1332, 1366, 1436, 1442) are absent here — confirm each
 * annotated gap against upstream pngdec.c before relying on this text.
 */
1180  AVFrame *p, AVPacket *avpkt)
1181 {
1182  AVDictionary **metadatap = NULL;
1183  uint32_t tag, length;
1184  int decode_next_dat = 0;  /* set once an fcTL has been seen; gates fdAT/IDAT for APNG */
1185  int i, ret;
1186 
1187  for (;;) {
      /* Out-of-data handling: decide between clean EOF, partial image, and error. */
1188  length = bytestream2_get_bytes_left(&s->gb);
1189  if (length <= 0) {
1190 
1191  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1192  avctx->skip_frame == AVDISCARD_ALL) {
1193  return 0;
1194  }
1195 
1196  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
      /* APNG extradata/packets legitimately end without IEND. */
1197  if (!(s->pic_state & PNG_IDAT))
1198  return 0;
1199  else
1200  goto exit_loop;
1201  }
1202  av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
      /* NOTE(review): the condition below is truncated — orig. line 1204
       * (the continuation of this if) is missing from this listing. */
1203  if ( s->pic_state & PNG_ALLIMAGE
1205  goto exit_loop;
1206  ret = AVERROR_INVALIDDATA;
1207  goto fail;
1208  }
1209 
      /* Chunk header: 32-bit big-endian payload length, then 4-byte tag. */
1210  length = bytestream2_get_be32(&s->gb);
1211  if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
1212  av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
1213  ret = AVERROR_INVALIDDATA;
1214  goto fail;
1215  }
1216  tag = bytestream2_get_le32(&s->gb);
1217  if (avctx->debug & FF_DEBUG_STARTCODE)
1218  av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
1219  av_fourcc2str(tag), length);
1220 
      /* When frames are being discarded, only parse chunks that keep the
       * header/stream state consistent; skip everything else. */
1221  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1222  avctx->skip_frame == AVDISCARD_ALL) {
1223  switch(tag) {
1224  case MKTAG('I', 'H', 'D', 'R'):
1225  case MKTAG('p', 'H', 'Y', 's'):
1226  case MKTAG('t', 'E', 'X', 't'):
1227  case MKTAG('I', 'D', 'A', 'T'):
1228  case MKTAG('t', 'R', 'N', 'S'):
1229  break;
1230  default:
1231  goto skip_tag;
1232  }
1233  }
1234 
1235  metadatap = &p->metadata;
1236  switch (tag) {
1237  case MKTAG('I', 'H', 'D', 'R'):
1238  if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
1239  goto fail;
1240  break;
1241  case MKTAG('p', 'H', 'Y', 's'):
1242  if ((ret = decode_phys_chunk(avctx, s)) < 0)
1243  goto fail;
1244  break;
      /* fcTL: APNG frame control — must precede this frame's fdAT/IDAT data. */
1245  case MKTAG('f', 'c', 'T', 'L'):
1246  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1247  goto skip_tag;
1248  if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
1249  goto fail;
1250  decode_next_dat = 1;
1251  break;
1252  case MKTAG('f', 'd', 'A', 'T'):
1253  if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
1254  goto skip_tag;
1255  if (!decode_next_dat) {
1256  ret = AVERROR_INVALIDDATA;
1257  goto fail;
1258  }
      /* Consume the 4-byte fdAT sequence number, then treat the rest as IDAT. */
1259  bytestream2_get_be32(&s->gb);
1260  length -= 4;
1261  /* fallthrough */
1262  case MKTAG('I', 'D', 'A', 'T'):
1263  if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
1264  goto skip_tag;
1265  if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
1266  goto fail;
1267  break;
      /* Ancillary chunks below are non-fatal: a broken one is skipped, not an error. */
1268  case MKTAG('P', 'L', 'T', 'E'):
1269  if (decode_plte_chunk(avctx, s, length) < 0)
1270  goto skip_tag;
1271  break;
1272  case MKTAG('t', 'R', 'N', 'S'):
1273  if (decode_trns_chunk(avctx, s, length) < 0)
1274  goto skip_tag;
1275  break;
1276  case MKTAG('t', 'E', 'X', 't'):
1277  if (decode_text_chunk(s, length, 0, metadatap) < 0)
1278  av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
1279  bytestream2_skip(&s->gb, length + 4);
1280  break;
1281  case MKTAG('z', 'T', 'X', 't'):
1282  if (decode_text_chunk(s, length, 1, metadatap) < 0)
1283  av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
1284  bytestream2_skip(&s->gb, length + 4);
1285  break;
1286  case MKTAG('s', 'T', 'E', 'R'): {
1287  int mode = bytestream2_get_byte(&s->gb);
      /* NOTE(review): the declaration of `stereo3d` (orig. line 1288,
       * presumably av_stereo3d_create_side_data(p)) is missing here. */
1289  if (!stereo3d)
1290  goto fail;
1291 
1292  if (mode == 0 || mode == 1) {
1293  stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
1294  stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
1295  } else {
1296  av_log(avctx, AV_LOG_WARNING,
1297  "Unknown value in sTER chunk (%d)\n", mode);
1298  }
1299  bytestream2_skip(&s->gb, 4); /* crc */
1300  break;
1301  }
1302  case MKTAG('i', 'C', 'C', 'P'): {
1303  if (decode_iccp_chunk(s, length, p) < 0)
1304  goto fail;
1305  break;
1306  }
      /* cHRM: white point and RGB primaries, each stored as value*100000. */
1307  case MKTAG('c', 'H', 'R', 'M'): {
      /* NOTE(review): the declaration of `mdm` (orig. line 1308, presumably
       * av_mastering_display_metadata_create_side_data(p)) is missing here. */
1309  if (!mdm) {
1310  ret = AVERROR(ENOMEM);
1311  goto fail;
1312  }
1313 
1314  mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1315  mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1316 
1317  /* RGB Primaries */
1318  for (i = 0; i < 3; i++) {
1319  mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1320  mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
1321  }
1322 
1323  mdm->has_primaries = 1;
1324  bytestream2_skip(&s->gb, 4); /* crc */
1325  break;
1326  }
      /* gAMA: exported as a "gamma" metadata entry in "num/100000" form. */
1327  case MKTAG('g', 'A', 'M', 'A'): {
1328  AVBPrint bp;
1329  char *gamma_str;
1330  int num = bytestream2_get_be32(&s->gb);
1331 
      /* NOTE(review): the av_bprint_init() call (orig. line 1332) is missing here. */
1333  av_bprintf(&bp, "%i/%i", num, 100000);
1334  ret = av_bprint_finalize(&bp, &gamma_str);
1335  if (ret < 0)
1336  return ret;
1337 
1338  av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);
1339 
1340  bytestream2_skip(&s->gb, 4); /* crc */
1341  break;
1342  }
1343  case MKTAG('I', 'E', 'N', 'D'):
1344  if (!(s->pic_state & PNG_ALLIMAGE))
1345  av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
1346  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1347  ret = AVERROR_INVALIDDATA;
1348  goto fail;
1349  }
1350  bytestream2_skip(&s->gb, 4); /* crc */
1351  goto exit_loop;
1352  default:
1353  /* skip tag */
1354 skip_tag:
      /* Skip payload plus the trailing 4-byte CRC. */
1355  bytestream2_skip(&s->gb, length + 4);
1356  break;
1357  }
1358  }
1359 exit_loop:
1360 
1361  if (avctx->codec_id == AV_CODEC_ID_PNG &&
1362  avctx->skip_frame == AVDISCARD_ALL) {
1363  return 0;
1364  }
1365 
      /* NOTE(review): the guard condition for this early return (orig. line
       * 1366) is missing from this listing; as shown it would be unconditional. */
1367  return AVERROR_INVALIDDATA;
1368 
      /* Expand 1/2/4-bit samples to one byte per pixel. */
1369  if (s->bits_per_pixel <= 4)
1370  handle_small_bpp(s, p);
1371 
1372  /* apply transparency if needed */
1373  if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
1374  size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
1375  size_t raw_bpp = s->bpp - byte_depth;  /* bytes per pixel without the alpha channel */
1376  unsigned x, y;
1377 
1378  av_assert0(s->bit_depth > 1);
1379 
1380  for (y = 0; y < s->height; ++y) {
1381  uint8_t *row = &s->image_buf[s->image_linesize * y];
1382 
      /* Fast path: gray8 -> gray8+alpha, expanding right-to-left in place. */
1383  if (s->bpp == 2 && byte_depth == 1) {
1384  uint8_t *pixel = &row[2 * s->width - 1];
1385  uint8_t *rowp = &row[1 * s->width - 1];
1386  int tcolor = s->transparent_color_be[0];
1387  for (x = s->width; x > 0; --x) {
1388  *pixel-- = *rowp == tcolor ? 0 : 0xff;
1389  *pixel-- = *rowp--;
1390  }
      /* Fast path: rgb24 -> rgba32, expanding right-to-left in place. */
1391  } else if (s->bpp == 4 && byte_depth == 1) {
1392  uint8_t *pixel = &row[4 * s->width - 1];
1393  uint8_t *rowp = &row[3 * s->width - 1];
1394  int tcolor = AV_RL24(s->transparent_color_be);
1395  for (x = s->width; x > 0; --x) {
1396  *pixel-- = AV_RL24(rowp-2) == tcolor ? 0 : 0xff;
1397  *pixel-- = *rowp--;
1398  *pixel-- = *rowp--;
1399  *pixel-- = *rowp--;
1400  }
1401  } else {
1402  /* since we're updating in-place, we have to go from right to left */
1403  for (x = s->width; x > 0; --x) {
1404  uint8_t *pixel = &row[s->bpp * (x - 1)];
1405  memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
1406 
      /* Fully transparent if the pixel matches the tRNS color, else opaque. */
1407  if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
1408  memset(&pixel[raw_bpp], 0, byte_depth);
1409  } else {
1410  memset(&pixel[raw_bpp], 0xff, byte_depth);
1411  }
1412  }
1413  }
1414  }
1415  }
1416 
1417  /* handle P-frames only if a predecessor frame is available */
1418  if (s->last_picture.f->data[0]) {
1419  if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
1420  && s->last_picture.f->width == p->width
1421  && s->last_picture.f->height== p->height
1422  && s->last_picture.f->format== p->format
1423  ) {
1424  if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
1425  handle_p_frame_png(s, p);
1426  else if (CONFIG_APNG_DECODER &&
1427  s->previous_picture.f->width == p->width &&
1428  s->previous_picture.f->height== p->height &&
1429  s->previous_picture.f->format== p->format &&
1430  avctx->codec_id == AV_CODEC_ID_APNG &&
1431  (ret = handle_p_frame_apng(avctx, s, p)) < 0)
1432  goto fail;
1433  }
1434  }
      /* Unblock frame-threaded consumers waiting on this picture. */
1435  ff_thread_report_progress(&s->picture, INT_MAX, 0);
      /* NOTE(review): orig. line 1436 is missing here (likely a second
       * ff_thread_report_progress() on previous_picture). */
1437 
1438  return 0;
1439 
1440 fail:
      /* Report progress even on failure so waiting threads are not deadlocked. */
1441  ff_thread_report_progress(&s->picture, INT_MAX, 0);
      /* NOTE(review): orig. line 1442 is missing here (see note at 1436). */
1443  return ret;
1444 }
1445 
1446 #if CONFIG_PNG_DECODER
/*
 * Decode one standalone PNG (or MNG-wrapped PNG) image from a packet.
 * Validates the 8-byte signature, sets up a per-frame zlib inflate stream,
 * runs the shared chunk parser, and on success returns a reference to the
 * decoded picture in *data. Returns bytes consumed or a negative AVERROR.
 *
 * NOTE(review): orig. lines 1458-1459 are missing from this listing
 * (presumably the last_picture release/swap before p is taken) — confirm
 * against upstream pngdec.c.
 */
1447 static int decode_frame_png(AVCodecContext *avctx,
1448  void *data, int *got_frame,
1449  AVPacket *avpkt)
1450 {
1451  PNGDecContext *const s = avctx->priv_data;
1452  const uint8_t *buf = avpkt->data;
1453  int buf_size = avpkt->size;
1454  AVFrame *p;
1455  int64_t sig;
1456  int ret;
1457 
1460  p = s->picture.f;
1461 
1462  bytestream2_init(&s->gb, buf, buf_size);
1463 
1464  /* check signature */
1465  sig = bytestream2_get_be64(&s->gb);
1466  if (sig != PNGSIG &&
1467  sig != MNGSIG) {
1468  av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
1469  return AVERROR_INVALIDDATA;
1470  }
1471 
     /* Reset per-frame parser state. */
1472  s->y = s->has_trns = 0;
1473  s->hdr_state = 0;
1474  s->pic_state = 0;
1475 
1476  /* init the zlib */
1477  s->zstream.zalloc = ff_png_zalloc;
1478  s->zstream.zfree = ff_png_zfree;
1479  s->zstream.opaque = NULL;
1480  ret = inflateInit(&s->zstream);
1481  if (ret != Z_OK) {
1482  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1483  return AVERROR_EXTERNAL;
1484  }
1485 
1486  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1487  goto the_end;
1488 
     /* Frame discarded: still report how many bytes were consumed. */
1489  if (avctx->skip_frame == AVDISCARD_ALL) {
1490  *got_frame = 0;
1491  ret = bytestream2_tell(&s->gb);
1492  goto the_end;
1493  }
1494 
1495  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1496  goto the_end;
1497 
1498  *got_frame = 1;
1499 
1500  ret = bytestream2_tell(&s->gb);
1501 the_end:
     /* Tear down the per-frame inflate state on every exit path. */
1502  inflateEnd(&s->zstream);
1503  s->crow_buf = NULL;
1504  return ret;
1505 }
1506 #endif
1507 
1508 #if CONFIG_APNG_DECODER
/*
 * Decode one APNG frame from a packet. On the first call the stream header
 * (IHDR etc.) is parsed out of extradata; every packet then gets its own
 * zlib inflate stream and a pass through the shared chunk parser.
 * Returns bytes consumed or a negative AVERROR.
 *
 * NOTE(review): orig. lines 1517-1518 are missing from this listing
 * (presumably the picture/last_picture bookkeeping before p is taken) —
 * confirm against upstream pngdec.c.
 */
1509 static int decode_frame_apng(AVCodecContext *avctx,
1510  void *data, int *got_frame,
1511  AVPacket *avpkt)
1512 {
1513  PNGDecContext *const s = avctx->priv_data;
1514  int ret;
1515  AVFrame *p;
1516 
1519  p = s->picture.f;
1520 
     /* First frame: the PNG header chunks live in extradata, not the packet. */
1521  if (!(s->hdr_state & PNG_IHDR)) {
1522  if (!avctx->extradata_size)
1523  return AVERROR_INVALIDDATA;
1524 
1525  /* only init fields, there is no zlib use in extradata */
1526  s->zstream.zalloc = ff_png_zalloc;
1527  s->zstream.zfree = ff_png_zfree;
1528 
1529  bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
1530  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1531  goto end;
1532  }
1533 
1534  /* reset state for a new frame */
1535  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1536  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1537  ret = AVERROR_EXTERNAL;
1538  goto end;
1539  }
1540  s->y = 0;
1541  s->pic_state = 0;
1542  bytestream2_init(&s->gb, avpkt->data, avpkt->size);
1543  if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
1544  goto end;
1545 
     /* A partial image is tolerated with a warning; no image data at all is an error. */
1546  if (!(s->pic_state & PNG_ALLIMAGE))
1547  av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
1548  if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
1549  ret = AVERROR_INVALIDDATA;
1550  goto end;
1551  }
1552  if ((ret = av_frame_ref(data, s->picture.f)) < 0)
1553  goto end;
1554 
1555  *got_frame = 1;
1556  ret = bytestream2_tell(&s->gb);
1557 
1558 end:
     /* Tear down the per-frame inflate state on every exit path. */
1559  inflateEnd(&s->zstream);
1560  return ret;
1561 }
1562 #endif
1563 
1564 #if CONFIG_LSCR_DECODER
/*
 * Decode one LSCR (LEAD Screen Capture) packet: a little-endian block count
 * followed by per-block rectangles, each backed by one or more zlib IDAT
 * payloads decoded as bottom-up BGR24 into the target rectangle. Undamaged
 * regions are carried over from the previous frame.
 * Returns avpkt->size on success or a negative AVERROR.
 *
 * NOTE(review): orig. lines 1647 (last_row allocation), 1680 and 1682 are
 * missing from this listing — confirm against upstream pngdec.c.
 */
1565 static int decode_frame_lscr(AVCodecContext *avctx,
1566  void *data, int *got_frame,
1567  AVPacket *avpkt)
1568 {
1569  PNGDecContext *const s = avctx->priv_data;
1570  GetByteContext *gb = &s->gb;
1571  AVFrame *frame = data;
1572  int ret, nb_blocks, offset = 0;
1573 
1574  if (avpkt->size < 2)
1575  return AVERROR_INVALIDDATA;
1576 
1577  bytestream2_init(gb, avpkt->data, avpkt->size);
1578 
1579  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
1580  return ret;
1581 
     /* Header: block count, then per-block 12-byte headers must all fit. */
1582  nb_blocks = bytestream2_get_le16(gb);
1583  if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * (12 + 8))
1584  return AVERROR_INVALIDDATA;
1585 
     /* Start from the previous frame so untouched areas persist. */
1586  if (s->last_picture.f->data[0]) {
1587  ret = av_frame_copy(frame, s->last_picture.f);
1588  if (ret < 0)
1589  return ret;
1590  }
1591 
1592  for (int b = 0; b < nb_blocks; b++) {
1593  int x, y, x2, y2, w, h, left;
1594  uint32_t csize, size;
1595 
     /* Fresh inflate stream per block. */
1596  s->zstream.zalloc = ff_png_zalloc;
1597  s->zstream.zfree = ff_png_zfree;
1598  s->zstream.opaque = NULL;
1599 
1600  if ((ret = inflateInit(&s->zstream)) != Z_OK) {
1601  av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
1602  ret = AVERROR_EXTERNAL;
1603  goto end;
1604  }
1605 
     /* Seek to this block's 12-byte rectangle header. */
1606  bytestream2_seek(gb, 2 + b * 12, SEEK_SET);
1607 
1608  x = bytestream2_get_le16(gb);
1609  y = bytestream2_get_le16(gb);
1610  x2 = bytestream2_get_le16(gb);
1611  y2 = bytestream2_get_le16(gb);
1612  s->width = s->cur_w = w = x2-x;
1613  s->height = s->cur_h = h = y2-y;
1614 
     /* Rectangle must lie fully inside the output frame. */
1615  if (w <= 0 || x < 0 || x >= avctx->width || w + x > avctx->width ||
1616  h <= 0 || y < 0 || y >= avctx->height || h + y > avctx->height) {
1617  ret = AVERROR_INVALIDDATA;
1618  goto end;
1619  }
1620 
1621  size = bytestream2_get_le32(gb);
1622 
     /* A single full-frame block counts as a keyframe. */
1623  frame->key_frame = (nb_blocks == 1) &&
1624  (w == avctx->width) &&
1625  (h == avctx->height) &&
1626  (x == 0) && (y == 0);
1627 
     /* Jump to this block's compressed data; expect an IDAT chunk header. */
1628  bytestream2_seek(gb, 2 + nb_blocks * 12 + offset, SEEK_SET);
1629  csize = bytestream2_get_be32(gb);
1630  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1631  ret = AVERROR_INVALIDDATA;
1632  goto end;
1633  }
1634 
1635  offset += size;
1636  left = size;
1637 
1638  s->y = 0;
1639  s->row_size = w * 3;
1640 
1641  av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
1642  if (!s->buffer) {
1643  ret = AVERROR(ENOMEM);
1644  goto end;
1645  }
1646 
     /* NOTE(review): the last_row allocation (orig. line 1647, presumably
      * av_fast_padded_malloc on s->last_row) is missing from this listing. */
1648  if (!s->last_row) {
1649  ret = AVERROR(ENOMEM);
1650  goto end;
1651  }
1652 
     /* Point the row decoder at the rectangle, bottom-up (negative linesize). */
1653  s->crow_size = w * 3 + 1;
1654  s->crow_buf = s->buffer + 15;
1655  s->zstream.avail_out = s->crow_size;
1656  s->zstream.next_out = s->crow_buf;
1657  s->image_buf = frame->data[0] + (avctx->height - y - 1) * frame->linesize[0] + x * 3;
1658  s->image_linesize =-frame->linesize[0];
1659  s->bpp = 3;
1660  s->pic_state = 0;
1661 
     /* Consume chained IDAT payloads; each is followed by 16 bytes of
      * framing (CRC + next chunk header) accounted for in `left`. */
1662  while (left > 16) {
1663  ret = png_decode_idat(s, csize);
1664  if (ret < 0)
1665  goto end;
1666  left -= csize + 16;
1667  if (left > 16) {
1668  bytestream2_skip(gb, 4);
1669  csize = bytestream2_get_be32(gb);
1670  if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
1671  ret = AVERROR_INVALIDDATA;
1672  goto end;
1673  }
1674  }
1675  }
1676 
1677  inflateEnd(&s->zstream);
1678  }
1679 
     /* NOTE(review): orig. lines 1680 and 1682 are missing from this listing
      * (likely pict_type assignment and av_frame_unref of last_picture). */
1681 
1683  if ((ret = av_frame_ref(s->last_picture.f, frame)) < 0)
1684  return ret;
1685 
1686  *got_frame = 1;
1687 end:
1688  inflateEnd(&s->zstream);
1689 
1690  if (ret < 0)
1691  return ret;
1692  return avpkt->size;
1693 }
1694 
/*
 * Flush callback for the LSCR decoder: drops inter-frame state so the next
 * packet is decoded without referencing a previous picture.
 *
 * NOTE(review): the body statement (orig. line 1699, presumably
 * av_frame_unref(s->last_picture.f)) is missing from this listing — as shown
 * the function would be a no-op and `s` unused. Confirm against upstream.
 */
1695 static void decode_flush(AVCodecContext *avctx)
1696 {
1697  PNGDecContext *s = avctx->priv_data;
1698 
1700 }
1701 
1702 #endif
1703 
1704 #if HAVE_THREADS
/*
 * Frame-threading: copy decoder state from the source thread's context to
 * the destination. The current picture reference is always propagated; for
 * APNG the full inter-frame state (geometry, trns, palette, dispose op and
 * the last/previous picture references) is copied as well.
 * Returns 0 on success or a negative AVERROR.
 *
 * NOTE(review): orig. lines 1739 and 1744 are missing from this listing
 * (presumably ff_thread_release_buffer on pdst->last_picture /
 * pdst->previous_picture before re-referencing) — confirm against upstream.
 */
1705 static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
1706 {
1707  PNGDecContext *psrc = src->priv_data;
1708  PNGDecContext *pdst = dst->priv_data;
1709  int ret;
1710 
1711  if (dst == src)
1712  return 0;
1713 
     /* Replace the destination's current picture with the source's. */
1714  ff_thread_release_buffer(dst, &pdst->picture);
1715  if (psrc->picture.f->data[0] &&
1716  (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
1717  return ret;
1718  if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
1719  pdst->width = psrc->width;
1720  pdst->height = psrc->height;
1721  pdst->bit_depth = psrc->bit_depth;
1722  pdst->color_type = psrc->color_type;
1723  pdst->compression_type = psrc->compression_type;
1724  pdst->interlace_type = psrc->interlace_type;
1725  pdst->filter_type = psrc->filter_type;
1726  pdst->cur_w = psrc->cur_w;
1727  pdst->cur_h = psrc->cur_h;
1728  pdst->x_offset = psrc->x_offset;
1729  pdst->y_offset = psrc->y_offset;
1730  pdst->has_trns = psrc->has_trns;
1731  memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
1732 
1733  pdst->dispose_op = psrc->dispose_op;
1734 
1735  memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
1736 
     /* OR rather than assign: header state only ever accumulates. */
1737  pdst->hdr_state |= psrc->hdr_state;
1738 
1740  if (psrc->last_picture.f->data[0] &&
1741  (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
1742  return ret;
1743 
1745  if (psrc->previous_picture.f->data[0] &&
1746  (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
1747  return ret;
1748  }
1749 
1750  return 0;
1751 }
1752 #endif
1753 
/*
 * Shared init for the PNG/APNG/LSCR decoders: allocates the three picture
 * frames and initializes the PNG DSP context on the primary thread context.
 *
 * NOTE(review): the function signature line (orig. 1754, presumably
 * `static av_cold int png_dec_init(AVCodecContext *avctx)`), the
 * previous_picture allocation (orig. 1764) and two av_frame_free calls
 * (orig. 1768-1769) are missing from this listing — confirm upstream.
 */
1755 {
1756  PNGDecContext *s = avctx->priv_data;
1757 
1758  avctx->color_range = AVCOL_RANGE_JPEG;
1759 
1760  if (avctx->codec_id == AV_CODEC_ID_LSCR)
1761  avctx->pix_fmt = AV_PIX_FMT_BGR24;
1762 
1763  s->avctx = avctx;
1765  s->last_picture.f = av_frame_alloc();
1766  s->picture.f = av_frame_alloc();
     /* If any allocation failed, free what was obtained and bail out. */
1767  if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
1770  av_frame_free(&s->picture.f);
1771  return AVERROR(ENOMEM);
1772  }
1773 
     /* DSP init only once, on the original (non-copied) thread context. */
1774  if (!avctx->internal->is_copy) {
1775  avctx->internal->allocate_progress = 1;
1776  ff_pngdsp_init(&s->dsp);
1777  }
1778 
1779  return 0;
1780 }
1781 
/*
 * Shared close for the PNG/APNG/LSCR decoders: releases picture buffers and
 * frees all scratch row buffers owned by the context.
 *
 * NOTE(review): the function signature line (orig. 1782, presumably
 * `static av_cold int png_dec_end(AVCodecContext *avctx)`) and the
 * release/free of previous_picture and last_picture (orig. 1785-1789) are
 * missing from this listing — confirm against upstream pngdec.c.
 */
1783 {
1784  PNGDecContext *s = avctx->priv_data;
1785 
1790  ff_thread_release_buffer(avctx, &s->picture);
1791  av_frame_free(&s->picture.f);
     /* Free scratch buffers and zero their cached sizes for consistency. */
1792  av_freep(&s->buffer);
1793  s->buffer_size = 0;
1794  av_freep(&s->last_row);
1795  s->last_row_size = 0;
1796  av_freep(&s->tmp_row);
1797  s->tmp_row_size = 0;
1798 
1799  return 0;
1800 }
1801 
1802 #if CONFIG_APNG_DECODER
/* AVCodec registration for the APNG decoder (frame-threaded, thread-safe init).
 * NOTE(review): the opening line (orig. 1803, `AVCodec ff_apng_decoder = {`)
 * and orig. line 1812 are missing from this listing. */
1804  .name = "apng",
1805  .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
1806  .type = AVMEDIA_TYPE_VIDEO,
1807  .id = AV_CODEC_ID_APNG,
1808  .priv_data_size = sizeof(PNGDecContext),
1809  .init = png_dec_init,
1810  .close = png_dec_end,
1811  .decode = decode_frame_apng,
1813  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1814  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1815  .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
1816 };
1817 #endif
1818 
1819 #if CONFIG_PNG_DECODER
/* AVCodec registration for the PNG decoder (frame-threaded).
 * NOTE(review): the opening line (orig. 1820, `AVCodec ff_png_decoder = {`)
 * and orig. lines 1829 and 1832 are missing from this listing. */
1821  .name = "png",
1822  .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
1823  .type = AVMEDIA_TYPE_VIDEO,
1824  .id = AV_CODEC_ID_PNG,
1825  .priv_data_size = sizeof(PNGDecContext),
1826  .init = png_dec_init,
1827  .close = png_dec_end,
1828  .decode = decode_frame_png,
1830  .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
1831  .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1833 };
1834 #endif
1835 
1836 #if CONFIG_LSCR_DECODER
/* AVCodec registration for the LSCR (LEAD Screen Capture) decoder;
 * single-threaded, with a flush callback to drop inter-frame state.
 * NOTE(review): the opening line (orig. 1837, `AVCodec ff_lscr_decoder = {`)
 * and orig. line 1848 are missing from this listing. */
1838  .name = "lscr",
1839  .long_name = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
1840  .type = AVMEDIA_TYPE_VIDEO,
1841  .id = AV_CODEC_ID_LSCR,
1842  .priv_data_size = sizeof(PNGDecContext),
1843  .init = png_dec_init,
1844  .close = png_dec_end,
1845  .decode = decode_frame_lscr,
1846  .flush = decode_flush,
1847  .capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
1849 };
1850 #endif
#define AV_STEREO3D_FLAG_INVERT
Inverted views, Right/Bottom represents the left view.
Definition: stereo3d.h:167
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length, AVFrame *p)
Definition: pngdec.c:628
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:972
#define PNG_FILTER_VALUE_AVG
Definition: png.h:41
static void png_handle_row(PNGDecContext *s)
Definition: pngdec.c:333
ThreadFrame previous_picture
Definition: pngdec.c:56
#define NULL
Definition: coverity.c:32
int last_y_offset
Definition: pngdec.c:66
int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
Compute the size of an image line with format pix_fmt and width width for the plane plane...
Definition: imgutils.c:76
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
int width
Definition: pngdec.c:62
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:100
static void flush(AVCodecContext *avctx)
unsigned int tmp_row_size
Definition: pngdec.c:87
8 bits gray, 8 bits alpha
Definition: pixfmt.h:143
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVFrame * f
Definition: thread.h:35
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
const char * g
Definition: vf_curves.c:115
int pass_row_size
Definition: pngdec.c:93
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
AVDictionary * metadata
Definition: frame.h:205
uint8_t * tmp_row
Definition: pngdec.c:86
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an init_thread_copy() which re-allocates them for other threads.Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities.There will be very little speed gain at this point but it should work.If there are inter-frame dependencies
#define avpriv_request_sample(...)
PNGHeaderState
Definition: pngdec.c:41
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2256
AVRational white_point[2]
CIE 1931 xy chromaticity coords of white point.
int num
Numerator.
Definition: rational.h:59
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed, AVDictionary **dict)
Definition: pngdec.c:512
int size
Definition: avcodec.h:1534
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2000
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:36
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1831
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:133
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
enum PNGImageState pic_state
Definition: pngdec.c:61
int has_primaries
Flag indicating whether the display primaries (and white point) are set.
discard all
Definition: avcodec.h:829
Views are next to each other.
Definition: stereo3d.h:67
#define PNG_COLOR_TYPE_RGB
Definition: png.h:33
static void error(const char *err)
void(* add_bytes_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w)
Definition: pngdsp.h:28
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before ff_thread_await_progress() has been called on them.reget_buffer() and buffer age optimizations no longer work.*The contents of buffers must not be written to after ff_thread_report_progress() has been called on them.This includes draw_edges().Porting codecs to frame threading
#define PNG_COLOR_TYPE_GRAY_ALPHA
Definition: png.h:35
AVCodec.
Definition: avcodec.h:3555
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
Definition: decode_audio.c:71
#define PNG_COLOR_TYPE_PALETTE
Definition: png.h:32
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int filter_type
Definition: pngdec.c:73
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdec.c:186
#define AV_DICT_DONT_STRDUP_KEY
Take ownership of a key that&#39;s been allocated with av_malloc() or another memory allocation function...
Definition: dict.h:73
#define PNG_FILTER_VALUE_PAETH
Definition: png.h:42
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:3096
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int percent_missing(PNGDecContext *s)
Definition: pngdec.c:323
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
int y_offset
Definition: pngdec.c:65
uint8_t
#define av_cold
Definition: attributes.h:82
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have update_thread_context() run it in the next thread.If the codec allocates writable tables in its init()
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define PNG_COLOR_TYPE_RGB_ALPHA
Definition: png.h:34
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
Stereo 3D type: this structure describes how two videos are packed within a single video surface...
Definition: stereo3d.h:176
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2707
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf offset
#define f(width, name)
Definition: cbs_vp9.c:255
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
Multithreading support functions.
AVCodec ff_apng_decoder
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is st...
Definition: pixfmt.h:205
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1722
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
Definition: pngdec.c:612
Structure to hold side data for an AVFrame.
Definition: frame.h:201
uint8_t * data
Definition: avcodec.h:1533
const uint8_t * buffer
Definition: bytestream.h:34
uint32_t tag
Definition: movenc.c:1534
int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
Definition: utils.c:1827
#define ff_dlog(a,...)
AVDictionary * metadata
metadata.
Definition: frame.h:581
static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
Definition: pngdec.c:854
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
AVCodec ff_lscr_decoder
ptrdiff_t size
Definition: opengl_enc.c:100
unsigned int last_row_size
Definition: pngdec.c:85
#define AV_WB16(p, v)
Definition: intreadwrite.h:405
int cur_h
Definition: pngdec.c:63
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1565
static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:783
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:154
#define U(x)
Definition: vp56_arith.h:37
#define src
Definition: vp8dsp.c:254
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
int width
Definition: frame.h:353
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static const uint8_t png_pass_dsp_mask[NB_PASSES]
Definition: pngdec.c:109
int flags
Additional information about the frame packing.
Definition: stereo3d.h:185
16 bits gray, 16 bits alpha (big-endian)
Definition: pixfmt.h:212
#define AV_BPRINT_SIZE_UNLIMITED
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p, AVPacket *avpkt)
Definition: pngdec.c:1179
static const uint16_t mask[17]
Definition: lzw.c:38
#define OP_SUB(x, s, l)
int is_copy
Whether the parent AVCodecContext is a copy of the context which had init() called on it...
Definition: internal.h:142
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:164
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1046
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:186
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
uint8_t * crow_buf
Definition: pngdec.c:83
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int pass
Definition: pngdec.c:90
int ff_png_get_nb_channels(int color_type)
Definition: png.c:49
ThreadFrame picture
Definition: pngdec.c:58
int height
Definition: pngdec.c:62
#define av_fourcc2str(fourcc)
Definition: avutil.h:348
#define PNGSIG
Definition: png.h:47
simple assert() macros that are a bit more flexible than ISO C assert().
GLsizei GLsizei * length
Definition: opengl_enc.c:114
const char * name
Name of the codec implementation.
Definition: avcodec.h:3562
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_WL32 unsigned int_TMPL AV_RL24
Definition: bytestream.h:87
int bits_per_pixel
Definition: pngdec.c:75
GetByteContext gb
Definition: pngdec.c:55
#define FFMAX(a, b)
Definition: common.h:94
#define NB_PASSES
Definition: png.h:45
#define fail()
Definition: checkasm.h:122
#define AV_CODEC_CAP_FRAME_THREADS
Codec supports frame-level multithreading.
Definition: avcodec.h:1055
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:93
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:800
uint8_t blend_op
Definition: pngdec.c:67
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1539
#define ONLY_IF_THREADS_ENABLED(x)
Define a function with only the non-default version specified.
Definition: internal.h:225
AVStereo3D * av_stereo3d_create_side_data(AVFrame *frame)
Allocate a complete AVFrameSideData and add it to the frame.
Definition: stereo3d.c:33
z_stream zstream
Definition: pngdec.c:95
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
Definition: imgutils.c:282
#define b
Definition: input.c:41
AVMasteringDisplayMetadata * av_mastering_display_metadata_create_side_data(AVFrame *frame)
Allocate a complete AVMasteringDisplayMetadata and add it to the frame.
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
alias for AV_PIX_FMT_YA8
Definition: pixfmt.h:146
#define FFMIN(a, b)
Definition: common.h:96
#define PNG_FILTER_VALUE_SUB
Definition: png.h:39
uint32_t palette[256]
Definition: pngdec.c:82
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define PNG_COLOR_TYPE_GRAY
Definition: png.h:31
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, uint8_t *src, uint8_t *last, int size, int bpp)
Definition: pngdec.c:252
int width
picture width / height.
Definition: avcodec.h:1794
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call ff_thread_finish_setup() afterwards.If some code can't be moved
uint8_t w
Definition: llviddspenc.c:38
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
#define s(width, name)
Definition: cbs_vp9.c:257
uint8_t * last_row
Definition: pngdec.c:84
The data contains an ICC profile as an opaque octet buffer following the format described by ISO 1507...
Definition: frame.h:143
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AVCodecContext * avctx
Definition: pngdec.c:53
void av_bprint_get_buffer(AVBPrint *buf, unsigned size, unsigned char **mem, unsigned *actual_size)
Allocate bytes in the buffer for external use.
Definition: bprint.c:218
av_cold void ff_pngdsp_init(PNGDSPContext *dsp)
Definition: pngdsp.c:43
static int decode_zbuf(AVBPrint *bp, const uint8_t *data, const uint8_t *data_end)
Definition: pngdec.c:444
int channels
Definition: pngdec.c:74
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
if(ret)
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:560
static uint8_t * iso88591_to_utf8(const uint8_t *in, size_t size_in)
Definition: pngdec.c:488
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:188
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:368
static av_cold int png_dec_init(AVCodecContext *avctx)
Definition: pngdec.c:1754
enum AVStereo3DType type
How views are packed within the video.
Definition: stereo3d.h:180
int discard_damaged_percentage
The percentage of damaged samples to discard a frame.
Definition: avcodec.h:3428
Libavcodec external API header.
enum PNGHeaderState hdr_state
Definition: pngdec.c:60
int buffer_size
Definition: pngdec.c:89
static int skip_tag(AVIOContext *in, int32_t tag_name)
Definition: ismindex.c:132
the pkt_dts and pkt_pts fields in AVFrame will work as usual Restrictions on codec whose streams don t reset across will not work because their bitstreams cannot be decoded in parallel *The contents of buffers must not be read before as well as code calling up to before the decode process starts Call have add an so the codec calls ff_thread_report set AVCodecInternal allocate_progress The frames must then be freed with ff_thread_release_buffer().Otherwise leave it at zero and decode directly into the user-supplied frames.Call ff_thread_report_progress() after some part of the current picture has decoded.A good place to put this is where draw_horiz_band() is called-add this if it isn't called anywhere
enum AVCodecID codec_id
Definition: avcodec.h:1631
#define PNG_FILTER_VALUE_UP
Definition: png.h:40
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:326
#define PNG_FILTER_TYPE_LOCO
Definition: png.h:37
uint8_t last_dispose_op
Definition: pngdec.c:68
#define abs(x)
Definition: cuda_runtime.h:35
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
int debug
debug
Definition: avcodec.h:2706
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
Wrapper around get_buffer() for frame-multithreaded codecs.
main external API structure.
Definition: avcodec.h:1621
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> (&#39;D&#39;<<24) + (&#39;C&#39;<<16) + (&#39;B&#39;<<8) + &#39;A&#39;).
Definition: avcodec.h:1646
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1969
uint8_t * data
Definition: frame.h:203
Tag MUST be and< 10hcoeff half pel interpolation filter coefficients, hcoeff[0] are the 2 middle coefficients[1] are the next outer ones and so on, resulting in a filter like:...eff[2], hcoeff[1], hcoeff[0], hcoeff[0], hcoeff[1], hcoeff[2]...the sign of the coefficients is not explicitly stored but alternates after each coeff and coeff[0] is positive, so...,+,-,+,-,+,+,-,+,-,+,...hcoeff[0] is not explicitly stored but found by subtracting the sum of all stored coefficients with signs from 32 hcoeff[0]=32-hcoeff[1]-hcoeff[2]-...a good choice for hcoeff and htaps is htaps=6 hcoeff={40,-10, 2}an alternative which requires more computations at both encoder and decoder side and may or may not be better is htaps=8 hcoeff={42,-14, 6,-2}ref_frames minimum of the number of available reference frames and max_ref_frames for example the first frame after a key frame always has ref_frames=1spatial_decomposition_type wavelet type 0 is a 9/7 symmetric compact integer wavelet 1 is a 5/3 symmetric compact integer wavelet others are reserved stored as delta from last, last is reset to 0 if always_reset||keyframeqlog quality(logarithmic quantizer scale) stored as delta from last, last is reset to 0 if always_reset||keyframemv_scale stored as delta from last, last is reset to 0 if always_reset||keyframe FIXME check that everything works fine if this changes between framesqbias dequantization bias stored as delta from last, last is reset to 0 if always_reset||keyframeblock_max_depth maximum depth of the block tree stored as delta from last, last is reset to 0 if always_reset||keyframequant_table quantization tableHighlevel bitstream structure:==============================--------------------------------------------|Header|--------------------------------------------|------------------------------------|||Block0||||split?||||yes no||||.........intra?||||:Block01:yes no||||:Block02:.................||||:Block03::y DC::ref index:||||:Block04::cb DC::motion x:||||.........:cr DC::motion 
y:||||.................|||------------------------------------||------------------------------------|||Block1|||...|--------------------------------------------|------------------------------------|||Y subbands||Cb subbands||Cr subbands||||------||------||------|||||LL0||HL0||||LL0||HL0||||LL0||HL0|||||------||------||------||||------||------||------|||||LH0||HH0||||LH0||HH0||||LH0||HH0|||||------||------||------||||------||------||------|||||HL1||LH1||||HL1||LH1||||HL1||LH1|||||------||------||------||||------||------||------|||||HH1||HL2||||HH1||HL2||||HH1||HL2|||||...||...||...|||------------------------------------|--------------------------------------------Decoding process:=================------------|||Subbands|------------||||------------|Intra DC||||LL0 subband prediction------------|\Dequantization-------------------\||Reference frames|\IDWT|--------------|Motion\|||Frame 0||Frame 1||Compensation.OBMC v-------|--------------|--------------.\------> Frame n output Frame Frame<----------------------------------/|...|-------------------Range Coder:============Binary Range Coder:-------------------The implemented range coder is an adapted version based upon"Range encoding: an algorithm for removing redundancy from a digitised message."by G.N.N.Martin.The symbols encoded by the Snow range coder are bits(0|1).The associated probabilities are not fix but change depending on the symbol mix seen so far.bit seen|new state---------+-----------------------------------------------0|256-state_transition_table[256-old_state];1|state_transition_table[old_state];state_transition_table={0, 0, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 
106, 107, 108, 109, 110, 111, 112, 113, 114, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 190, 191, 192, 194, 194, 195, 196, 197, 198, 199, 200, 201, 202, 202, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 215, 215, 216, 217, 218, 219, 220, 220, 222, 223, 224, 225, 226, 227, 227, 229, 229, 230, 231, 232, 234, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 248, 0, 0, 0, 0, 0, 0, 0};FIXME Range Coding of integers:-------------------------FIXME Neighboring Blocks:===================left and top are set to the respective blocks unless they are outside of the image in which case they are set to the Null block top-left is set to the top left block unless it is outside of the image in which case it is set to the left block if this block has no larger parent block or it is at the left side of its parent block and the top right block is not outside of the image then the top right block is used for top-right else the top-left block is used Null block y, cb, cr are 128 level, ref, mx and my are 0 Motion Vector Prediction:=========================1.the motion vectors of all the neighboring blocks are scaled to compensate for the difference of reference frames scaled_mv=(mv *(256 *(current_reference+1)/(mv.reference+1))+128)> the median of the scaled left
Definition: snow.txt:206
int interlace_type
Definition: pngdec.c:72
PNGImageState
Definition: pngdec.c:46
const uint8_t ff_png_pass_ymask[NB_PASSES]
Definition: png.c:25
int image_linesize
Definition: pngdec.c:81
int extradata_size
Definition: avcodec.h:1723
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31))))#define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac){}void ff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, 
int apply_map){AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);return NULL;}return ac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;}int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){int use_generic=1;int len=in->nb_samples;int p;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:727
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:2687
Y , 16bpp, big-endian.
Definition: pixfmt.h:97
static void inflate(uint8_t *dst, const uint8_t *p1, int width, int threshold, const uint8_t *coordinates[], int coord, int maxc)
Definition: vf_neighbor.c:198
Rational number (pair of numerator and denominator).
Definition: rational.h:58
Mastering display metadata capable of representing the color volume of the display used to master the...
int cur_w
Definition: pngdec.c:63
uint8_t transparent_color_be[6]
Definition: pngdec.c:78
#define OP_AVG(x, s, l)
uint8_t * image_buf
Definition: pngdec.c:80
int allocate_progress
Whether to allocate progress for frame threading.
Definition: internal.h:157
uint8_t dispose_op
Definition: pngdec.c:67
AVRational display_primaries[3][2]
CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
uint8_t pixel
Definition: tiny_ssim.c:42
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
int last_x_offset
Definition: pngdec.c:66
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
#define FAST_DIV255(x)
Definition: pngdec.c:1064
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:1066
#define YUV2RGB(NAME, TYPE)
Definition: pngdec.c:309
static const uint8_t png_pass_mask[NB_PASSES]
Definition: pngdec.c:99
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb...
Definition: pixfmt.h:76
Y , 8bpp.
Definition: pixfmt.h:74
static av_cold int png_dec_end(AVCodecContext *avctx)
Definition: pngdec.c:1782
void(* add_paeth_prediction)(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
Definition: pngdsp.h:33
common internal api header.
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
Definition: pngdec.c:899
#define FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM
The decoder extracts and fills its parameters even if the frame is skipped due to the skip_frame sett...
Definition: internal.h:60
#define PNG_FILTER_VALUE_NONE
Definition: png.h:38
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, uint32_t length)
Definition: pngdec.c:806
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:102
int last_w
Definition: pngdec.c:64
void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_padded_malloc except that buffer will always be 0-initialized after call...
Definition: utils.c:82
static const uint8_t png_pass_dsp_ymask[NB_PASSES]
Definition: pngdec.c:104
Stereoscopic video.
int den
Denominator.
Definition: rational.h:60
void ff_png_zfree(void *opaque, void *ptr)
Definition: png.c:44
void * priv_data
Definition: avcodec.h:1648
static int png_decode_idat(PNGDecContext *s, int length)
Definition: pngdec.c:414
uint8_t * buffer
Definition: pngdec.c:88
#define av_free(p)
#define FF_DEBUG_STARTCODE
Definition: avcodec.h:2720
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1656
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
int row_size
Definition: pngdec.c:92
APNG common header.
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
Definition: bytestream.h:208
PNGDSPContext dsp
Definition: pngdec.c:52
int compression_type
Definition: pngdec.c:71
int last_h
Definition: pngdec.c:64
int ff_png_pass_row_size(int pass, int bits_per_pixel, int width)
Definition: png.c:62
int height
Definition: frame.h:353
FILE * out
Definition: movenc.c:54
int bit_depth
Definition: pngdec.c:69
#define av_freep(p)
int color_type
Definition: pngdec.c:70
ThreadFrame last_picture
Definition: pngdec.c:57
#define av_malloc_array(a, b)
static void png_put_interlaced_row(uint8_t *dst, int width, int bits_per_pixel, int pass, int color_type, const uint8_t *src)
Definition: pngdec.c:116
#define FFSWAP(type, a, b)
Definition: common.h:99
int crow_size
Definition: pngdec.c:91
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
static void decode_flush(AVCodecContext *avctx)
Definition: agm.c:1261
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Frame references ownership and permissions
int x_offset
Definition: pngdec.c:65
#define MKTAG(a, b, c, d)
Definition: common.h:366
void * ff_png_zalloc(void *opaque, unsigned int items, unsigned int size)
Definition: png.c:39
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
This structure stores compressed data.
Definition: avcodec.h:1510
int has_trns
Definition: pngdec.c:77
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1219
mode
Use these values in ebur128_init (or&#39;ed).
Definition: ebur128.h:83
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:999
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2684
AVCodec ff_png_decoder
Predicted.
Definition: avutil.h:275
#define UNROLL_FILTER(op)
Definition: pngdec.c:237
#define MNGSIG
Definition: png.h:48